forked from rpms/curl

import curl-7.61.1-30.el8

CentOS Sources 2023-03-28 08:30:53 +00:00 committed by root
parent 557d119aff
commit 30b6af8b75
6 changed files with 775 additions and 1 deletions


@@ -0,0 +1,171 @@
From 005d3f387bc5c3b2ee94d0597b5e202644c825f5 Mon Sep 17 00:00:00 2001
From: Daniel Stenberg <daniel@haxx.se>
Date: Wed, 31 Oct 2018 11:08:49 +0100
Subject: [PATCH 1/3] runtests: use the local curl for verifying
... revert the mistaken change brought in commit 8440616f53.
Reported-by: Alessandro Ghedini
Bug: https://curl.haxx.se/mail/lib-2018-10/0118.html
Closes #3198
Upstream-commit: 8effa8c2b09906a2f00a3f08322dc5da35245b0a
Signed-off-by: Kamil Dudka <kdudka@redhat.com>
---
tests/runtests.pl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/runtests.pl b/tests/runtests.pl
index 8d8ed81..d62fa40 100755
--- a/tests/runtests.pl
+++ b/tests/runtests.pl
@@ -152,7 +152,7 @@ my $NEGTELNETPORT; # TELNET server port with negotiation
my $srcdir = $ENV{'srcdir'} || '.';
my $CURL="../src/curl".exe_ext(); # what curl executable to run on the tests
-my $VCURL="curl"; # what curl binary to use to verify the servers with
+my $VCURL=$CURL; # what curl binary to use to verify the servers with
# VCURL is handy to set to the system one when the one you
# just built hangs or crashes and thus prevent verification
my $DBGCURL=$CURL; #"../src/.libs/curl"; # alternative for debugging
--
2.37.3
From fbc2ac6f06ec13cc872ce7adb870f4d7c7d5dded Mon Sep 17 00:00:00 2001
From: Daniel Stenberg <daniel@haxx.se>
Date: Mon, 29 Aug 2022 00:09:17 +0200
Subject: [PATCH 2/3] cookie: reject cookies with "control bytes"
Rejects 0x01 - 0x1f (except 0x09) plus 0x7f
Reported-by: Axel Chong
Bug: https://curl.se/docs/CVE-2022-35252.html
CVE-2022-35252
Closes #9381
Upstream-commit: 8dfc93e573ca740544a2d79ebb0ed786592c65c3
Signed-off-by: Kamil Dudka <kdudka@redhat.com>
---
lib/cookie.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/lib/cookie.c b/lib/cookie.c
index cb0c03b..e0470a1 100644
--- a/lib/cookie.c
+++ b/lib/cookie.c
@@ -371,6 +371,30 @@ static void strstore(char **str, const char *newstr)
*str = strdup(newstr);
}
+/*
+ RFC 6265 section 4.1.1 says a server should accept this range:
+
+ cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+
+ But Firefox and Chrome as of June 2022 accept space, comma and double-quotes
+ fine. The prime reason for filtering out control bytes is that some HTTP
+ servers return 400 for requests that contain such.
+*/
+static int invalid_octets(const char *p)
+{
+ /* Reject all bytes \x01 - \x1f (*except* \x09, TAB) + \x7f */
+ static const char badoctets[] = {
+ "\x01\x02\x03\x04\x05\x06\x07\x08\x0a"
+ "\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14"
+ "\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f"
+ };
+ size_t vlen, len;
+ /* scan for all the octets that are *not* in cookie-octet */
+ len = strcspn(p, badoctets);
+ vlen = strlen(p);
+ return (len != vlen);
+}
+
/*
* remove_expired() removes expired cookies.
*/
@@ -541,6 +565,11 @@ Curl_cookie_add(struct Curl_easy *data,
badcookie = TRUE;
break;
}
+ if(invalid_octets(whatptr) || invalid_octets(name)) {
+ infof(data, "invalid octets in name/value, cookie dropped");
+ badcookie = TRUE;
+ break;
+ }
}
else if(!len) {
/* this was a "<name>=" with no content, and we must allow
--
2.37.1
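For reference, the check added above boils down to a single strcspn() scan over the cookie name and the cookie value. The standalone sketch below (illustration only, not part of the patch; the helper name and sample strings are made up) shows the same logic in isolation:

/* sketch: flag the bytes 0x01-0x1f (except 0x09, TAB) plus 0x7f,
   mirroring invalid_octets() from the patch above */
#include <stdio.h>
#include <string.h>

static int has_bad_octets(const char *s)
{
  static const char badoctets[] =
    "\x01\x02\x03\x04\x05\x06\x07\x08\x0a"
    "\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14"
    "\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f";
  /* strcspn() returns the length of the initial run of bytes *not* in
     badoctets; anything shorter than the full string means a hit */
  return strcspn(s, badoctets) != strlen(s);
}

int main(void)
{
  printf("%d\n", has_bad_octets("yesyes"));    /* 0: value is kept */
  printf("%d\n", has_bad_octets("\x02-junk")); /* 1: value is dropped */
  return 0;
}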
From 1a3e2bd48572761236934651091c899a4d460ef5 Mon Sep 17 00:00:00 2001
From: Daniel Stenberg <daniel@haxx.se>
Date: Mon, 29 Aug 2022 00:09:17 +0200
Subject: [PATCH 3/3] test8: verify that "ctrl-byte cookies" are ignored
Upstream-commit: 2fc031d834d488854ffc58bf7dbcef7fa7c1fc28
Signed-off-by: Kamil Dudka <kdudka@redhat.com>
---
tests/data/test8 | 32 +++++++++++++++++++++++++++++++-
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/tests/data/test8 b/tests/data/test8
index a8548e6..8587611 100644
--- a/tests/data/test8
+++ b/tests/data/test8
@@ -46,6 +46,36 @@ Set-Cookie: trailingspace = removed; path=/we/want;
Set-Cookie: nocookie=yes; path=/WE;
Set-Cookie: blexp=yesyes; domain=%HOSTIP; domain=%HOSTIP; expiry=totally bad;
Set-Cookie: partialip=nono; domain=.0.0.1;
+Set-Cookie: cookie1=-junk
+Set-Cookie: cookie2=-junk
+Set-Cookie: cookie3=-junk
+Set-Cookie: cookie4=-junk
+Set-Cookie: cookie5=-junk
+Set-Cookie: cookie6=-junk
+Set-Cookie: cookie7=-junk
+Set-Cookie: cookie8=-junk
+Set-Cookie: cookie9=junk- -
+Set-Cookie: cookie11= -junk
+Set-Cookie: cookie12= -junk
+Set-Cookie: cookie14=-junk
+Set-Cookie: cookie15=-junk
+Set-Cookie: cookie16=-junk
+Set-Cookie: cookie17=-junk
+Set-Cookie: cookie18=-junk
+Set-Cookie: cookie19=-junk
+Set-Cookie: cookie20=-junk
+Set-Cookie: cookie21=-junk
+Set-Cookie: cookie22=-junk
+Set-Cookie: cookie23=-junk
+Set-Cookie: cookie24=-junk
+Set-Cookie: cookie25=-junk
+Set-Cookie: cookie26=-junk
+Set-Cookie: cookie27=-junk
+Set-Cookie: cookie28=-junk
+Set-Cookie: cookie29=-junk
+Set-Cookie: cookie30=-junk
+Set-Cookie: cookie31=-junk
+Set-Cookie: cookie31=-junk
</file>
<precheck>
@@ -62,7 +92,7 @@ perl -e 'if ("%HOSTIP" !~ /\.0\.0\.1$/) {print "Test only works for HOSTIPs endi
GET /we/want/8 HTTP/1.1
Host: %HOSTIP:%HTTPPORT
Accept: */*
-Cookie: name with space=is weird but; trailingspace=removed; cookie=perhaps; cookie=yes; foobar=name; blexp=yesyes
+Cookie: name with space=is weird but; trailingspace=removed; cookie=perhaps; cookie=yes; foobar=name; blexp=yesyes; cookie9=junk- -
</protocol>
</verify>
--
2.37.1


@@ -0,0 +1,112 @@
From 78b62ef1206621e8f4f1628ad4eb0a7be877c96f Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 7 Dec 2018 17:04:39 +0100
Subject: [PATCH] Upon HTTP_1_1_REQUIRED, retry the request with HTTP/1.1
This is a companion patch to cbea2fd2c (NTLM: force the connection to
HTTP/1.1, 2018-12-06): with NTLM, we can switch to HTTP/1.1
preemptively. However, with other (Negotiate) authentication it is not
clear to this developer whether there is a way to make it work with
HTTP/2, so let's try HTTP/2 first and fall back in case we encounter the
error HTTP_1_1_REQUIRED.
Note: we will still keep the NTLM workaround, as it avoids an extra
round trip.
Daniel Stenberg helped a lot with this patch, in particular by
suggesting to introduce the Curl_h2_http_1_1_error() function.
Closes #3349
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Upstream-commit: d997aa0e963c5be5de100dccdc5208d39bd3d62b
Signed-off-by: Kamil Dudka <kdudka@redhat.com>
---
lib/http2.c | 8 ++++++++
lib/http2.h | 4 ++++
lib/multi.c | 20 ++++++++++++++++++++
3 files changed, 32 insertions(+)
diff --git a/lib/http2.c b/lib/http2.c
index d769193..3071097 100644
--- a/lib/http2.c
+++ b/lib/http2.c
@@ -2300,6 +2300,14 @@ void Curl_http2_cleanup_dependencies(struct Curl_easy *data)
Curl_http2_remove_child(data->set.stream_depends_on, data);
}
+/* Only call this function for a transfer that already got a HTTP/2
+ CURLE_HTTP2_STREAM error! */
+bool Curl_h2_http_1_1_error(struct connectdata *conn)
+{
+ struct http_conn *httpc = &conn->proto.httpc;
+ return (httpc->error_code == NGHTTP2_HTTP_1_1_REQUIRED);
+}
+
#else /* !USE_NGHTTP2 */
/* Satisfy external references even if http2 is not compiled in. */
diff --git a/lib/http2.h b/lib/http2.h
index 21cd9b8..91e504c 100644
--- a/lib/http2.h
+++ b/lib/http2.h
@@ -59,6 +59,9 @@ CURLcode Curl_http2_add_child(struct Curl_easy *parent,
void Curl_http2_remove_child(struct Curl_easy *parent,
struct Curl_easy *child);
void Curl_http2_cleanup_dependencies(struct Curl_easy *data);
+
+/* returns true if the HTTP/2 stream error was HTTP_1_1_REQUIRED */
+bool Curl_h2_http_1_1_error(struct connectdata *conn);
#else /* USE_NGHTTP2 */
#define Curl_http2_init(x) CURLE_UNSUPPORTED_PROTOCOL
#define Curl_http2_send_request(x) CURLE_UNSUPPORTED_PROTOCOL
@@ -74,6 +77,7 @@ void Curl_http2_cleanup_dependencies(struct Curl_easy *data);
#define Curl_http2_add_child(x, y, z)
#define Curl_http2_remove_child(x, y)
#define Curl_http2_cleanup_dependencies(x)
+#define Curl_h2_http_1_1_error(x) 0
#endif
#endif /* HEADER_CURL_HTTP2_H */
diff --git a/lib/multi.c b/lib/multi.c
index 0f57fd5..d64ba94 100644
--- a/lib/multi.c
+++ b/lib/multi.c
@@ -46,6 +46,7 @@
#include "vtls/vtls.h"
#include "connect.h"
#include "http_proxy.h"
+#include "http2.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
@@ -1943,6 +1944,25 @@ static CURLMcode multi_runsingle(struct Curl_multi *multi,
done = TRUE;
}
}
+ else if((CURLE_HTTP2_STREAM == result) &&
+ Curl_h2_http_1_1_error(data->easy_conn)) {
+ CURLcode ret = Curl_retry_request(data->easy_conn, &newurl);
+
+ infof(data, "Forcing HTTP/1.1 for NTLM");
+ data->set.httpversion = CURL_HTTP_VERSION_1_1;
+
+ if(!ret)
+ retry = (newurl)?TRUE:FALSE;
+ else
+ result = ret;
+
+ if(retry) {
+ /* if we are to retry, set the result to OK and consider the
+ request as done */
+ result = CURLE_OK;
+ done = TRUE;
+ }
+ }
if(result) {
/*
--
2.37.3
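The patch makes this fallback happen inside libcurl's multi state machine. As a rough illustration of the same idea at the application level (a hedged sketch, not the patch itself; the URL is a placeholder and error handling is trimmed), an easy-interface caller could retry by hand when the HTTP/2 stream is refused:

/* sketch: try HTTP/2 first, redo the request over HTTP/1.1 if the
   transfer fails with a stream error */
#include <curl/curl.h>
#include <stdio.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  CURLcode res;

  if(!curl)
    return 1;
  curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
  curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
  res = curl_easy_perform(curl);
  if(res == CURLE_HTTP2 || res == CURLE_HTTP2_STREAM) {
    /* the server (or an intermediary) could not serve this request over
       HTTP/2; force HTTP/1.1 and try once more */
    curl_easy_setopt(curl, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
    res = curl_easy_perform(curl);
  }
  printf("result: %s\n", curl_easy_strerror(res));
  curl_easy_cleanup(curl);
  return (int)res;
}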


@@ -0,0 +1,81 @@
From 5cdcf1dbd39c64e18a81fc912a36942a3ec87565 Mon Sep 17 00:00:00 2001
From: Daniel Stenberg <daniel@haxx.se>
Date: Mon, 19 Dec 2022 08:38:37 +0100
Subject: [PATCH] smb/telnet: do not free the protocol struct in *_done()
It is managed by the generic layer.
Reported-by: Trail of Bits
Closes #10112
Upstream-commit: 4f20188ac644afe174be6005ef4f6ffba232b8b2
Signed-off-by: Kamil Dudka <kdudka@redhat.com>
---
lib/smb.c | 14 ++------------
lib/telnet.c | 3 ---
2 files changed, 2 insertions(+), 15 deletions(-)
diff --git a/lib/smb.c b/lib/smb.c
index 039d680..f682c1f 100644
--- a/lib/smb.c
+++ b/lib/smb.c
@@ -61,8 +61,6 @@ static CURLcode smb_connect(struct connectdata *conn, bool *done);
static CURLcode smb_connection_state(struct connectdata *conn, bool *done);
static CURLcode smb_do(struct connectdata *conn, bool *done);
static CURLcode smb_request_state(struct connectdata *conn, bool *done);
-static CURLcode smb_done(struct connectdata *conn, CURLcode status,
- bool premature);
static CURLcode smb_disconnect(struct connectdata *conn, bool dead);
static int smb_getsock(struct connectdata *conn, curl_socket_t *socks,
int numsocks);
@@ -75,7 +73,7 @@ const struct Curl_handler Curl_handler_smb = {
"SMB", /* scheme */
smb_setup_connection, /* setup_connection */
smb_do, /* do_it */
- smb_done, /* done */
+ ZERO_NULL, /* done */
ZERO_NULL, /* do_more */
smb_connect, /* connect_it */
smb_connection_state, /* connecting */
@@ -100,7 +98,7 @@ const struct Curl_handler Curl_handler_smbs = {
"SMBS", /* scheme */
smb_setup_connection, /* setup_connection */
smb_do, /* do_it */
- smb_done, /* done */
+ ZERO_NULL, /* done */
ZERO_NULL, /* do_more */
smb_connect, /* connect_it */
smb_connection_state, /* connecting */
@@ -915,14 +913,6 @@ static CURLcode smb_request_state(struct connectdata *conn, bool *done)
return CURLE_OK;
}
-static CURLcode smb_done(struct connectdata *conn, CURLcode status,
- bool premature)
-{
- (void) premature;
- Curl_safefree(conn->data->req.protop);
- return status;
-}
-
static CURLcode smb_disconnect(struct connectdata *conn, bool dead)
{
struct smb_conn *smbc = &conn->proto.smbc;
diff --git a/lib/telnet.c b/lib/telnet.c
index 923c7f8..48cd0d7 100644
--- a/lib/telnet.c
+++ b/lib/telnet.c
@@ -1294,9 +1294,6 @@ static CURLcode telnet_done(struct connectdata *conn,
curl_slist_free_all(tn->telnet_vars);
tn->telnet_vars = NULL;
-
- Curl_safefree(conn->data->req.protop);
-
return CURLE_OK;
}
--
2.38.1


@@ -0,0 +1,44 @@
From 4bbd1947aeb26d5dbcddbb058652e0e64771b71d Mon Sep 17 00:00:00 2001
From: Kamil Dudka <kdudka@redhat.com>
Date: Mon, 6 Feb 2023 17:46:36 +0100
Subject: [PATCH] h2: lower initial window size to 32 MiB
Cherry-picked from upstream commit
15f51474c837679c0b79825c23356ac681ffabde which was focused on paused
transfers but required an update of nghttp2 to work properly.
Bug: https://bugzilla.redhat.com/2166254
---
lib/http2.c | 2 +-
tests/data/test1800 | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/http2.c b/lib/http2.c
index 3071097..1fd2233 100644
--- a/lib/http2.c
+++ b/lib/http2.c
@@ -63,7 +63,7 @@
#define NGHTTP2_HAS_SET_LOCAL_WINDOW_SIZE 1
#endif
-#define HTTP2_HUGE_WINDOW_SIZE (1 << 30)
+#define HTTP2_HUGE_WINDOW_SIZE (32 * 1024 * 1024) /* 32 MB */
#ifdef DEBUG_HTTP2
#define H2BUGF(x) x
diff --git a/tests/data/test1800 b/tests/data/test1800
index 0110184..c308c99 100644
--- a/tests/data/test1800
+++ b/tests/data/test1800
@@ -48,7 +48,7 @@ Host: %HOSTIP:%HTTPPORT
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: %H2CVER
-HTTP2-Settings: AAMAAABkAARAAAAAAAIAAAAA
+HTTP2-Settings: AAMAAABkAAQCAAAAAAIAAAAA
</protocol>
</verify>
--
2.39.1
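The only test adjustment needed is the HTTP2-Settings value, because that header carries the base64url-encoded SETTINGS payload sent with the h2c Upgrade request: each setting is a 2-byte identifier followed by a 4-byte value, so SETTINGS_INITIAL_WINDOW_SIZE (id 0x4) going from 0x40000000 (1 GiB) to 0x02000000 (32 MiB) is the only difference between the old and new header value. The standalone sketch below (not part of the patch) reproduces the new expected string:

/* sketch: base64url-encode the SETTINGS payload that test1800 expects */
#include <stdio.h>

int main(void)
{
  /* MAX_CONCURRENT_STREAMS (0x3) = 100, INITIAL_WINDOW_SIZE (0x4) = 32 MiB,
     ENABLE_PUSH (0x2) = 0 */
  const unsigned char settings[] = {
    0x00, 0x03, 0x00, 0x00, 0x00, 0x64,
    0x00, 0x04, 0x02, 0x00, 0x00, 0x00,
    0x00, 0x02, 0x00, 0x00, 0x00, 0x00
  };
  static const char tbl[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
  size_t i;
  /* 18 bytes is a multiple of 3, so no padding is needed */
  for(i = 0; i < sizeof(settings); i += 3) {
    unsigned long n = ((unsigned long)settings[i] << 16) |
                      ((unsigned long)settings[i + 1] << 8) |
                      settings[i + 2];
    printf("%c%c%c%c", tbl[(n >> 18) & 0x3f], tbl[(n >> 12) & 0x3f],
           tbl[(n >> 6) & 0x3f], tbl[n & 0x3f]);
  }
  puts("");  /* prints AAMAAABkAAQCAAAAAAIAAAAA */
  return 0;
}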


@@ -0,0 +1,331 @@
From 95f873ff983a1ae57415b3c16a881e74432cf8b8 Mon Sep 17 00:00:00 2001
From: Fabian Keil <fk@fabiankeil.de>
Date: Tue, 9 Feb 2021 14:04:32 +0100
Subject: [PATCH 1/2] runtests.pl: support the nonewline attribute for the data
part
Closes #8239
Upstream-commit: 736847611a40c01e7c290407e22e2f0f5f8efd6a
Signed-off-by: Kamil Dudka <kdudka@redhat.com>
---
tests/runtests.pl | 7 +++++++
tests/server/getpart.c | 11 ++++++++++-
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/tests/runtests.pl b/tests/runtests.pl
index 40315aa..2e1500d 100755
--- a/tests/runtests.pl
+++ b/tests/runtests.pl
@@ -3817,6 +3817,13 @@ sub singletest {
else {
# check against the data section
@reply = getpart("reply", "data");
+ if(@reply) {
+ my %hash = getpartattr("reply", "data");
+ if($hash{'nonewline'}) {
+ # cut off the final newline from the final line of the data
+ chomp($reply[$#reply]);
+ }
+ }
# get the mode attribute
my $filemode=$replyattr{'mode'};
if($filemode && ($filemode eq "text") && $has_textaware) {
diff --git a/tests/server/getpart.c b/tests/server/getpart.c
index 32b55bc..f8fe3f6 100644
--- a/tests/server/getpart.c
+++ b/tests/server/getpart.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -295,6 +295,7 @@ int getpart(char **outbuf, size_t *outlen,
size_t outalloc = 256;
int in_wanted_part = 0;
int base64 = 0;
+ int nonewline = 0;
int error;
enum {
@@ -360,6 +361,8 @@ int getpart(char **outbuf, size_t *outlen,
if(error)
return error;
}
+ if(nonewline)
+ (*outlen)--;
break;
}
}
@@ -377,6 +380,8 @@ int getpart(char **outbuf, size_t *outlen,
if(error)
return error;
}
+ if(nonewline)
+ (*outlen)--;
break;
}
}
@@ -451,6 +456,10 @@ int getpart(char **outbuf, size_t *outlen,
/* bit rough test, but "mostly" functional, */
/* treat wanted part data as base64 encoded */
base64 = 1;
+ if(strstr(patt, "nonewline=")) {
+ show(("* setting nonewline\n"));
+ nonewline = 1;
+ }
}
continue;
}
--
2.39.1
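With this in place, a test author can mark a reply whose body must not end in a newline. A hypothetical fragment of a test file using the new attribute (illustration only, not one of the tests shipped in this commit) would look like:

<reply>
<data nonewline="yes">
HTTP/1.1 200 OK
Content-Length: 5

-foo-
</data>
</reply>

Without the attribute, getpart() would hand back the final line with its trailing newline and the served body would be one byte longer than the declared Content-Length.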
From bc5fc958b017895728962c9d44c469418cbec1a0 Mon Sep 17 00:00:00 2001
From: Patrick Monnerat <patrick@monnerat.net>
Date: Mon, 13 Feb 2023 08:33:09 +0100
Subject: [PATCH 2/2] content_encoding: do not reset stage counter for each
header
Test 418 verifies
Closes #10492
Upstream-commit: 119fb187192a9ea13dc90d9d20c215fc82799ab9
Signed-off-by: Kamil Dudka <kdudka@redhat.com>
---
lib/content_encoding.c | 7 +-
lib/urldata.h | 1 +
tests/data/Makefile.inc | 1 +
tests/data/test387 | 2 +-
tests/data/test418 | 152 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 158 insertions(+), 5 deletions(-)
create mode 100644 tests/data/test418
diff --git a/lib/content_encoding.c b/lib/content_encoding.c
index bfc13e2..94344d6 100644
--- a/lib/content_encoding.c
+++ b/lib/content_encoding.c
@@ -944,7 +944,6 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
{
struct Curl_easy *data = conn->data;
struct SingleRequest *k = &data->req;
- int counter = 0;
do {
const char *name;
@@ -979,9 +978,9 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
if(!encoding)
encoding = &error_encoding; /* Defer error at stack use. */
- if(++counter >= MAX_ENCODE_STACK) {
- failf(data, "Reject response due to %u content encodings",
- counter);
+ if(k->writer_stack_depth++ >= MAX_ENCODE_STACK) {
+ failf(data, "Reject response due to more than %u content encodings",
+ MAX_ENCODE_STACK);
return CURLE_BAD_CONTENT_ENCODING;
}
/* Stack the unencoding stage. */
diff --git a/lib/urldata.h b/lib/urldata.h
index 5b4b34f..8c8c20b 100644
--- a/lib/urldata.h
+++ b/lib/urldata.h
@@ -539,6 +539,7 @@ struct SingleRequest {
struct curltime start; /* transfer started at this time */
struct curltime now; /* current time */
+ unsigned char writer_stack_depth; /* Unencoding stack depth. */
bool header; /* incoming data has HTTP header */
enum {
HEADER_NORMAL, /* no bad header at all */
diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
index fb51cd6..86b6f85 100644
--- a/tests/data/Makefile.inc
+++ b/tests/data/Makefile.inc
@@ -66,6 +66,7 @@ test393 test394 test395 \
\
test400 test401 test402 test403 test404 test405 test406 test407 test408 \
test409 \
+test418 \
\
test500 test501 test502 test503 test504 test505 test506 test507 test508 \
test509 test510 test511 test512 test513 test514 test515 test516 test517 \
diff --git a/tests/data/test387 b/tests/data/test387
index 015ec25..644fc7f 100644
--- a/tests/data/test387
+++ b/tests/data/test387
@@ -47,7 +47,7 @@ Accept: */*
61
</errorcode>
<stderr mode="text">
-curl: (61) Reject response due to 5 content encodings
+curl: (61) Reject response due to more than 5 content encodings
</stderr>
</verify>
</testcase>
diff --git a/tests/data/test418 b/tests/data/test418
new file mode 100644
index 0000000..50e974e
--- /dev/null
+++ b/tests/data/test418
@@ -0,0 +1,152 @@
+<testcase>
+<info>
+<keywords>
+HTTP
+gzip
+</keywords>
+</info>
+
+#
+# Server-side
+<reply>
+<data nocheck="yes">
+HTTP/1.1 200 OK
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+Transfer-Encoding: gzip
+
+-foo-
+</data>
+</reply>
+
+#
+# Client-side
+<client>
+<server>
+http
+</server>
+ <name>
+Response with multiple Transfer-Encoding headers
+ </name>
+ <command>
+http://%HOSTIP:%HTTPPORT/%TESTNUMBER -sS
+</command>
+</client>
+
+#
+# Verify data after the test has been "shot"
+<verify>
+<protocol>
+GET /%TESTNUMBER HTTP/1.1
+Host: %HOSTIP:%HTTPPORT
+User-Agent: curl/7.61.1
+Accept: */*
+
+</protocol>
+
+# CURLE_BAD_CONTENT_ENCODING is 61
+<errorcode>
+61
+</errorcode>
+<stderr mode="text">
+curl: (61) Reject response due to more than 5 content encodings
+</stderr>
+</verify>
+</testcase>
--
2.39.1
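The point of the change is that the old counter lived on the stack of Curl_build_unencoding_stack() and therefore restarted at zero for every Content-Encoding/Transfer-Encoding header, so a response like the one in test418 (many separate headers, each naming a single encoding) never tripped the MAX_ENCODE_STACK limit while still stacking one writer per header. The standalone sketch below (illustration only, not libcurl code) contrasts the two counting strategies:

#include <stdio.h>

#define MAX_ENCODE_STACK 5

static unsigned char writer_stack_depth; /* per-request, like the new field */

/* old behaviour: the counter restarts for every header */
static int add_header_old(int encodings)
{
  int counter = 0;
  while(encodings--)
    if(++counter >= MAX_ENCODE_STACK)
      return 1; /* reject */
  return 0;
}

/* new behaviour: the counter persists across all headers of the request */
static int add_header_new(int encodings)
{
  while(encodings--)
    if(writer_stack_depth++ >= MAX_ENCODE_STACK)
      return 1; /* reject */
  return 0;
}

int main(void)
{
  int i, rej_old = 0, rej_new = 0;
  /* 100 headers with one encoding each, as in test418 */
  for(i = 0; i < 100; i++) {
    rej_old |= add_header_old(1);
    rej_new |= add_header_new(1);
  }
  printf("old: %s, new: %s\n", rej_old ? "rejected" : "accepted",
         rej_new ? "rejected" : "accepted");
  return 0; /* prints "old: accepted, new: rejected" */
}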


@@ -1,7 +1,7 @@
 Summary: A utility for getting files from remote servers (FTP, HTTP, and others)
 Name: curl
 Version: 7.61.1
-Release: 25%{?dist}
+Release: 30%{?dist}
 License: MIT
 Source: https://curl.haxx.se/download/%{name}-%{version}.tar.xz
@@ -121,6 +121,21 @@ Patch41: 0041-curl-7.61.1-CVE-2022-32206.patch
 # setopt: enable CURLOPT_SSH_KNOWNHOSTS and CURLOPT_SSH_KEYFUNCTION (#2063703)
 Patch42: 0042-curl-7.61.1-ssh-known-hosts.patch
+# control code in cookie denial of service (CVE-2022-35252)
+Patch43: 0043-curl-7.61.1-CVE-2022-35252.patch
+# upon HTTP_1_1_REQUIRED, retry the request with HTTP/1.1 (#2139337)
+Patch44: 0044-curl-7.61.1-retry-http11.patch
+# smb/telnet: fix use-after-free when HTTP proxy denies tunnel (CVE-2022-43552)
+Patch45: 0045-curl-7.61.1-CVE-2022-43552.patch
+# h2: lower initial window size to 32 MiB (#2166254)
+Patch46: 0046-curl-7.61.1-h2-window-size.patch
+# fix HTTP multi-header compression denial of service (CVE-2023-23916)
+Patch47: 0047-curl-7.61.1-CVE-2023-23916.patch
 # patch making libcurl multilib ready
 Patch101: 0101-curl-7.32.0-multilib.patch
@@ -336,6 +351,11 @@ sed -e 's|:8992/|:%{?__isa_bits}92/|g' -i tests/data/test97{3..6}
 %patch40 -p1
 %patch41 -p1
 %patch42 -p1
+%patch43 -p1
+%patch44 -p1
+%patch45 -p1
+%patch46 -p1
+%patch47 -p1
 # make tests/*.py use Python 3
 sed -e '1 s|^#!/.*python|#!%{__python3}|' -i tests/*.py
@@ -498,6 +518,21 @@ rm -f ${RPM_BUILD_ROOT}%{_libdir}/libcurl.la
 %{_libdir}/libcurl.so.4.[0-9].[0-9].minimal
 %changelog
+* Wed Feb 15 2023 Kamil Dudka <kdudka@redhat.com> - 7.61.1-30
+- fix HTTP multi-header compression denial of service (CVE-2023-23916)
+* Tue Feb 07 2023 Kamil Dudka <kdudka@redhat.com> - 7.61.1-29
+- h2: lower initial window size to 32 MiB (#2166254)
+* Wed Dec 21 2022 Kamil Dudka <kdudka@redhat.com> - 7.61.1-28
+- smb/telnet: fix use-after-free when HTTP proxy denies tunnel (CVE-2022-43552)
+* Fri Nov 18 2022 Kamil Dudka <kdudka@redhat.com> - 7.61.1-27
+- upon HTTP_1_1_REQUIRED, retry the request with HTTP/1.1 (#2139337)
+* Fri Sep 02 2022 Kamil Dudka <kdudka@redhat.com> - 7.61.1-26
+- control code in cookie denial of service (CVE-2022-35252)
 * Wed Jun 29 2022 Kamil Dudka <kdudka@redhat.com> - 7.61.1-25
 - setopt: enable CURLOPT_SSH_KNOWNHOSTS and CURLOPT_SSH_KEYFUNCTION (#2063703)
 - fix HTTP compression denial of service (CVE-2022-32206)