From c10b412a0dbe2980c45feca971cd9dcd9e545230 Mon Sep 17 00:00:00 2001 From: eabdullin Date: Wed, 15 Nov 2023 02:42:44 +0000 Subject: [PATCH] import UBI httpd-2.4.37-62.module+el8.9.0+19699+7a7a2044 --- SOURCES/httpd-2.4.37-CVE-2022-29404.patch | 38 +- SOURCES/httpd-2.4.37-CVE-2022-31813.patch | 22 +- SOURCES/httpd-2.4.37-CVE-2023-25690.patch | 157 +--- ...ttpd-2.4.37-mod_status-duplicate-key.patch | 170 ++++ SOURCES/httpd-2.4.37-r1885607.patch | 849 ++++++++++++++++++ SPECS/httpd.spec | 38 +- 6 files changed, 1068 insertions(+), 206 deletions(-) create mode 100644 SOURCES/httpd-2.4.37-mod_status-duplicate-key.patch create mode 100644 SOURCES/httpd-2.4.37-r1885607.patch diff --git a/SOURCES/httpd-2.4.37-CVE-2022-29404.patch b/SOURCES/httpd-2.4.37-CVE-2022-29404.patch index 3d706be..08d0b7b 100644 --- a/SOURCES/httpd-2.4.37-CVE-2022-29404.patch +++ b/SOURCES/httpd-2.4.37-CVE-2022-29404.patch @@ -75,44 +75,8 @@ index 6bedcac..393343a 100644 #ifdef AP_DEBUG { /* Make sure ap_getline() didn't leave any droppings. */ -diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c -index 7da9bde..1b7bb81 100644 ---- a/modules/proxy/mod_proxy_http.c -+++ b/modules/proxy/mod_proxy_http.c -@@ -439,13 +439,10 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) - apr_bucket *e; - apr_off_t bytes, fsize = 0; - apr_file_t *tmpfile = NULL; -- apr_off_t limit; - - body_brigade = apr_brigade_create(p, bucket_alloc); - *bytes_spooled = 0; - -- limit = ap_get_limit_req_body(r); -- - do { - if (APR_BRIGADE_EMPTY(input_brigade)) { - rv = stream_reqbody_read(req, input_brigade, 0); -@@ -462,17 +459,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) - apr_brigade_length(input_brigade, 1, &bytes); - - if (*bytes_spooled + bytes > MAX_MEM_SPOOL) { -- /* -- * LimitRequestBody does not affect Proxy requests (Should it?). -- * Let it take effect if we decide to store the body in a -- * temporary file on disk. 
-- */ -- if (limit && (*bytes_spooled + bytes > limit)) { -- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) -- "Request body is larger than the configured " -- "limit of %" APR_OFF_T_FMT, limit); -- return HTTP_REQUEST_ENTITY_TOO_LARGE; -- } - /* can't spool any more in memory; write latest brigade to disk */ - if (tmpfile == NULL) { - const char *temp_dir; diff --git a/server/core.c b/server/core.c -index 09664fc..084e243 100644 +index a0bfaad..6556f20 100644 --- a/server/core.c +++ b/server/core.c @@ -65,7 +65,7 @@ diff --git a/SOURCES/httpd-2.4.37-CVE-2022-31813.patch b/SOURCES/httpd-2.4.37-CVE-2022-31813.patch index 7fe91d1..bc0e232 100644 --- a/SOURCES/httpd-2.4.37-CVE-2022-31813.patch +++ b/SOURCES/httpd-2.4.37-CVE-2022-31813.patch @@ -1,8 +1,8 @@ diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c -index 3d5b220..ec9a414 100644 +index efcc6ca..6626ea0 100644 --- a/modules/proxy/proxy_util.c +++ b/modules/proxy/proxy_util.c -@@ -3621,12 +3621,14 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3631,12 +3631,14 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, char **old_cl_val, char **old_te_val) { @@ -18,7 +18,7 @@ index 3d5b220..ec9a414 100644 apr_bucket *e; int do_100_continue; conn_rec *origin = p_conn->connection; -@@ -3662,6 +3664,52 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3672,6 +3674,52 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, ap_xlate_proto_to_ascii(buf, strlen(buf)); e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(header_brigade, e); @@ -71,7 +71,7 @@ index 3d5b220..ec9a414 100644 if (dconf->preserve_host == 0) { if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */ if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) { -@@ -3683,7 +3731,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3693,7 +3741,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, /* don't want to use r->hostname, as the incoming header might have a * port attached */ @@ -80,7 +80,7 @@ index 3d5b220..ec9a414 100644 if (!hostname) { hostname = r->server->server_hostname; ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092) -@@ -3697,21 +3745,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3707,21 +3755,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, ap_xlate_proto_to_ascii(buf, strlen(buf)); e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(header_brigade, e); @@ -103,7 +103,7 @@ index 3d5b220..ec9a414 100644 /* handle Via */ if (conf->viaopt == via_block) { -@@ -3778,8 +3812,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3788,8 +3822,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, */ if (dconf->add_forwarded_headers) { if (PROXYREQ_REVERSE == r->proxyreq) { @@ -112,7 +112,7 @@ index 3d5b220..ec9a414 100644 /* Add X-Forwarded-For: so that the upstream has a chance to * determine, where the original request came from. */ -@@ -3789,8 +3821,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3799,8 +3831,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, /* Add X-Forwarded-Host: so that upstream knows what the * original request hostname was. 
*/ @@ -124,7 +124,7 @@ index 3d5b220..ec9a414 100644 } /* Add X-Forwarded-Server: so that upstream knows what the -@@ -3802,10 +3835,27 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3812,10 +3845,27 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, } } @@ -155,7 +155,7 @@ index 3d5b220..ec9a414 100644 creds = apr_table_get(r->notes, "proxy-basic-creds"); if (creds) { -@@ -3817,55 +3867,8 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3827,55 +3877,8 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, headers_in = (const apr_table_entry_t *) headers_in_array->elts; for (counter = 0; counter < headers_in_array->nelts; counter++) { if (headers_in[counter].key == NULL @@ -213,7 +213,7 @@ index 3d5b220..ec9a414 100644 } buf = apr_pstrcat(p, headers_in[counter].key, ": ", -@@ -3876,11 +3879,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3886,11 +3889,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, APR_BRIGADE_INSERT_TAIL(header_brigade, e); } @@ -226,4 +226,4 @@ index 3d5b220..ec9a414 100644 + return rc; } - PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, + PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, diff --git a/SOURCES/httpd-2.4.37-CVE-2023-25690.patch b/SOURCES/httpd-2.4.37-CVE-2023-25690.patch index 5b24a06..bb856b7 100644 --- a/SOURCES/httpd-2.4.37-CVE-2023-25690.patch +++ b/SOURCES/httpd-2.4.37-CVE-2023-25690.patch @@ -22,7 +22,7 @@ index 815ec72..2b8ed35 100644 backrefnoplus|BNP If backreferences are being escaped, spaces should be escaped to diff --git a/docs/manual/rewrite/flags.html.en b/docs/manual/rewrite/flags.html.en -index 80d0759..21edfe7 100644 +index 80d0759..734809a 100644 --- a/docs/manual/rewrite/flags.html.en +++ b/docs/manual/rewrite/flags.html.en @@ -85,10 +85,6 @@ of how you might use them.
top
BNP|backrefnoplus (don't escape space to +)
-@@ -127,8 +137,40 @@ strings in the encoded form.
- in a backreference to %20 rather than '+'. Useful when the backreference
- will be used in the path component rather than the query string.
- -+
-+# Escape spaces to %20 in the path instead of + as used in form submission via
-+# the query string
-+RewriteRule "^search/(.*)$" "/search.php/$1" "[B,BNP]"
-+
-+ -
This flag is available in version 2.4.26 and later.
- -+
top
-+
-+
BCTLS
-+
The [BCTLS] flag is similar to the [B] flag, but only escapes
-+control characters and the space character. This is the same set of
-+characters rejected when they are copied into the query string unencoded.
-+
-+ -+
-+# Escape control characters and spaces
-+RewriteRule "^search/(.*)$" "/search.php/$1" "[BCTLS]"
-+
-+ -+
top
-+
-+
BNE
-+
The list of characters in [BNE=...] are treated as exclusions to the
-+characters of the [B] or [BCTLS] flags. The listed characters will not be
-+escaped.
-+
-+ -+
-+# Escape the default characters, but leave /
-+RewriteRule "^search/(.*)$" "/search.php?term=$1" "[B,BNE=/]"
-+
-+ -
top
-
-
C|chain
-@@ -204,7 +246,7 @@ browsers that support this feature. -
Consider this example:
- -
RewriteEngine On
--RewriteRule "^/index\.html" "-" [CO=frontdoor:yes:.example.com:1440:/]
-+RewriteRule "^/index\.html" "-" [CO=frontdoor:yes:.example.com:1440:/] - - -
In the example give, the rule doesn't rewrite the request. -@@ -410,8 +452,8 @@ argument to index.php, however, the index.php, the RewriteRule will be skipped.
- -
RewriteBase "/"
--RewriteCond "%{REQUEST_URI}" "!=/index.php"
--RewriteRule "^(.*)" "/index.php?req=$1" [L,PT]
-+RewriteCond "%{REQUEST_URI}" !=/index.php -+RewriteRule "^(.*)" "/index.php?req=$1" [L,PT] - -
top
-
-@@ -434,11 +476,11 @@ pattern still matches (i.e., while the URI still contains an
- A), perform this substitution (i.e., replace the
- A with a B).
- --
In 2.4.8 and later, this module returns an error after 32,000 iterations to
-+
In 2.4.8 and later, this module returns an error after 10,000 iterations to
- protect against unintended looping. An alternative maximum number of
- iterations can be specified by adding to the N flag.
-
# Be willing to replace 1 character in each pass of the loop
--RewriteRule "(.+)[><;]$" "$1" [N=64000]
-+RewriteRule "(.+)[><;]$" "$1" [N=32000]
- # ... or, give up if after 10 loops
- RewriteRule "(.+)[><;]$" "$1" [N=10]
- -@@ -681,19 +723,21 @@ URI in request' warnings. -
The [S] flag is used to skip rules that you don't want to run. The - syntax of the skip flag is [S=N], where N signifies - the number of rules to skip (provided the --RewriteRule matches). This can be thought of as a goto --statement in your rewrite ruleset. In the following example, we only want --to run the RewriteRule if the --requested URI doesn't correspond with an actual file.
-+RewriteRule and any preceding -+RewriteCond directives match). This can be thought of as a -+goto statement in your rewrite ruleset. In the following -+example, we only want to run the -+RewriteRule if the requested URI doesn't correspond with an -+actual file.
- -
# Is the request for a non-existent file?
--RewriteCond "%{REQUEST_FILENAME}" "!-f"
--RewriteCond "%{REQUEST_FILENAME}" "!-d"
-+RewriteCond "%{REQUEST_FILENAME}" !-f
-+RewriteCond "%{REQUEST_FILENAME}" !-d
- # If so, skip these two RewriteRules
--RewriteRule ".?" "-" [S=2]
-+RewriteRule ".?"                  "-" [S=2]
- 
--RewriteRule "(.*\.gif)" "images.php?$1"
--RewriteRule "(.*\.html)" "docs.php?$1"
-+RewriteRule "(.*\.gif)" "images.php?$1" -+RewriteRule "(.*\.html)" "docs.php?$1" - - -
This technique is useful because a RewriteCond only applies to the
-@@ -705,18 +749,18 @@ use this to make pseudo if-then-else constructs: The last rule of
- the then-clause becomes skip=N, where N is the
- number of rules in the else-clause:
-
# Does the file exist?
--RewriteCond "%{REQUEST_FILENAME}" "!-f"
--RewriteCond "%{REQUEST_FILENAME}" "!-d"
-+RewriteCond "%{REQUEST_FILENAME}" !-f
-+RewriteCond "%{REQUEST_FILENAME}" !-d
- # Create an if-then-else construct by skipping 3 lines if we meant to go to the "else" stanza.
--RewriteRule ".?" "-" [S=3]
-+RewriteRule ".?"                  "-" [S=3]
- 
- # IF the file exists, then:
--    RewriteRule "(.*\.gif)" "images.php?$1"
-+    RewriteRule "(.*\.gif)"  "images.php?$1"
-     RewriteRule "(.*\.html)" "docs.php?$1"
-     # Skip past the "else" stanza.
--    RewriteRule ".?" "-" [S=1]
-+    RewriteRule ".?"         "-" [S=1]
- # ELSE...
--    RewriteRule "(.*)" "404.php?file=$1"
-+    RewriteRule "(.*)"       "404.php?file=$1"
- # END
- - -@@ -733,7 +777,7 @@ sent. This has the same effect as the # Serve .pl files as plain text --RewriteRule "\.pl$" "-" [T=text/plain] -+RewriteRule "\.pl$" "-" [T=text/plain] - - -
Or, perhaps, if you have a camera that produces jpeg images without
-@@ -741,7 +785,7 @@ file extensions, you could force those images to be served with the
- correct MIME type by virtue of their file names:
- -
# Files with 'IMG' in the name are jpg images.
--RewriteRule "IMG" "-" [T=image/jpg]
-+RewriteRule "IMG" "-" [T=image/jpg] - - -
Please note that this is a trivial example, and could be better done diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c index 38dbb24..b71c67c 100644 --- a/modules/mappers/mod_rewrite.c @@ -612,7 +463,7 @@ index 38dbb24..b71c67c 100644 /* append the QUERY_STRING part */ diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c -index 6faabea..59396a8 100644 +index cbb0872..873ccf1 100644 --- a/modules/proxy/mod_proxy_ajp.c +++ b/modules/proxy/mod_proxy_ajp.c @@ -69,6 +69,16 @@ static int proxy_ajp_canon(request_rec *r, char *url) @@ -654,7 +505,7 @@ index 3a28038..c599e1a 100644 if (path == NULL) return HTTP_BAD_REQUEST; diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c -index 7da9bde..2cdc61e 100644 +index 7573638..fe7b322 100644 --- a/modules/proxy/mod_proxy_http.c +++ b/modules/proxy/mod_proxy_http.c @@ -90,6 +90,16 @@ static int proxy_http_canon(request_rec *r, char *url) @@ -716,7 +567,7 @@ index 48ae6f4..6a153a3 100644 return 0; } diff --git a/server/util.c b/server/util.c -index 2a5dd04..1d82fd8 100644 +index 45051b7..9d897d4 100644 --- a/server/util.c +++ b/server/util.c @@ -74,13 +74,6 @@ diff --git a/SOURCES/httpd-2.4.37-mod_status-duplicate-key.patch b/SOURCES/httpd-2.4.37-mod_status-duplicate-key.patch new file mode 100644 index 0000000..a48fad6 --- /dev/null +++ b/SOURCES/httpd-2.4.37-mod_status-duplicate-key.patch @@ -0,0 +1,170 @@ +commit 84e6f25f67de9a9bddefdcdbfee3f251fead647e +Author: Tomas Korbar +Date: Thu Jul 20 14:41:33 2023 +0200 + + Fix duplicate presence of keys printed by mod_status + +diff --git a/modules/generators/mod_status.c b/modules/generators/mod_status.c +index 5917953..5bada07 100644 +--- a/modules/generators/mod_status.c ++++ b/modules/generators/mod_status.c +@@ -186,7 +186,8 @@ static int status_handler(request_rec *r) + apr_uint32_t up_time; + ap_loadavg_t t; + int j, i, res, written; +- int ready; ++ int idle; ++ int graceful; + int busy; + unsigned long count; + unsigned long lres, my_lres, conn_lres; +@@ -203,6 +204,7 @@ static int status_handler(request_rec *r) + char *stat_buffer; + pid_t *pid_buffer, worker_pid; + int *thread_idle_buffer = NULL; ++ int *thread_graceful_buffer = NULL; + int *thread_busy_buffer = NULL; + clock_t tu, ts, tcu, tcs; + clock_t gu, gs, gcu, gcs; +@@ -231,7 +233,8 @@ static int status_handler(request_rec *r) + #endif + #endif + +- ready = 0; ++ idle = 0; ++ graceful = 0; + busy = 0; + count = 0; + bcount = 0; +@@ -250,6 +253,7 @@ static int status_handler(request_rec *r) + stat_buffer = apr_palloc(r->pool, server_limit * thread_limit * sizeof(char)); + if (is_async) { + thread_idle_buffer = apr_palloc(r->pool, server_limit * sizeof(int)); ++ thread_graceful_buffer = apr_palloc(r->pool, server_limit * sizeof(int)); + thread_busy_buffer = apr_palloc(r->pool, server_limit * sizeof(int)); + } + +@@ -318,6 +322,7 @@ static int status_handler(request_rec *r) + ps_record = ap_get_scoreboard_process(i); + if (is_async) { + thread_idle_buffer[i] = 0; ++ thread_graceful_buffer[i] = 0; + thread_busy_buffer[i] = 0; + } + for (j = 0; j < thread_limit; ++j) { +@@ -336,18 +341,20 @@ static int status_handler(request_rec *r) + && ps_record->pid) { + if (res == SERVER_READY) { + if (ps_record->generation == mpm_generation) +- ready++; ++ idle++; + if (is_async) + thread_idle_buffer[i]++; + } + else if (res != SERVER_DEAD && + res != SERVER_STARTING && + res != SERVER_IDLE_KILL) { +- busy++; +- if (is_async) { +- if (res == SERVER_GRACEFUL) +- thread_idle_buffer[i]++; 
+- else ++ if (res == SERVER_GRACEFUL) { ++ graceful++; ++ if (is_async) ++ thread_graceful_buffer[i]++; ++ } else { ++ busy++; ++ if (is_async) + thread_busy_buffer[i]++; + } + } +@@ -548,10 +555,10 @@ static int status_handler(request_rec *r) + } /* ap_extended_status */ + + if (!short_report) +- ap_rprintf(r, "
%d requests currently being processed, " +- "%d idle workers
\n", busy, ready); ++ ap_rprintf(r, "
%d requests currently being processed, %d workers gracefully restarting, " ++ "%d idle workers
\n", busy, graceful, idle); + else +- ap_rprintf(r, "BusyWorkers: %d\nIdleWorkers: %d\n", busy, ready); ++ ap_rprintf(r, "BusyWorkers: %d\nGracefulWorkers: %d\nIdleWorkers: %d\n", busy, graceful, idle); + + if (!short_report) + ap_rputs("", r); +@@ -559,11 +566,6 @@ static int status_handler(request_rec *r) + if (is_async) { + int write_completion = 0, lingering_close = 0, keep_alive = 0, + connections = 0, stopping = 0, procs = 0; +- /* +- * These differ from 'busy' and 'ready' in how gracefully finishing +- * threads are counted. XXX: How to make this clear in the html? +- */ +- int busy_workers = 0, idle_workers = 0; + if (!short_report) + ap_rputs("\n\n\n" + "" +@@ -573,7 +575,7 @@ static int status_handler(request_rec *r) + "" + "\n" + "" +- "" ++ "" + "\n", r); + for (i = 0; i < server_limit; ++i) { + ps_record = ap_get_scoreboard_process(i); +@@ -582,8 +584,6 @@ static int status_handler(request_rec *r) + write_completion += ps_record->write_completion; + keep_alive += ps_record->keep_alive; + lingering_close += ps_record->lingering_close; +- busy_workers += thread_busy_buffer[i]; +- idle_workers += thread_idle_buffer[i]; + procs++; + if (ps_record->quiescing) { + stopping++; +@@ -599,7 +599,7 @@ static int status_handler(request_rec *r) + ap_rprintf(r, "" + "" + "" +- "" ++ "" + "" + "\n", + i, ps_record->pid, +@@ -607,6 +607,7 @@ static int status_handler(request_rec *r) + ps_record->connections, + ps_record->not_accepting ? "no" : "yes", + thread_busy_buffer[i], ++ thread_graceful_buffer[i], + thread_idle_buffer[i], + ps_record->write_completion, + ps_record->keep_alive, +@@ -618,25 +619,22 @@ static int status_handler(request_rec *r) + ap_rprintf(r, "" + "" + "" +- "" ++ "" + "" + "\n
SlotThreadsAsync connections
totalacceptingbusyidlebusygracefulidlewritingkeep-aliveclosing
%u%" APR_PID_T_FMT "%s%s%u%s%u%u%u%u%u%u%u%u
Sum%d%d%d %d%d%d%d%d%d%d%d
\n", + procs, stopping, + connections, +- busy_workers, idle_workers, ++ busy, graceful, idle, + write_completion, keep_alive, lingering_close); + } + else { + ap_rprintf(r, "Processes: %d\n" + "Stopping: %d\n" +- "BusyWorkers: %d\n" +- "IdleWorkers: %d\n" + "ConnsTotal: %d\n" + "ConnsAsyncWriting: %d\n" + "ConnsAsyncKeepAlive: %d\n" + "ConnsAsyncClosing: %d\n", + procs, stopping, +- busy_workers, idle_workers, + connections, + write_completion, keep_alive, lingering_close); + } diff --git a/SOURCES/httpd-2.4.37-r1885607.patch b/SOURCES/httpd-2.4.37-r1885607.patch new file mode 100644 index 0000000..124d06c --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1885607.patch @@ -0,0 +1,849 @@ +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index fbbd508..8fcd26d 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -1168,6 +1168,55 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + char **old_cl_val, + char **old_te_val); + ++/** ++ * Prefetch the client request body (in memory), up to a limit. ++ * Read what's in the client pipe. If nonblocking is set and read is EAGAIN, ++ * pass a FLUSH bucket to the backend and read again in blocking mode. ++ * @param r client request ++ * @param backend backend connection ++ * @param input_brigade input brigade to use/fill ++ * @param block blocking or non-blocking mode ++ * @param bytes_read number of bytes read ++ * @param max_read maximum number of bytes to read ++ * @return OK or HTTP_* error code ++ * @note max_read is rounded up to APR_BUCKET_BUFF_SIZE ++ */ ++PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, ++ proxy_conn_rec *backend, ++ apr_bucket_brigade *input_brigade, ++ apr_read_type_e block, ++ apr_off_t *bytes_read, ++ apr_off_t max_read); ++ ++/** ++ * Spool the client request body to memory, or disk above given limit. ++ * @param r client request ++ * @param backend backend connection ++ * @param input_brigade input brigade to use/fill ++ * @param bytes_spooled number of bytes spooled ++ * @param max_mem_spool maximum number of in-memory bytes ++ * @return OK or HTTP_* error code ++ */ ++PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r, ++ proxy_conn_rec *backend, ++ apr_bucket_brigade *input_brigade, ++ apr_off_t *bytes_spooled, ++ apr_off_t max_mem_spool); ++ ++/** ++ * Read what's in the client pipe. If the read would block (EAGAIN), ++ * pass a FLUSH bucket to the backend and read again in blocking mode. 
++ * @param r client request ++ * @param backend backend connection ++ * @param input_brigade brigade to use/fill ++ * @param max_read maximum number of bytes to read ++ * @return OK or HTTP_* error code ++ */ ++PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r, ++ proxy_conn_rec *backend, ++ apr_bucket_brigade *input_brigade, ++ apr_off_t max_read); ++ + /** + * @param bucket_alloc bucket allocator + * @param r request +diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c +index 2e97408..f9cf716 100644 +--- a/modules/proxy/mod_proxy_fcgi.c ++++ b/modules/proxy/mod_proxy_fcgi.c +@@ -521,7 +521,8 @@ static int handle_headers(request_rec *r, int *state, + static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf, + request_rec *r, apr_pool_t *setaside_pool, + apr_uint16_t request_id, const char **err, +- int *bad_request, int *has_responded) ++ int *bad_request, int *has_responded, ++ apr_bucket_brigade *input_brigade) + { + apr_bucket_brigade *ib, *ob; + int seen_end_of_headers = 0, done = 0, ignore_body = 0; +@@ -583,9 +584,26 @@ static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf, + int last_stdin = 0; + char *iobuf_cursor; + +- rv = ap_get_brigade(r->input_filters, ib, +- AP_MODE_READBYTES, APR_BLOCK_READ, +- iobuf_size); ++ if (APR_BRIGADE_EMPTY(input_brigade)) { ++ rv = ap_get_brigade(r->input_filters, ib, ++ AP_MODE_READBYTES, APR_BLOCK_READ, ++ iobuf_size); ++ } ++ else { ++ apr_bucket *e; ++ APR_BRIGADE_CONCAT(ib, input_brigade); ++ rv = apr_brigade_partition(ib, iobuf_size, &e); ++ if (rv == APR_SUCCESS) { ++ while (e != APR_BRIGADE_SENTINEL(ib) ++ && APR_BUCKET_IS_METADATA(e)) { ++ e = APR_BUCKET_NEXT(e); ++ } ++ apr_brigade_split_ex(ib, e, input_brigade); ++ } ++ else if (rv == APR_INCOMPLETE) { ++ rv = APR_SUCCESS; ++ } ++ } + if (rv != APR_SUCCESS) { + *err = "reading input brigade"; + *bad_request = 1; +@@ -924,7 +942,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, + conn_rec *origin, + proxy_dir_conf *conf, + apr_uri_t *uri, +- char *url, char *server_portstr) ++ char *url, char *server_portstr, ++ apr_bucket_brigade *input_brigade) + { + /* Request IDs are arbitrary numbers that we assign to a + * single request. This would allow multiplex/pipelining of +@@ -960,7 +979,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, + + /* Step 3: Read records from the back end server and handle them. */ + rv = dispatch(conn, conf, r, temp_pool, request_id, +- &err, &bad_request, &has_responded); ++ &err, &bad_request, &has_responded, ++ input_brigade); + if (rv != APR_SUCCESS) { + /* If the client aborted the connection during retrieval or (partially) + * sending the response, don't return a HTTP_SERVICE_UNAVAILABLE, since +@@ -996,6 +1016,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, + + #define FCGI_SCHEME "FCGI" + ++#define MAX_MEM_SPOOL 16384 ++ + /* + * This handles fcgi:(dest) URLs + */ +@@ -1008,6 +1030,8 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, + char server_portstr[32]; + conn_rec *origin = NULL; + proxy_conn_rec *backend = NULL; ++ apr_bucket_brigade *input_brigade; ++ apr_off_t input_bytes = 0; + apr_uri_t *uri; + + proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, +@@ -1050,6 +1074,101 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, + goto cleanup; + } + ++ /* We possibly reuse input data prefetched in previous call(s), e.g. for a ++ * balancer fallback scenario. 
++ */ ++ apr_pool_userdata_get((void **)&input_brigade, "proxy-fcgi-input", p); ++ if (input_brigade == NULL) { ++ const char *old_te = apr_table_get(r->headers_in, "Transfer-Encoding"); ++ const char *old_cl = NULL; ++ if (old_te) { ++ apr_table_unset(r->headers_in, "Content-Length"); ++ } ++ else { ++ old_cl = apr_table_get(r->headers_in, "Content-Length"); ++ } ++ ++ input_brigade = apr_brigade_create(p, r->connection->bucket_alloc); ++ apr_pool_userdata_setn(input_brigade, "proxy-fcgi-input", NULL, p); ++ ++ /* Prefetch (nonlocking) the request body so to increase the chance ++ * to get the whole (or enough) body and determine Content-Length vs ++ * chunked or spooled. By doing this before connecting or reusing the ++ * backend, we want to minimize the delay between this connection is ++ * considered alive and the first bytes sent (should the client's link ++ * be slow or some input filter retain the data). This is a best effort ++ * to prevent the backend from closing (from under us) what it thinks is ++ * an idle connection, hence to reduce to the minimum the unavoidable ++ * local is_socket_connected() vs remote keepalive race condition. ++ */ ++ status = ap_proxy_prefetch_input(r, backend, input_brigade, ++ APR_NONBLOCK_READ, &input_bytes, ++ MAX_MEM_SPOOL); ++ if (status != OK) { ++ goto cleanup; ++ } ++ ++ /* ++ * The request body is streamed by default, using either C-L or ++ * chunked T-E, like this: ++ * ++ * The whole body (including no body) was received on prefetch, i.e. ++ * the input brigade ends with EOS => C-L = input_bytes. ++ * ++ * C-L is known and reliable, i.e. only protocol filters in the input ++ * chain thus none should change the body => use C-L from client. ++ * ++ * The administrator has not "proxy-sendcl" which prevents T-E => use ++ * T-E and chunks. ++ * ++ * Otherwise we need to determine and set a content-length, so spool ++ * the entire request body to memory/temporary file (MAX_MEM_SPOOL), ++ * such that we finally know its length => C-L = input_bytes. ++ */ ++ if (!APR_BRIGADE_EMPTY(input_brigade) ++ && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { ++ /* The whole thing fit, so our decision is trivial, use the input ++ * bytes for the Content-Length. If we expected no body, and read ++ * no body, do not set the Content-Length. 
++ */ ++ if (old_cl || old_te || input_bytes) { ++ apr_table_setn(r->headers_in, "Content-Length", ++ apr_off_t_toa(p, input_bytes)); ++ if (old_te) { ++ apr_table_unset(r->headers_in, "Transfer-Encoding"); ++ } ++ } ++ } ++ else if (old_cl && r->input_filters == r->proto_input_filters) { ++ /* Streaming is possible by preserving the existing C-L */ ++ } ++ else if (!apr_table_get(r->subprocess_env, "proxy-sendcl")) { ++ /* Streaming is possible using T-E: chunked */ ++ } ++ else { ++ /* No streaming, C-L is the only option so spool to memory/file */ ++ apr_bucket_brigade *tmp_bb; ++ apr_off_t remaining_bytes = 0; ++ ++ AP_DEBUG_ASSERT(MAX_MEM_SPOOL >= input_bytes); ++ tmp_bb = apr_brigade_create(p, r->connection->bucket_alloc); ++ status = ap_proxy_spool_input(r, backend, tmp_bb, &remaining_bytes, ++ MAX_MEM_SPOOL - input_bytes); ++ if (status != OK) { ++ goto cleanup; ++ } ++ ++ APR_BRIGADE_CONCAT(input_brigade, tmp_bb); ++ input_bytes += remaining_bytes; ++ ++ apr_table_setn(r->headers_in, "Content-Length", ++ apr_off_t_toa(p, input_bytes)); ++ if (old_te) { ++ apr_table_unset(r->headers_in, "Transfer-Encoding"); ++ } ++ } ++ } ++ + /* This scheme handler does not reuse connections by default, to + * avoid tying up a fastcgi that isn't expecting to work on + * parallel requests. But if the user went out of their way to +@@ -1074,7 +1193,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, + + /* Step Three: Process the Request */ + status = fcgi_do_request(p, r, backend, origin, dconf, uri, url, +- server_portstr); ++ server_portstr, input_brigade); + + cleanup: + ap_proxy_release_connection(FCGI_SCHEME, backend, r->server); +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index df10997..7f67f26 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -266,50 +266,6 @@ typedef struct { + prefetch_nonblocking:1; + } proxy_http_req_t; + +-/* Read what's in the client pipe. If nonblocking is set and read is EAGAIN, +- * pass a FLUSH bucket to the backend and read again in blocking mode. +- */ +-static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb, +- int nonblocking) +-{ +- request_rec *r = req->r; +- proxy_conn_rec *p_conn = req->backend; +- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; +- apr_read_type_e block = nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; +- apr_status_t status; +- int rv; +- +- for (;;) { +- status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, +- block, HUGE_STRING_LEN); +- if (block == APR_BLOCK_READ +- || (!APR_STATUS_IS_EAGAIN(status) +- && (status != APR_SUCCESS || !APR_BRIGADE_EMPTY(bb)))) { +- break; +- } +- +- /* Flush and retry (blocking) */ +- apr_brigade_cleanup(bb); +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, bb, 1); +- if (rv != OK) { +- return rv; +- } +- block = APR_BLOCK_READ; +- } +- +- if (status != APR_SUCCESS) { +- conn_rec *c = r->connection; +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) +- "read request body failed to %pI (%s)" +- " from %s (%s)", p_conn->addr, +- p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? 
c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); +- } +- +- return OK; +-} +- + static int stream_reqbody(proxy_http_req_t *req) + { + request_rec *r = req->r; +@@ -328,7 +284,8 @@ static int stream_reqbody(proxy_http_req_t *req) + do { + if (APR_BRIGADE_EMPTY(input_brigade) + && APR_BRIGADE_EMPTY(header_brigade)) { +- rv = stream_reqbody_read(req, input_brigade, 1); ++ rv = ap_proxy_read_input(r, p_conn, input_brigade, ++ HUGE_STRING_LEN); + if (rv != OK) { + return rv; + } +@@ -409,7 +366,7 @@ static int stream_reqbody(proxy_http_req_t *req) + */ + APR_BRIGADE_PREPEND(input_brigade, header_brigade); + +- /* Flush here on EOS because we won't stream_reqbody_read() again */ ++ /* Flush here on EOS because we won't ap_proxy_read_input() again. */ + rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, + input_brigade, seen_eos); + if (rv != OK) { +@@ -427,137 +384,6 @@ static int stream_reqbody(proxy_http_req_t *req) + return OK; + } + +-static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) +-{ +- apr_pool_t *p = req->p; +- request_rec *r = req->r; +- int seen_eos = 0, rv = OK; +- apr_status_t status = APR_SUCCESS; +- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; +- apr_bucket_brigade *input_brigade = req->input_brigade; +- apr_bucket_brigade *body_brigade; +- apr_bucket *e; +- apr_off_t bytes, fsize = 0; +- apr_file_t *tmpfile = NULL; +- apr_off_t limit; +- +- body_brigade = apr_brigade_create(p, bucket_alloc); +- *bytes_spooled = 0; +- +- limit = ap_get_limit_req_body(r); +- +- do { +- if (APR_BRIGADE_EMPTY(input_brigade)) { +- rv = stream_reqbody_read(req, input_brigade, 0); +- if (rv != OK) { +- return rv; +- } +- } +- +- /* If this brigade contains EOS, either stop or remove it. */ +- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { +- seen_eos = 1; +- } +- +- apr_brigade_length(input_brigade, 1, &bytes); +- +- if (*bytes_spooled + bytes > MAX_MEM_SPOOL) { +- /* +- * LimitRequestBody does not affect Proxy requests (Should it?). +- * Let it take effect if we decide to store the body in a +- * temporary file on disk. 
+- */ +- if (limit && (*bytes_spooled + bytes > limit)) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) +- "Request body is larger than the configured " +- "limit of %" APR_OFF_T_FMT, limit); +- return HTTP_REQUEST_ENTITY_TOO_LARGE; +- } +- /* can't spool any more in memory; write latest brigade to disk */ +- if (tmpfile == NULL) { +- const char *temp_dir; +- char *template; +- +- status = apr_temp_dir_get(&temp_dir, p); +- if (status != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089) +- "search for temporary directory failed"); +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- apr_filepath_merge(&template, temp_dir, +- "modproxy.tmp.XXXXXX", +- APR_FILEPATH_NATIVE, p); +- status = apr_file_mktemp(&tmpfile, template, 0, p); +- if (status != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090) +- "creation of temporary file in directory " +- "%s failed", temp_dir); +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- } +- for (e = APR_BRIGADE_FIRST(input_brigade); +- e != APR_BRIGADE_SENTINEL(input_brigade); +- e = APR_BUCKET_NEXT(e)) { +- const char *data; +- apr_size_t bytes_read, bytes_written; +- +- apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ); +- status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written); +- if (status != APR_SUCCESS) { +- const char *tmpfile_name; +- +- if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) { +- tmpfile_name = "(unknown)"; +- } +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091) +- "write to temporary file %s failed", +- tmpfile_name); +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- AP_DEBUG_ASSERT(bytes_read == bytes_written); +- fsize += bytes_written; +- } +- apr_brigade_cleanup(input_brigade); +- } +- else { +- +- /* +- * Save input_brigade in body_brigade. (At least) in the SSL case +- * input_brigade contains transient buckets whose data would get +- * overwritten during the next call of ap_get_brigade in the loop. +- * ap_save_brigade ensures these buckets to be set aside. +- * Calling ap_save_brigade with NULL as filter is OK, because +- * body_brigade already has been created and does not need to get +- * created by ap_save_brigade. +- */ +- status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p); +- if (status != APR_SUCCESS) { +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- +- } +- +- *bytes_spooled += bytes; +- } while (!seen_eos); +- +- APR_BRIGADE_CONCAT(input_brigade, body_brigade); +- if (tmpfile) { +- apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); +- } +- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { +- e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(input_brigade, e); +- } +- if (tmpfile) { +- /* We dropped metadata buckets when spooling to tmpfile, +- * terminate with EOS for stream_reqbody() to flush the +- * whole in one go. 
+- */ +- e = apr_bucket_eos_create(bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(input_brigade, e); +- } +- return OK; +-} + + static int ap_proxy_http_prefetch(proxy_http_req_t *req, + apr_uri_t *uri, char *url) +@@ -569,14 +395,12 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; +- apr_bucket_brigade *temp_brigade; + apr_bucket *e; +- char *buf; + apr_status_t status; ++ char *buf; + apr_off_t bytes_read = 0; + apr_off_t bytes; + int force10, rv; +- apr_read_type_e block; + conn_rec *origin = p_conn->connection; + + if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { +@@ -641,69 +465,12 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + p_conn->close = 1; + } + +- /* Prefetch MAX_MEM_SPOOL bytes +- * +- * This helps us avoid any election of C-L v.s. T-E +- * request bodies, since we are willing to keep in +- * memory this much data, in any case. This gives +- * us an instant C-L election if the body is of some +- * reasonable size. +- */ +- temp_brigade = apr_brigade_create(p, bucket_alloc); +- block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; +- +- /* Account for saved input, if any. */ +- apr_brigade_length(input_brigade, 0, &bytes_read); +- +- /* Ensure we don't hit a wall where we have a buffer too small +- * for ap_get_brigade's filters to fetch us another bucket, +- * surrender once we hit 80 bytes less than MAX_MEM_SPOOL +- * (an arbitrary value). +- */ +- while (bytes_read < MAX_MEM_SPOOL - 80 +- && (APR_BRIGADE_EMPTY(input_brigade) +- || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) { +- status = ap_get_brigade(r->input_filters, temp_brigade, +- AP_MODE_READBYTES, block, +- MAX_MEM_SPOOL - bytes_read); +- /* ap_get_brigade may return success with an empty brigade +- * for a non-blocking read which would block +- */ +- if (block == APR_NONBLOCK_READ +- && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade)) +- || APR_STATUS_IS_EAGAIN(status))) { +- break; +- } +- if (status != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) +- "prefetch request body failed to %pI (%s)" +- " from %s (%s)", +- p_conn->addr, p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); +- } +- +- apr_brigade_length(temp_brigade, 1, &bytes); +- bytes_read += bytes; +- +- /* +- * Save temp_brigade in input_brigade. (At least) in the SSL case +- * temp_brigade contains transient buckets whose data would get +- * overwritten during the next call of ap_get_brigade in the loop. +- * ap_save_brigade ensures these buckets to be set aside. +- * Calling ap_save_brigade with NULL as filter is OK, because +- * input_brigade already has been created and does not need to get +- * created by ap_save_brigade. +- */ +- status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p); +- if (status != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096) +- "processing prefetched request body failed" +- " to %pI (%s) from %s (%s)", +- p_conn->addr, p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? c->remote_host: ""); +- return HTTP_INTERNAL_SERVER_ERROR; +- } ++ rv = ap_proxy_prefetch_input(r, req->backend, input_brigade, ++ req->prefetch_nonblocking ? 
APR_NONBLOCK_READ ++ : APR_BLOCK_READ, ++ &bytes_read, MAX_MEM_SPOOL); ++ if (rv != OK) { ++ return rv; + } + + /* Use chunked request body encoding or send a content-length body? +@@ -772,7 +539,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + char *endstr; + status = apr_strtoff(&req->cl_val, req->old_cl_val, &endstr, 10); + if (status != APR_SUCCESS || *endstr || req->cl_val < 0) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01085) + "could not parse request Content-Length (%s)", + req->old_cl_val); + return HTTP_BAD_REQUEST; +@@ -812,7 +579,8 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + /* If we have to spool the body, do it now, before connecting or + * reusing the backend connection. + */ +- rv = spool_reqbody_cl(req, &bytes); ++ rv = ap_proxy_spool_input(r, p_conn, input_brigade, ++ &bytes, MAX_MEM_SPOOL); + if (rv != OK) { + return rv; + } +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index ab88d8f..973aa83 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -3866,6 +3866,268 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + return OK; + } + ++PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, ++ proxy_conn_rec *backend, ++ apr_bucket_brigade *input_brigade, ++ apr_read_type_e block, ++ apr_off_t *bytes_read, ++ apr_off_t max_read) ++{ ++ apr_pool_t *p = r->pool; ++ conn_rec *c = r->connection; ++ apr_bucket_brigade *temp_brigade; ++ apr_status_t status; ++ apr_off_t bytes; ++ ++ *bytes_read = 0; ++ if (max_read < APR_BUCKET_BUFF_SIZE) { ++ max_read = APR_BUCKET_BUFF_SIZE; ++ } ++ ++ /* Prefetch max_read bytes ++ * ++ * This helps us avoid any election of C-L v.s. T-E ++ * request bodies, since we are willing to keep in ++ * memory this much data, in any case. This gives ++ * us an instant C-L election if the body is of some ++ * reasonable size. ++ */ ++ temp_brigade = apr_brigade_create(p, input_brigade->bucket_alloc); ++ ++ /* Account for saved input, if any. */ ++ apr_brigade_length(input_brigade, 0, bytes_read); ++ ++ /* Ensure we don't hit a wall where we have a buffer too small for ++ * ap_get_brigade's filters to fetch us another bucket, surrender ++ * once we hit 80 bytes (an arbitrary value) less than max_read. ++ */ ++ while (*bytes_read < max_read - 80 ++ && (APR_BRIGADE_EMPTY(input_brigade) ++ || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) { ++ status = ap_get_brigade(r->input_filters, temp_brigade, ++ AP_MODE_READBYTES, block, ++ max_read - *bytes_read); ++ /* ap_get_brigade may return success with an empty brigade ++ * for a non-blocking read which would block ++ */ ++ if (block == APR_NONBLOCK_READ ++ && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade)) ++ || APR_STATUS_IS_EAGAIN(status))) { ++ break; ++ } ++ if (status != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) ++ "prefetch request body failed to %pI (%s)" ++ " from %s (%s)", backend->addr, ++ backend->hostname ? backend->hostname : "", ++ c->client_ip, c->remote_host ? c->remote_host : ""); ++ return ap_map_http_request_error(status, HTTP_BAD_REQUEST); ++ } ++ ++ apr_brigade_length(temp_brigade, 1, &bytes); ++ *bytes_read += bytes; ++ ++ /* ++ * Save temp_brigade in input_brigade. (At least) in the SSL case ++ * temp_brigade contains transient buckets whose data would get ++ * overwritten during the next call of ap_get_brigade in the loop. 
++ * ap_save_brigade ensures these buckets to be set aside. ++ * Calling ap_save_brigade with NULL as filter is OK, because ++ * input_brigade already has been created and does not need to get ++ * created by ap_save_brigade. ++ */ ++ status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p); ++ if (status != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096) ++ "processing prefetched request body failed" ++ " to %pI (%s) from %s (%s)", backend->addr, ++ backend->hostname ? backend->hostname : "", ++ c->client_ip, c->remote_host ? c->remote_host : ""); ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ } ++ ++ return OK; ++} ++ ++PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r, ++ proxy_conn_rec *backend, ++ apr_bucket_brigade *bb, ++ apr_off_t max_read) ++{ ++ apr_bucket_alloc_t *bucket_alloc = bb->bucket_alloc; ++ apr_read_type_e block = (backend->connection) ? APR_NONBLOCK_READ ++ : APR_BLOCK_READ; ++ apr_status_t status; ++ int rv; ++ ++ for (;;) { ++ apr_brigade_cleanup(bb); ++ status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, ++ block, max_read); ++ if (block == APR_BLOCK_READ ++ || (!(status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) ++ && !APR_STATUS_IS_EAGAIN(status))) { ++ break; ++ } ++ ++ /* Flush and retry (blocking) */ ++ apr_brigade_cleanup(bb); ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, backend, ++ backend->connection, bb, 1); ++ if (rv != OK) { ++ return rv; ++ } ++ block = APR_BLOCK_READ; ++ } ++ ++ if (status != APR_SUCCESS) { ++ conn_rec *c = r->connection; ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) ++ "read request body failed to %pI (%s)" ++ " from %s (%s)", backend->addr, ++ backend->hostname ? backend->hostname : "", ++ c->client_ip, c->remote_host ? c->remote_host : ""); ++ return ap_map_http_request_error(status, HTTP_BAD_REQUEST); ++ } ++ ++ return OK; ++} ++ ++PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r, ++ proxy_conn_rec *backend, ++ apr_bucket_brigade *input_brigade, ++ apr_off_t *bytes_spooled, ++ apr_off_t max_mem_spool) ++{ ++ apr_pool_t *p = r->pool; ++ int seen_eos = 0, rv = OK; ++ apr_status_t status = APR_SUCCESS; ++ apr_bucket_alloc_t *bucket_alloc = input_brigade->bucket_alloc; ++ apr_bucket_brigade *body_brigade; ++ apr_bucket *e; ++ apr_off_t bytes, fsize = 0; ++ apr_file_t *tmpfile = NULL; ++ apr_off_t limit; ++ ++ *bytes_spooled = 0; ++ body_brigade = apr_brigade_create(p, bucket_alloc); ++ ++ limit = ap_get_limit_req_body(r); ++ ++ do { ++ if (APR_BRIGADE_EMPTY(input_brigade)) { ++ rv = ap_proxy_read_input(r, backend, input_brigade, ++ HUGE_STRING_LEN); ++ if (rv != OK) { ++ return rv; ++ } ++ } ++ ++ /* If this brigade contains EOS, either stop or remove it. */ ++ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { ++ seen_eos = 1; ++ } ++ ++ apr_brigade_length(input_brigade, 1, &bytes); ++ ++ if (*bytes_spooled + bytes > max_mem_spool) { ++ /* ++ * LimitRequestBody does not affect Proxy requests (Should it?). ++ * Let it take effect if we decide to store the body in a ++ * temporary file on disk. 
++ */ ++ if (limit && (*bytes_spooled + bytes > limit)) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) ++ "Request body is larger than the configured " ++ "limit of %" APR_OFF_T_FMT, limit); ++ return HTTP_REQUEST_ENTITY_TOO_LARGE; ++ } ++ /* can't spool any more in memory; write latest brigade to disk */ ++ if (tmpfile == NULL) { ++ const char *temp_dir; ++ char *template; ++ ++ status = apr_temp_dir_get(&temp_dir, p); ++ if (status != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089) ++ "search for temporary directory failed"); ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ apr_filepath_merge(&template, temp_dir, ++ "modproxy.tmp.XXXXXX", ++ APR_FILEPATH_NATIVE, p); ++ status = apr_file_mktemp(&tmpfile, template, 0, p); ++ if (status != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090) ++ "creation of temporary file in directory " ++ "%s failed", temp_dir); ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ } ++ for (e = APR_BRIGADE_FIRST(input_brigade); ++ e != APR_BRIGADE_SENTINEL(input_brigade); ++ e = APR_BUCKET_NEXT(e)) { ++ const char *data; ++ apr_size_t bytes_read, bytes_written; ++ ++ apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ); ++ status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written); ++ if (status != APR_SUCCESS) { ++ const char *tmpfile_name; ++ ++ if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) { ++ tmpfile_name = "(unknown)"; ++ } ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091) ++ "write to temporary file %s failed", ++ tmpfile_name); ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ AP_DEBUG_ASSERT(bytes_read == bytes_written); ++ fsize += bytes_written; ++ } ++ apr_brigade_cleanup(input_brigade); ++ } ++ else { ++ ++ /* ++ * Save input_brigade in body_brigade. (At least) in the SSL case ++ * input_brigade contains transient buckets whose data would get ++ * overwritten during the next call of ap_get_brigade in the loop. ++ * ap_save_brigade ensures these buckets to be set aside. ++ * Calling ap_save_brigade with NULL as filter is OK, because ++ * body_brigade already has been created and does not need to get ++ * created by ap_save_brigade. ++ */ ++ status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p); ++ if (status != APR_SUCCESS) { ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ ++ } ++ ++ *bytes_spooled += bytes; ++ } while (!seen_eos); ++ ++ APR_BRIGADE_CONCAT(input_brigade, body_brigade); ++ if (tmpfile) { ++ apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); ++ } ++ if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } ++ if (tmpfile) { ++ /* We dropped metadata buckets when spooling to tmpfile, ++ * terminate with EOS to allow for flushing in a one go. 
++ */ ++ e = apr_bucket_eos_create(bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } ++ return OK; ++} ++ + PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, + request_rec *r, proxy_conn_rec *p_conn, + conn_rec *origin, apr_bucket_brigade *bb, diff --git a/SPECS/httpd.spec b/SPECS/httpd.spec index 6e3fae9..a3ce992 100644 --- a/SPECS/httpd.spec +++ b/SPECS/httpd.spec @@ -3,7 +3,7 @@ %define suexec_caller apache %define mmn 20120211 %define mmnisa %{mmn}%{__isa_name}%{__isa_bits} -%define vstring %(source /etc/os-release; echo ${REDHAT_SUPPORT_PRODUCT}) +%define vstring %(source /etc/os-release; echo ${NAME}) %if 0%{?fedora} > 26 || 0%{?rhel} > 7 %global mpm event %else @@ -13,7 +13,7 @@ Summary: Apache HTTP Server Name: httpd Version: 2.4.37 -Release: 56%{?dist}.7 +Release: 62%{?dist} URL: https://httpd.apache.org/ Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2 Source2: httpd.logrotate @@ -165,6 +165,10 @@ Patch89: httpd-2.4.37-r1862410.patch Patch90: httpd-2.4.37-hcheck-mem-issues.patch # https://bugzilla.redhat.com/show_bug.cgi?id=2017543 Patch91: httpd-2.4.37-add-SNI-support.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2159603 +Patch92: httpd-2.4.37-mod_status-duplicate-key.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2221083 +Patch93: httpd-2.4.37-r1885607.patch # Security fixes Patch200: httpd-2.4.37-r1851471.patch @@ -431,6 +435,8 @@ interface for storing and accessing per-user session data. %patch89 -p1 -b .r1862410 %patch90 -p1 -b .hcheck-mem-issues %patch91 -p1 -b .SNI +%patch92 -p1 -b .mod_status-dupl +%patch93 -p1 -b .r1885607 %patch200 -p1 -b .r1851471 %patch201 -p1 -b .CVE-2019-0211 @@ -977,11 +983,33 @@ rm -rf $RPM_BUILD_ROOT %{_rpmconfigdir}/macros.d/macros.httpd %changelog -* Wed Aug 30 2023 Luboš Uhliarik - 2.4.37-56.7 -- Resolves: #2236177 - CVE-2023-27522 httpd:2.4/httpd: mod_proxy_uwsgi HTTP +* Thu Aug 17 2023 Johnny Hughes - 2.4.37-62 +- change for CentOS Stream Branding + +* Thu Jul 27 2023 Luboš Uhliarik - 2.4.37-62 +- Resolves: #2221083 - Apache Bug 57087: mod_proxy_fcgi doesn't send cgi + CONTENT_LENGTH variable when the client request used Transfer-Encoding:chunked + +* Thu Jul 20 2023 Tomas Korbar - 2.4.37-61 +- Fix issue found by covscan +- Related: #2159603 + +* Mon Jul 17 2023 Tomas Korbar - 2.4.37-60 +- Another rebuild because of mistake in workflow +- Related: #2159603 + +* Mon Jul 17 2023 Tomas Korbar - 2.4.37-59 +- Rebuild because of mistake in workflow +- Related: #2159603 + +* Mon Jul 17 2023 Tomas Korbar - 2.4.37-58 +- Resolves: #2159603 - mod_status lists BusyWorkers IdleWorkers keys twice + +* Thu May 25 2023 Luboš Uhliarik - 2.4.37-57 +- Resolves: #2176723 - CVE-2023-27522 httpd:2.4/httpd: mod_proxy_uwsgi HTTP response splitting -* Thu Apr 27 2023 Luboš Uhliarik - 2.4.37-56.6 +* Thu Apr 27 2023 Luboš Uhliarik - 2.4.37-56.5 - Resolves: #2190133 - mod_rewrite regression with CVE-2023-25690 * Sat Mar 18 2023 Luboš Uhliarik - 2.4.37-56.4