Resolves: #2184403 - rebase httpd to 2.4.57

Resolves: #2177753 - CVE-2023-25690 httpd: HTTP request splitting with
  mod_rewrite and mod_proxy
Luboš Uhliarik 2023-04-11 14:31:37 +02:00
parent 188a9ca177
commit d4b55888c2
20 changed files with 52 additions and 1906 deletions
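
For context on the CVE-2023-25690 fix picked up by this rebase: the upstream advisory describes configurations where mod_proxy is enabled together with a RewriteRule or ProxyPassMatch that substitutes user-supplied request data into the proxied target. An illustrative sketch of an affected pattern (adapted from the advisory wording; not part of this commit):

    RewriteEngine on
    RewriteRule "^/here/(.*)" "http://backend.example.com:8080/elsewhere?$1" [P]
    ProxyPassReverse "/here/" "http://backend.example.com:8080/"

Because the captured $1 is user-controlled and re-inserted into the proxied request target, crafted requests could split the proxied request; the 2.4.57 sources carry the upstream mitigation.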

.gitignore

@@ -42,3 +42,4 @@ x86_64
/httpd-2.4.49.tar.bz2.asc
/httpd-2.4.51.tar.bz2.asc
/httpd-2.4.53.tar.bz2.asc
/httpd-2.4.57.tar.bz2.asc


@@ -1,20 +0,0 @@
diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c
index b53f3f8..979489c 100644
--- a/modules/ssl/ssl_engine_config.c
+++ b/modules/ssl/ssl_engine_config.c
@@ -812,8 +812,14 @@ const char *ssl_cmd_SSLCipherSuite(cmd_parms *cmd,
static const char *ssl_cmd_check_file(cmd_parms *parms,
const char **file)
{
- const char *filepath = ap_server_root_relative(parms->pool, *file);
+ const char *filepath;
+ /* If only dumping the config, don't verify the paths */
+ if (ap_state_query(AP_SQ_RUN_MODE) == AP_SQ_RM_CONFIG_DUMP) {
+ return NULL;
+ }
+
+ filepath = ap_server_root_relative(parms->pool, *file);
if (!filepath) {
return apr_pstrcat(parms->pool, parms->cmd->name,
": Invalid file path ", *file, NULL);


@@ -1,20 +0,0 @@
diff --git a/modules/dav/main/util.c b/modules/dav/main/util.c
index 1ae5914027c..3f7822fc931 100644
--- a/modules/dav/main/util.c
+++ b/modules/dav/main/util.c
@@ -801,8 +801,14 @@ static dav_error * dav_process_if_header(request_rec *r, dav_if_header **p_ih)
"for the same state.");
}
condition = DAV_IF_COND_NOT;
+ list += 2;
+ }
+ else {
+ return dav_new_error(r->pool, HTTP_BAD_REQUEST,
+ DAV_ERR_IF_UNK_CHAR, 0,
+ "Invalid \"If:\" header: "
+ "Unexpected character in List");
}
- list += 2;
break;
case ' ':


@@ -1,26 +0,0 @@
diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
index e2992fc..46d42bc 100644
--- a/modules/proxy/mod_proxy_ajp.c
+++ b/modules/proxy/mod_proxy_ajp.c
@@ -246,9 +246,18 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
/* read the first block of data */
input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
- if (tenc && (ap_cstr_casecmp(tenc, "chunked") == 0)) {
- /* The AJP protocol does not want body data yet */
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870) "request is chunked");
+ if (tenc) {
+ if (ap_cstr_casecmp(tenc, "chunked") == 0) {
+ /* The AJP protocol does not want body data yet */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00870)
+ "request is chunked");
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10396)
+ "%s Transfer-Encoding is not supported",
+ tenc);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
} else {
/* Get client provided Content-Length header */
content_length = get_content_length(r);
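
The hunk above makes mod_proxy_ajp reject a request body sent with any Transfer-Encoding other than "chunked" rather than forwarding it incorrectly. For reference, an assumed minimal reverse-proxy setup in which this code path runs (backend host and path are illustrative):

    LoadModule proxy_module modules/mod_proxy.so
    LoadModule proxy_ajp_module modules/mod_proxy_ajp.so
    ProxyPass "/app" "ajp://backend.example.com:8009/app"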


@@ -1,61 +0,0 @@
From 8c14927162cf3b4f810683e1c5505e9ef9e1f123 Mon Sep 17 00:00:00 2001
From: Eric Covener <covener@apache.org>
Date: Wed, 1 Jun 2022 12:34:16 +0000
Subject: [PATCH] Merge r1901500 from trunk:
handle large writes in ap_rputs
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901501 13f79535-47bb-0310-9956-ffa450edef68
---
include/http_protocol.h | 22 +++++++++++++++++++++-
server/protocol.c | 3 +++
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/include/http_protocol.h b/include/http_protocol.h
index 20bd2022266..94c481e5f43 100644
--- a/include/http_protocol.h
+++ b/include/http_protocol.h
@@ -475,7 +475,27 @@ AP_DECLARE(int) ap_rwrite(const void *buf, int nbyte, request_rec *r);
*/
static APR_INLINE int ap_rputs(const char *str, request_rec *r)
{
- return ap_rwrite(str, (int)strlen(str), r);
+ apr_size_t len;
+
+ len = strlen(str);
+
+ for (;;) {
+ if (len <= INT_MAX) {
+ return ap_rwrite(str, (int)len, r);
+ }
+ else {
+ int rc;
+
+ rc = ap_rwrite(str, INT_MAX, r);
+ if (rc < 0) {
+ return rc;
+ }
+ else {
+ str += INT_MAX;
+ len -= INT_MAX;
+ }
+ }
+ }
}
/**
diff --git a/server/protocol.c b/server/protocol.c
index 298f61e1fb8..7adc7f75c10 100644
--- a/server/protocol.c
+++ b/server/protocol.c
@@ -2128,6 +2128,9 @@ AP_DECLARE(int) ap_rputc(int c, request_rec *r)
AP_DECLARE(int) ap_rwrite(const void *buf, int nbyte, request_rec *r)
{
+ if (nbyte < 0)
+ return -1;
+
if (r->connection->aborted)
return -1;


@@ -1,22 +0,0 @@
diff --git a/server/util.c b/server/util.c
index 604be1a..6808164 100644
--- a/server/util.c
+++ b/server/util.c
@@ -185,7 +185,7 @@ AP_DECLARE(char *) ap_ht_time(apr_pool_t *p, apr_time_t t, const char *fmt,
*/
AP_DECLARE(int) ap_strcmp_match(const char *str, const char *expected)
{
- int x, y;
+ apr_size_t x, y;
for (x = 0, y = 0; expected[y]; ++y, ++x) {
if (expected[y] == '*') {
@@ -209,7 +209,7 @@ AP_DECLARE(int) ap_strcmp_match(const char *str, const char *expected)
AP_DECLARE(int) ap_strcasecmp_match(const char *str, const char *expected)
{
- int x, y;
+ apr_size_t x, y;
for (x = 0, y = 0; expected[y]; ++y, ++x) {
if (!str[x] && expected[y] != '*')


@@ -1,126 +0,0 @@
diff --git a/docs/manual/mod/core.html.en b/docs/manual/mod/core.html.en
index bb6b90a..d14aed4 100644
--- a/docs/manual/mod/core.html.en
+++ b/docs/manual/mod/core.html.en
@@ -2796,16 +2796,16 @@ subrequests</td></tr>
<tr><th><a href="directive-dict.html#Description">Description:</a></th><td>Restricts the total size of the HTTP request body sent
from the client</td></tr>
<tr><th><a href="directive-dict.html#Syntax">Syntax:</a></th><td><code>LimitRequestBody <var>bytes</var></code></td></tr>
-<tr><th><a href="directive-dict.html#Default">Default:</a></th><td><code>LimitRequestBody 0</code></td></tr>
+<tr><th><a href="directive-dict.html#Default">Default:</a></th><td><code>LimitRequestBody 1073741824</code></td></tr>
<tr><th><a href="directive-dict.html#Context">Context:</a></th><td>server config, virtual host, directory, .htaccess</td></tr>
<tr><th><a href="directive-dict.html#Override">Override:</a></th><td>All</td></tr>
<tr><th><a href="directive-dict.html#Status">Status:</a></th><td>Core</td></tr>
<tr><th><a href="directive-dict.html#Module">Module:</a></th><td>core</td></tr>
+<tr><th><a href="directive-dict.html#Compatibility">Compatibility:</a></th><td>In Apache HTTP Server 2.4.53 and earlier, the default value
+ was 0 (unlimited)</td></tr>
</table>
- <p>This directive specifies the number of <var>bytes</var> from 0
- (meaning unlimited) to 2147483647 (2GB) that are allowed in a
- request body. See the note below for the limited applicability
- to proxy requests.</p>
+ <p>This directive specifies the number of <var>bytes</var>
+ that are allowed in a request body. A value of <var>0</var> means unlimited.</p>
<p>The <code class="directive">LimitRequestBody</code> directive allows
the user to set a limit on the allowed size of an HTTP request
@@ -2831,12 +2831,6 @@ from the client</td></tr>
<pre class="prettyprint lang-config">LimitRequestBody 102400</pre>
-
- <div class="note"><p>For a full description of how this directive is interpreted by
- proxy requests, see the <code class="module"><a href="../mod/mod_proxy.html">mod_proxy</a></code> documentation.</p>
- </div>
-
-
</div>
<div class="top"><a href="#page-header"><img alt="top" src="../images/up.gif" /></a></div>
<div class="directive-section"><h2><a name="LimitRequestFields" id="LimitRequestFields">LimitRequestFields</a> <a name="limitrequestfields" id="limitrequestfields">Directive</a></h2>
diff --git a/docs/manual/mod/mod_proxy.html.en b/docs/manual/mod/mod_proxy.html.en
index ee7b1e3..233d234 100644
--- a/docs/manual/mod/mod_proxy.html.en
+++ b/docs/manual/mod/mod_proxy.html.en
@@ -463,9 +463,6 @@ ProxyPass "/examples" "http://backend.example.com/examples" timeout=10</pre>
Content-Length header, but the server is configured to filter incoming
request bodies.</p>
- <p><code class="directive"><a href="../mod/core.html#limitrequestbody">LimitRequestBody</a></code> only applies to
- request bodies that the server will spool to disk</p>
-
</div><div class="top"><a href="#page-header"><img alt="top" src="../images/up.gif" /></a></div>
<div class="section">
<h2><a name="x-headers" id="x-headers">Reverse Proxy Request Headers</a></h2>
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
index 43e8c6d..33c78f3 100644
--- a/modules/http/http_filters.c
+++ b/modules/http/http_filters.c
@@ -1703,6 +1703,7 @@ AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
{
const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
const char *lenp = apr_table_get(r->headers_in, "Content-Length");
+ apr_off_t limit_req_body = ap_get_limit_req_body(r);
r->read_body = read_policy;
r->read_chunked = 0;
@@ -1738,6 +1739,11 @@ AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
+ if (limit_req_body > 0 && (r->remaining > limit_req_body)) {
+ /* will be logged when the body is discarded */
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
#ifdef AP_DEBUG
{
/* Make sure ap_getline() didn't leave any droppings. */
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index bc86253..85f2f9c 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -4260,13 +4260,10 @@ PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
apr_bucket *e;
apr_off_t bytes, fsize = 0;
apr_file_t *tmpfile = NULL;
- apr_off_t limit;
*bytes_spooled = 0;
body_brigade = apr_brigade_create(p, bucket_alloc);
- limit = ap_get_limit_req_body(r);
-
do {
if (APR_BRIGADE_EMPTY(input_brigade)) {
rv = ap_proxy_read_input(r, backend, input_brigade,
@@ -4284,17 +4281,6 @@ PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
apr_brigade_length(input_brigade, 1, &bytes);
if (*bytes_spooled + bytes > max_mem_spool) {
- /*
- * LimitRequestBody does not affect Proxy requests (Should it?).
- * Let it take effect if we decide to store the body in a
- * temporary file on disk.
- */
- if (limit && (*bytes_spooled + bytes > limit)) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
- "Request body is larger than the configured "
- "limit of %" APR_OFF_T_FMT, limit);
- return HTTP_REQUEST_ENTITY_TOO_LARGE;
- }
/* can't spool any more in memory; write latest brigade to disk */
if (tmpfile == NULL) {
const char *temp_dir;
diff --git a/server/core.c b/server/core.c
index 3d44e0e..682259f 100644
--- a/server/core.c
+++ b/server/core.c
@@ -71,7 +71,7 @@
/* LimitRequestBody handling */
#define AP_LIMIT_REQ_BODY_UNSET ((apr_off_t) -1)
-#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 0)
+#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 1<<30) /* 1GB */
/* LimitXMLRequestBody handling */
#define AP_LIMIT_UNSET ((long) -1)
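
The documentation and core.c hunks above record the upstream change of the LimitRequestBody default from 0 (unlimited) to 1 GiB (1073741824 bytes). Deployments that depended on the old behaviour can set the directive explicitly; a sketch using the syntax documented above (the /uploads location is a made-up example):

    # Restore the 2.4.53-and-earlier behaviour (no limit)
    LimitRequestBody 0

    # ...or cap request bodies for a specific path only
    <Location "/uploads">
        LimitRequestBody 104857600
    </Location>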


@@ -1,557 +0,0 @@
From db47781128e42bd49f55076665b3f6ca4e2bc5e2 Mon Sep 17 00:00:00 2001
From: Eric Covener <covener@apache.org>
Date: Wed, 1 Jun 2022 12:50:40 +0000
Subject: [PATCH] Merge r1901506 from trunk:
limit mod_sed memory use
Resync mod_sed.c with trunk due to merge conflicts.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901509 13f79535-47bb-0310-9956-ffa450edef68
---
modules/filters/mod_sed.c | 75 ++++++++----------
modules/filters/sed1.c | 158 +++++++++++++++++++++++++++-----------
2 files changed, 147 insertions(+), 86 deletions(-)
diff --git a/modules/filters/mod_sed.c b/modules/filters/mod_sed.c
index 4bdb4ce33ae..12cb04a20f9 100644
--- a/modules/filters/mod_sed.c
+++ b/modules/filters/mod_sed.c
@@ -59,7 +59,7 @@ typedef struct sed_filter_ctxt
module AP_MODULE_DECLARE_DATA sed_module;
/* This function will be call back from libsed functions if there is any error
- * happend during execution of sed scripts
+ * happened during execution of sed scripts
*/
static apr_status_t log_sed_errf(void *data, const char *error)
{
@@ -277,7 +277,7 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
apr_bucket_brigade *bb)
{
apr_bucket *b;
- apr_status_t status;
+ apr_status_t status = APR_SUCCESS;
sed_config *cfg = ap_get_module_config(f->r->per_dir_config,
&sed_module);
sed_filter_ctxt *ctx = f->ctx;
@@ -302,9 +302,9 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
return status;
ctx = f->ctx;
apr_table_unset(f->r->headers_out, "Content-Length");
- }
- ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ }
/* Here is the main logic. Iterate through all the buckets, read the
* content of the bucket, call sed_eval_buffer on the data.
@@ -326,63 +326,52 @@ static apr_status_t sed_response_filter(ap_filter_t *f,
* in sed's internal buffer which can't be flushed until new line
* character is arrived.
*/
- for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb);) {
- const char *buf = NULL;
- apr_size_t bytes = 0;
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
if (APR_BUCKET_IS_EOS(b)) {
- apr_bucket *b1 = APR_BUCKET_NEXT(b);
/* Now clean up the internal sed buffer */
sed_finalize_eval(&ctx->eval, ctx);
status = flush_output_buffer(ctx);
if (status != APR_SUCCESS) {
- clear_ctxpool(ctx);
- return status;
+ break;
}
+ /* Move the eos bucket to ctx->bb brigade */
APR_BUCKET_REMOVE(b);
- /* Insert the eos bucket to ctx->bb brigade */
APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
- b = b1;
}
else if (APR_BUCKET_IS_FLUSH(b)) {
- apr_bucket *b1 = APR_BUCKET_NEXT(b);
- APR_BUCKET_REMOVE(b);
status = flush_output_buffer(ctx);
if (status != APR_SUCCESS) {
- clear_ctxpool(ctx);
- return status;
+ break;
}
+ /* Move the flush bucket to ctx->bb brigade */
+ APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
- b = b1;
- }
- else if (APR_BUCKET_IS_METADATA(b)) {
- b = APR_BUCKET_NEXT(b);
}
- else if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
- == APR_SUCCESS) {
- apr_bucket *b1 = APR_BUCKET_NEXT(b);
- status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
- if (status != APR_SUCCESS) {
- clear_ctxpool(ctx);
- return status;
+ else {
+ if (!APR_BUCKET_IS_METADATA(b)) {
+ const char *buf = NULL;
+ apr_size_t bytes = 0;
+
+ status = apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
+ }
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10394) "error evaluating sed on output");
+ break;
+ }
}
- APR_BUCKET_REMOVE(b);
apr_bucket_delete(b);
- b = b1;
- }
- else {
- apr_bucket *b1 = APR_BUCKET_NEXT(b);
- APR_BUCKET_REMOVE(b);
- b = b1;
}
}
- apr_brigade_cleanup(bb);
- status = flush_output_buffer(ctx);
- if (status != APR_SUCCESS) {
- clear_ctxpool(ctx);
- return status;
+ if (status == APR_SUCCESS) {
+ status = flush_output_buffer(ctx);
}
if (!APR_BRIGADE_EMPTY(ctx->bb)) {
- status = ap_pass_brigade(f->next, ctx->bb);
+ if (status == APR_SUCCESS) {
+ status = ap_pass_brigade(f->next, ctx->bb);
+ }
apr_brigade_cleanup(ctx->bb);
}
clear_ctxpool(ctx);
@@ -433,7 +422,7 @@ static apr_status_t sed_request_filter(ap_filter_t *f,
* the buckets in bbinp and read the data from buckets and invoke
* sed_eval_buffer on the data. libsed will generate its output using
* sed_write_output which will add data in ctx->bb. Do it until it have
- * atleast one bucket in ctx->bb. At the end of data eos bucket
+ * at least one bucket in ctx->bb. At the end of data eos bucket
* should be there.
*
* Once eos bucket is seen, then invoke sed_finalize_eval to clear the
@@ -475,8 +464,10 @@ static apr_status_t sed_request_filter(ap_filter_t *f,
if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
== APR_SUCCESS) {
status = sed_eval_buffer(&ctx->eval, buf, bytes, ctx);
- if (status != APR_SUCCESS)
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, f->r, APLOGNO(10395) "error evaluating sed on input");
return status;
+ }
flush_output_buffer(ctx);
}
}
diff --git a/modules/filters/sed1.c b/modules/filters/sed1.c
index 67a8d06515e..047f49ba131 100644
--- a/modules/filters/sed1.c
+++ b/modules/filters/sed1.c
@@ -87,18 +87,20 @@ static void eval_errf(sed_eval_t *eval, const char *fmt, ...)
}
#define INIT_BUF_SIZE 1024
+#define MAX_BUF_SIZE 1024*8192
/*
* grow_buffer
*/
-static void grow_buffer(apr_pool_t *pool, char **buffer,
+static apr_status_t grow_buffer(apr_pool_t *pool, char **buffer,
char **spend, apr_size_t *cursize,
apr_size_t newsize)
{
char* newbuffer = NULL;
apr_size_t spendsize = 0;
- if (*cursize >= newsize)
- return;
+ if (*cursize >= newsize) {
+ return APR_SUCCESS;
+ }
/* Avoid number of times realloc is called. It could cause huge memory
* requirement if line size is huge e.g 2 MB */
if (newsize < *cursize * 2) {
@@ -107,6 +109,9 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
/* Align it to 4 KB boundary */
newsize = (newsize + ((1 << 12) - 1)) & ~((1 << 12) - 1);
+ if (newsize > MAX_BUF_SIZE) {
+ return APR_ENOMEM;
+ }
newbuffer = apr_pcalloc(pool, newsize);
if (*spend && *buffer && (*cursize > 0)) {
spendsize = *spend - *buffer;
@@ -119,63 +124,77 @@ static void grow_buffer(apr_pool_t *pool, char **buffer,
if (spend != buffer) {
*spend = *buffer + spendsize;
}
+ return APR_SUCCESS;
}
/*
* grow_line_buffer
*/
-static void grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
+static apr_status_t grow_line_buffer(sed_eval_t *eval, apr_size_t newsize)
{
- grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
+ return grow_buffer(eval->pool, &eval->linebuf, &eval->lspend,
&eval->lsize, newsize);
}
/*
* grow_hold_buffer
*/
-static void grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
+static apr_status_t grow_hold_buffer(sed_eval_t *eval, apr_size_t newsize)
{
- grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
+ return grow_buffer(eval->pool, &eval->holdbuf, &eval->hspend,
&eval->hsize, newsize);
}
/*
* grow_gen_buffer
*/
-static void grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
+static apr_status_t grow_gen_buffer(sed_eval_t *eval, apr_size_t newsize,
char **gspend)
{
+ apr_status_t rc = 0;
if (gspend == NULL) {
gspend = &eval->genbuf;
}
- grow_buffer(eval->pool, &eval->genbuf, gspend,
- &eval->gsize, newsize);
- eval->lcomend = &eval->genbuf[71];
+ rc = grow_buffer(eval->pool, &eval->genbuf, gspend,
+ &eval->gsize, newsize);
+ if (rc == APR_SUCCESS) {
+ eval->lcomend = &eval->genbuf[71];
+ }
+ return rc;
}
/*
* appendmem_to_linebuf
*/
-static void appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
+static apr_status_t appendmem_to_linebuf(sed_eval_t *eval, const char* sz, apr_size_t len)
{
+ apr_status_t rc = 0;
apr_size_t reqsize = (eval->lspend - eval->linebuf) + len;
if (eval->lsize < reqsize) {
- grow_line_buffer(eval, reqsize);
+ rc = grow_line_buffer(eval, reqsize);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
}
memcpy(eval->lspend, sz, len);
eval->lspend += len;
+ return APR_SUCCESS;
}
/*
* append_to_linebuf
*/
-static void append_to_linebuf(sed_eval_t *eval, const char* sz,
+static apr_status_t append_to_linebuf(sed_eval_t *eval, const char* sz,
step_vars_storage *step_vars)
{
apr_size_t len = strlen(sz);
char *old_linebuf = eval->linebuf;
+ apr_status_t rc = 0;
/* Copy string including null character */
- appendmem_to_linebuf(eval, sz, len + 1);
+ rc = appendmem_to_linebuf(eval, sz, len + 1);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
--eval->lspend; /* lspend will now point to NULL character */
/* Sync step_vars after a possible linebuf expansion */
if (step_vars && old_linebuf != eval->linebuf) {
@@ -189,68 +208,84 @@ static void append_to_linebuf(sed_eval_t *eval, const char* sz,
step_vars->locs = step_vars->locs - old_linebuf + eval->linebuf;
}
}
+ return APR_SUCCESS;
}
/*
* copy_to_linebuf
*/
-static void copy_to_linebuf(sed_eval_t *eval, const char* sz,
+static apr_status_t copy_to_linebuf(sed_eval_t *eval, const char* sz,
step_vars_storage *step_vars)
{
eval->lspend = eval->linebuf;
- append_to_linebuf(eval, sz, step_vars);
+ return append_to_linebuf(eval, sz, step_vars);
}
/*
* append_to_holdbuf
*/
-static void append_to_holdbuf(sed_eval_t *eval, const char* sz)
+static apr_status_t append_to_holdbuf(sed_eval_t *eval, const char* sz)
{
apr_size_t len = strlen(sz);
apr_size_t reqsize = (eval->hspend - eval->holdbuf) + len + 1;
+ apr_status_t rc = 0;
if (eval->hsize <= reqsize) {
- grow_hold_buffer(eval, reqsize);
+ rc = grow_hold_buffer(eval, reqsize);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
}
memcpy(eval->hspend, sz, len + 1);
/* hspend will now point to NULL character */
eval->hspend += len;
+ return APR_SUCCESS;
}
/*
* copy_to_holdbuf
*/
-static void copy_to_holdbuf(sed_eval_t *eval, const char* sz)
+static apr_status_t copy_to_holdbuf(sed_eval_t *eval, const char* sz)
{
eval->hspend = eval->holdbuf;
- append_to_holdbuf(eval, sz);
+ return append_to_holdbuf(eval, sz);
}
/*
* append_to_genbuf
*/
-static void append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
+static apr_status_t append_to_genbuf(sed_eval_t *eval, const char* sz, char **gspend)
{
apr_size_t len = strlen(sz);
apr_size_t reqsize = (*gspend - eval->genbuf) + len + 1;
+ apr_status_t rc = 0;
if (eval->gsize < reqsize) {
- grow_gen_buffer(eval, reqsize, gspend);
+ rc = grow_gen_buffer(eval, reqsize, gspend);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
}
memcpy(*gspend, sz, len + 1);
/* *gspend will now point to NULL character */
*gspend += len;
+ return APR_SUCCESS;
}
/*
* copy_to_genbuf
*/
-static void copy_to_genbuf(sed_eval_t *eval, const char* sz)
+static apr_status_t copy_to_genbuf(sed_eval_t *eval, const char* sz)
{
apr_size_t len = strlen(sz);
apr_size_t reqsize = len + 1;
+ apr_status_t rc = APR_SUCCESS;;
if (eval->gsize < reqsize) {
- grow_gen_buffer(eval, reqsize, NULL);
+ rc = grow_gen_buffer(eval, reqsize, NULL);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
}
memcpy(eval->genbuf, sz, len + 1);
+ return rc;
}
/*
@@ -397,6 +432,7 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
}
while (bufsz) {
+ apr_status_t rc = 0;
char *n;
apr_size_t llen;
@@ -411,7 +447,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
break;
}
- appendmem_to_linebuf(eval, buf, llen + 1);
+ rc = appendmem_to_linebuf(eval, buf, llen + 1);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
--eval->lspend;
/* replace new line character with NULL */
*eval->lspend = '\0';
@@ -426,7 +465,10 @@ apr_status_t sed_eval_buffer(sed_eval_t *eval, const char *buf, apr_size_t bufsz
/* Save the leftovers for later */
if (bufsz) {
- appendmem_to_linebuf(eval, buf, bufsz);
+ apr_status_t rc = appendmem_to_linebuf(eval, buf, bufsz);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
}
return APR_SUCCESS;
@@ -448,6 +490,7 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
/* Process leftovers */
if (eval->lspend > eval->linebuf) {
apr_status_t rv;
+ apr_status_t rc = 0;
if (eval->lreadyflag) {
eval->lreadyflag = 0;
@@ -457,7 +500,10 @@ apr_status_t sed_finalize_eval(sed_eval_t *eval, void *fout)
* buffer is not a newline.
*/
/* Assure space for NULL */
- append_to_linebuf(eval, "", NULL);
+ rc = append_to_linebuf(eval, "", NULL);
+ if (rc != APR_SUCCESS) {
+ return rc;
+ }
}
*eval->lspend = '\0';
@@ -655,11 +701,15 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
sp = eval->genbuf;
rp = rhsbuf;
sp = place(eval, sp, lp, step_vars->loc1);
+ if (sp == NULL) {
+ return APR_EGENERAL;
+ }
while ((c = *rp++) != 0) {
if (c == '&') {
sp = place(eval, sp, step_vars->loc1, step_vars->loc2);
- if (sp == NULL)
+ if (sp == NULL) {
return APR_EGENERAL;
+ }
}
else if (c == '\\') {
c = *rp++;
@@ -675,13 +725,19 @@ static apr_status_t dosub(sed_eval_t *eval, char *rhsbuf, int n,
*sp++ = c;
if (sp >= eval->genbuf + eval->gsize) {
/* expand genbuf and set the sp appropriately */
- grow_gen_buffer(eval, eval->gsize + 1024, &sp);
+ rv = grow_gen_buffer(eval, eval->gsize + 1024, &sp);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
}
}
lp = step_vars->loc2;
step_vars->loc2 = sp - eval->genbuf + eval->linebuf;
- append_to_genbuf(eval, lp, &sp);
- copy_to_linebuf(eval, eval->genbuf, step_vars);
+ rv = append_to_genbuf(eval, lp, &sp);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ rv = copy_to_linebuf(eval, eval->genbuf, step_vars);
return rv;
}
@@ -695,7 +751,10 @@ static char *place(sed_eval_t *eval, char *asp, char *al1, char *al2)
apr_size_t reqsize = (sp - eval->genbuf) + n + 1;
if (eval->gsize < reqsize) {
- grow_gen_buffer(eval, reqsize, &sp);
+ apr_status_t rc = grow_gen_buffer(eval, reqsize, &sp);
+ if (rc != APR_SUCCESS) {
+ return NULL;
+ }
}
memcpy(sp, al1, n);
return sp + n;
@@ -750,7 +809,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
}
p1++;
- copy_to_linebuf(eval, p1, step_vars);
+ rv = copy_to_linebuf(eval, p1, step_vars);
+ if (rv != APR_SUCCESS) return rv;
eval->jflag++;
break;
@@ -760,21 +820,27 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
break;
case GCOM:
- copy_to_linebuf(eval, eval->holdbuf, step_vars);
+ rv = copy_to_linebuf(eval, eval->holdbuf, step_vars);
+ if (rv != APR_SUCCESS) return rv;
break;
case CGCOM:
- append_to_linebuf(eval, "\n", step_vars);
- append_to_linebuf(eval, eval->holdbuf, step_vars);
+ rv = append_to_linebuf(eval, "\n", step_vars);
+ if (rv != APR_SUCCESS) return rv;
+ rv = append_to_linebuf(eval, eval->holdbuf, step_vars);
+ if (rv != APR_SUCCESS) return rv;
break;
case HCOM:
- copy_to_holdbuf(eval, eval->linebuf);
+ rv = copy_to_holdbuf(eval, eval->linebuf);
+ if (rv != APR_SUCCESS) return rv;
break;
case CHCOM:
- append_to_holdbuf(eval, "\n");
- append_to_holdbuf(eval, eval->linebuf);
+ rv = append_to_holdbuf(eval, "\n");
+ if (rv != APR_SUCCESS) return rv;
+ rv = append_to_holdbuf(eval, eval->linebuf);
+ if (rv != APR_SUCCESS) return rv;
break;
case ICOM:
@@ -896,7 +962,8 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
if (rv != APR_SUCCESS)
return rv;
}
- append_to_linebuf(eval, "\n", step_vars);
+ rv = append_to_linebuf(eval, "\n", step_vars);
+ if (rv != APR_SUCCESS) return rv;
eval->pending = ipc->next;
break;
@@ -970,9 +1037,12 @@ static apr_status_t command(sed_eval_t *eval, sed_reptr_t *ipc,
break;
case XCOM:
- copy_to_genbuf(eval, eval->linebuf);
- copy_to_linebuf(eval, eval->holdbuf, step_vars);
- copy_to_holdbuf(eval, eval->genbuf);
+ rv = copy_to_genbuf(eval, eval->linebuf);
+ if (rv != APR_SUCCESS) return rv;
+ rv = copy_to_linebuf(eval, eval->holdbuf, step_vars);
+ if (rv != APR_SUCCESS) return rv;
+ rv = copy_to_holdbuf(eval, eval->genbuf);
+ if (rv != APR_SUCCESS) return rv;
break;
case YCOM:
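
The sed1.c changes above cap the filter's line buffer at MAX_BUF_SIZE (8 MiB) and turn the grow_*_buffer helpers into status-returning functions so allocation failures propagate instead of memory growing without bound. For reference, an assumed minimal configuration that exercises the mod_sed output filter (directory and sed expression are illustrative):

    <Directory "/var/www/html/sed">
        AddOutputFilter Sed html
        OutputSed "s/monday/MON/g"
    </Directory>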


@@ -1,246 +0,0 @@
From 3a561759fcb37af179585adb8478922dc9bc6a85 Mon Sep 17 00:00:00 2001
From: Eric Covener <covener@apache.org>
Date: Wed, 1 Jun 2022 12:36:39 +0000
Subject: [PATCH] Merge r1901502 from trunk:
use filters consistently
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1901503 13f79535-47bb-0310-9956-ffa450edef68
---
modules/lua/lua_request.c | 144 ++++++++++++++------------------------
1 file changed, 53 insertions(+), 91 deletions(-)
diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c
index a3e3b613bc9..2ec453e86b4 100644
--- a/modules/lua/lua_request.c
+++ b/modules/lua/lua_request.c
@@ -2227,23 +2227,20 @@ static int lua_websocket_greet(lua_State *L)
return 0;
}
-static apr_status_t lua_websocket_readbytes(conn_rec* c, char* buffer,
- apr_off_t len)
+static apr_status_t lua_websocket_readbytes(conn_rec* c,
+ apr_bucket_brigade *brigade,
+ char* buffer, apr_off_t len)
{
- apr_bucket_brigade *brigade = apr_brigade_create(c->pool, c->bucket_alloc);
+ apr_size_t delivered;
apr_status_t rv;
+
rv = ap_get_brigade(c->input_filters, brigade, AP_MODE_READBYTES,
APR_BLOCK_READ, len);
if (rv == APR_SUCCESS) {
- if (!APR_BRIGADE_EMPTY(brigade)) {
- apr_bucket* bucket = APR_BRIGADE_FIRST(brigade);
- const char* data = NULL;
- apr_size_t data_length = 0;
- rv = apr_bucket_read(bucket, &data, &data_length, APR_BLOCK_READ);
- if (rv == APR_SUCCESS) {
- memcpy(buffer, data, len);
- }
- apr_bucket_delete(bucket);
+ delivered = len;
+ rv = apr_brigade_flatten(brigade, buffer, &delivered);
+ if ((rv == APR_SUCCESS) && (delivered < len)) {
+ rv = APR_INCOMPLETE;
}
}
apr_brigade_cleanup(brigade);
@@ -2273,35 +2270,28 @@ static int lua_websocket_peek(lua_State *L)
static int lua_websocket_read(lua_State *L)
{
- apr_socket_t *sock;
apr_status_t rv;
int do_read = 1;
int n = 0;
- apr_size_t len = 1;
apr_size_t plen = 0;
unsigned short payload_short = 0;
apr_uint64_t payload_long = 0;
unsigned char *mask_bytes;
char byte;
- int plaintext;
-
-
+ apr_bucket_brigade *brigade;
+ conn_rec* c;
+
request_rec *r = ap_lua_check_request_rec(L, 1);
- plaintext = ap_lua_ssl_is_https(r->connection) ? 0 : 1;
+ c = r->connection;
-
mask_bytes = apr_pcalloc(r->pool, 4);
- sock = ap_get_conn_socket(r->connection);
+
+ brigade = apr_brigade_create(r->pool, c->bucket_alloc);
while (do_read) {
do_read = 0;
/* Get opcode and FIN bit */
- if (plaintext) {
- rv = apr_socket_recv(sock, &byte, &len);
- }
- else {
- rv = lua_websocket_readbytes(r->connection, &byte, 1);
- }
+ rv = lua_websocket_readbytes(c, brigade, &byte, 1);
if (rv == APR_SUCCESS) {
unsigned char ubyte, fin, opcode, mask, payload;
ubyte = (unsigned char)byte;
@@ -2311,12 +2301,7 @@ static int lua_websocket_read(lua_State *L)
opcode = ubyte & 0xf;
/* Get the payload length and mask bit */
- if (plaintext) {
- rv = apr_socket_recv(sock, &byte, &len);
- }
- else {
- rv = lua_websocket_readbytes(r->connection, &byte, 1);
- }
+ rv = lua_websocket_readbytes(c, brigade, &byte, 1);
if (rv == APR_SUCCESS) {
ubyte = (unsigned char)byte;
/* Mask is the first bit */
@@ -2327,40 +2312,25 @@ static int lua_websocket_read(lua_State *L)
/* Extended payload? */
if (payload == 126) {
- len = 2;
- if (plaintext) {
- /* XXX: apr_socket_recv does not receive len bits, only up to len bits! */
- rv = apr_socket_recv(sock, (char*) &payload_short, &len);
- }
- else {
- rv = lua_websocket_readbytes(r->connection,
- (char*) &payload_short, 2);
- }
- payload_short = ntohs(payload_short);
+ rv = lua_websocket_readbytes(c, brigade,
+ (char*) &payload_short, 2);
- if (rv == APR_SUCCESS) {
- plen = payload_short;
- }
- else {
+ if (rv != APR_SUCCESS) {
return 0;
}
+
+ plen = ntohs(payload_short);
}
/* Super duper extended payload? */
if (payload == 127) {
- len = 8;
- if (plaintext) {
- rv = apr_socket_recv(sock, (char*) &payload_long, &len);
- }
- else {
- rv = lua_websocket_readbytes(r->connection,
- (char*) &payload_long, 8);
- }
- if (rv == APR_SUCCESS) {
- plen = ap_ntoh64(&payload_long);
- }
- else {
+ rv = lua_websocket_readbytes(c, brigade,
+ (char*) &payload_long, 8);
+
+ if (rv != APR_SUCCESS) {
return 0;
}
+
+ plen = ap_ntoh64(&payload_long);
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03210)
"Websocket: Reading %" APR_SIZE_T_FMT " (%s) bytes, masking is %s. %s",
@@ -2369,46 +2339,27 @@ static int lua_websocket_read(lua_State *L)
mask ? "on" : "off",
fin ? "This is a final frame" : "more to follow");
if (mask) {
- len = 4;
- if (plaintext) {
- rv = apr_socket_recv(sock, (char*) mask_bytes, &len);
- }
- else {
- rv = lua_websocket_readbytes(r->connection,
- (char*) mask_bytes, 4);
- }
+ rv = lua_websocket_readbytes(c, brigade,
+ (char*) mask_bytes, 4);
+
if (rv != APR_SUCCESS) {
return 0;
}
}
if (plen < (HUGE_STRING_LEN*1024) && plen > 0) {
apr_size_t remaining = plen;
- apr_size_t received;
- apr_off_t at = 0;
char *buffer = apr_palloc(r->pool, plen+1);
buffer[plen] = 0;
- if (plaintext) {
- while (remaining > 0) {
- received = remaining;
- rv = apr_socket_recv(sock, buffer+at, &received);
- if (received > 0 ) {
- remaining -= received;
- at += received;
- }
- }
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "Websocket: Frame contained %" APR_OFF_T_FMT " bytes, pushed to Lua stack",
- at);
- }
- else {
- rv = lua_websocket_readbytes(r->connection, buffer,
- remaining);
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "Websocket: SSL Frame contained %" APR_SIZE_T_FMT " bytes, "\
- "pushed to Lua stack",
- remaining);
+ rv = lua_websocket_readbytes(c, brigade, buffer, remaining);
+
+ if (rv != APR_SUCCESS) {
+ return 0;
}
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "Websocket: Frame contained %" APR_SIZE_T_FMT \
+ " bytes, pushed to Lua stack", remaining);
if (mask) {
for (n = 0; n < plen; n++) {
buffer[n] ^= mask_bytes[n%4];
@@ -2420,14 +2371,25 @@ static int lua_websocket_read(lua_State *L)
return 2;
}
-
/* Decide if we need to react to the opcode or not */
if (opcode == 0x09) { /* ping */
char frame[2];
- plen = 2;
+ apr_bucket *b;
+
frame[0] = 0x8A;
frame[1] = 0;
- apr_socket_send(sock, frame, &plen); /* Pong! */
+
+ /* Pong! */
+ b = apr_bucket_transient_create(frame, 2, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(brigade, b);
+
+ rv = ap_pass_brigade(c->output_filters, brigade);
+ apr_brigade_cleanup(brigade);
+
+ if (rv != APR_SUCCESS) {
+ return 0;
+ }
+
do_read = 1;
}
}


@@ -1,230 +0,0 @@
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 3af5aed..bc86253 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -3854,12 +3854,14 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
char **old_cl_val,
char **old_te_val)
{
+ int rc = OK;
conn_rec *c = r->connection;
int counter;
char *buf;
+ apr_table_t *saved_headers_in = r->headers_in;
+ const char *saved_host = apr_table_get(saved_headers_in, "Host");
const apr_array_header_t *headers_in_array;
const apr_table_entry_t *headers_in;
- apr_table_t *saved_headers_in;
apr_bucket *e;
int do_100_continue;
conn_rec *origin = p_conn->connection;
@@ -3896,6 +3898,52 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
ap_xlate_proto_to_ascii(buf, strlen(buf));
e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+
+ /*
+ * Make a copy on r->headers_in for the request we make to the backend,
+ * modify the copy in place according to our configuration and connection
+ * handling, use it to fill in the forwarded headers' brigade, and finally
+ * restore the saved/original ones in r->headers_in.
+ *
+ * Note: We need to take r->pool for apr_table_copy as the key / value
+ * pairs in r->headers_in have been created out of r->pool and
+ * p might be (and actually is) a longer living pool.
+ * This would trigger the bad pool ancestry abort in apr_table_copy if
+ * apr is compiled with APR_POOL_DEBUG.
+ *
+ * icing: if p indeed lives longer than r->pool, we should allocate
+ * all new header values from r->pool as well and avoid leakage.
+ */
+ r->headers_in = apr_table_copy(r->pool, saved_headers_in);
+
+ /* Return the original Transfer-Encoding and/or Content-Length values
+ * then drop the headers, they must be set by the proxy handler based
+ * on the actual body being forwarded.
+ */
+ if ((*old_te_val = (char *)apr_table_get(r->headers_in,
+ "Transfer-Encoding"))) {
+ apr_table_unset(r->headers_in, "Transfer-Encoding");
+ }
+ if ((*old_cl_val = (char *)apr_table_get(r->headers_in,
+ "Content-Length"))) {
+ apr_table_unset(r->headers_in, "Content-Length");
+ }
+
+ /* Clear out hop-by-hop request headers not to forward */
+ if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
+ rc = HTTP_BAD_REQUEST;
+ goto cleanup;
+ }
+
+ /* RFC2616 13.5.1 says we should strip these */
+ apr_table_unset(r->headers_in, "Keep-Alive");
+ apr_table_unset(r->headers_in, "Upgrade");
+ apr_table_unset(r->headers_in, "Trailer");
+ apr_table_unset(r->headers_in, "TE");
+
+ /* We used to send `Host: ` always first, so let's keep it that
+ * way. No telling which legacy backend is relying no this.
+ */
if (dconf->preserve_host == 0) {
if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
@@ -3917,7 +3965,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
/* don't want to use r->hostname, as the incoming header might have a
* port attached
*/
- const char* hostname = apr_table_get(r->headers_in,"Host");
+ const char* hostname = saved_host;
if (!hostname) {
hostname = r->server->server_hostname;
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
@@ -3931,21 +3979,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
ap_xlate_proto_to_ascii(buf, strlen(buf));
e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
-
- /*
- * Save the original headers in here and restore them when leaving, since
- * we will apply proxy purpose only modifications (eg. clearing hop-by-hop
- * headers, add Via or X-Forwarded-* or Expect...), whereas the originals
- * will be needed later to prepare the correct response and logging.
- *
- * Note: We need to take r->pool for apr_table_copy as the key / value
- * pairs in r->headers_in have been created out of r->pool and
- * p might be (and actually is) a longer living pool.
- * This would trigger the bad pool ancestry abort in apr_table_copy if
- * apr is compiled with APR_POOL_DEBUG.
- */
- saved_headers_in = r->headers_in;
- r->headers_in = apr_table_copy(r->pool, saved_headers_in);
+ apr_table_unset(r->headers_in, "Host");
/* handle Via */
if (conf->viaopt == via_block) {
@@ -4012,8 +4046,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
*/
if (dconf->add_forwarded_headers) {
if (PROXYREQ_REVERSE == r->proxyreq) {
- const char *buf;
-
/* Add X-Forwarded-For: so that the upstream has a chance to
* determine, where the original request came from.
*/
@@ -4023,8 +4055,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
/* Add X-Forwarded-Host: so that upstream knows what the
* original request hostname was.
*/
- if ((buf = apr_table_get(r->headers_in, "Host"))) {
- apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
+ if (saved_host) {
+ apr_table_mergen(r->headers_in, "X-Forwarded-Host",
+ saved_host);
}
/* Add X-Forwarded-Server: so that upstream knows what the
@@ -4036,11 +4069,28 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
}
}
- proxy_run_fixups(r);
- if (ap_proxy_clear_connection(r, r->headers_in) < 0) {
- return HTTP_BAD_REQUEST;
+ /* Do we want to strip Proxy-Authorization ?
+ * If we haven't used it, then NO
+ * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
+ * So let's make it configurable by env.
+ */
+ if (r->user != NULL /* we've authenticated */
+ && !apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
+ apr_table_unset(r->headers_in, "Proxy-Authorization");
}
+ /* for sub-requests, ignore freshness/expiry headers */
+ if (r->main) {
+ apr_table_unset(r->headers_in, "If-Match");
+ apr_table_unset(r->headers_in, "If-Modified-Since");
+ apr_table_unset(r->headers_in, "If-Range");
+ apr_table_unset(r->headers_in, "If-Unmodified-Since");
+ apr_table_unset(r->headers_in, "If-None-Match");
+ }
+
+ /* run hook to fixup the request we are about to send */
+ proxy_run_fixups(r);
+
creds = apr_table_get(r->notes, "proxy-basic-creds");
if (creds) {
apr_table_mergen(r->headers_in, "Proxy-Authorization", creds);
@@ -4051,55 +4101,8 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
headers_in = (const apr_table_entry_t *) headers_in_array->elts;
for (counter = 0; counter < headers_in_array->nelts; counter++) {
if (headers_in[counter].key == NULL
- || headers_in[counter].val == NULL
-
- /* Already sent */
- || !ap_cstr_casecmp(headers_in[counter].key, "Host")
-
- /* Clear out hop-by-hop request headers not to send
- * RFC2616 13.5.1 says we should strip these headers
- */
- || !ap_cstr_casecmp(headers_in[counter].key, "Keep-Alive")
- || !ap_cstr_casecmp(headers_in[counter].key, "TE")
- || !ap_cstr_casecmp(headers_in[counter].key, "Trailer")
- || !ap_cstr_casecmp(headers_in[counter].key, "Upgrade")
-
- ) {
- continue;
- }
- /* Do we want to strip Proxy-Authorization ?
- * If we haven't used it, then NO
- * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
- * So let's make it configurable by env.
- */
- if (!ap_cstr_casecmp(headers_in[counter].key,"Proxy-Authorization")) {
- if (r->user != NULL) { /* we've authenticated */
- if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
- continue;
- }
- }
- }
-
- /* Skip Transfer-Encoding and Content-Length for now.
- */
- if (!ap_cstr_casecmp(headers_in[counter].key, "Transfer-Encoding")) {
- *old_te_val = headers_in[counter].val;
- continue;
- }
- if (!ap_cstr_casecmp(headers_in[counter].key, "Content-Length")) {
- *old_cl_val = headers_in[counter].val;
- continue;
- }
-
- /* for sub-requests, ignore freshness/expiry headers */
- if (r->main) {
- if ( !ap_cstr_casecmp(headers_in[counter].key, "If-Match")
- || !ap_cstr_casecmp(headers_in[counter].key, "If-Modified-Since")
- || !ap_cstr_casecmp(headers_in[counter].key, "If-Range")
- || !ap_cstr_casecmp(headers_in[counter].key, "If-Unmodified-Since")
- || !ap_cstr_casecmp(headers_in[counter].key, "If-None-Match")) {
- continue;
- }
+ || headers_in[counter].val == NULL) {
+ continue;
}
buf = apr_pstrcat(p, headers_in[counter].key, ": ",
@@ -4110,11 +4113,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
}
- /* Restore the original headers in (see comment above),
- * we won't modify them anymore.
- */
+cleanup:
r->headers_in = saved_headers_in;
- return OK;
+ return rc;
}
PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
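
The rewritten ap_proxy_create_hdrbrgd above works on a copy of r->headers_in, strips hop-by-hop and conditional headers from that copy, and restores the original table before returning. Two behaviours referenced in the code are configurable; an assumed example (directive and variable names from the mod_proxy documentation, values illustrative):

    # Keep adding X-Forwarded-For/-Host/-Server to proxied requests (the default)
    ProxyAddHeaders On

    # Ask the proxy to propagate Proxy-Authorization to the backend
    # (the Proxy-Chain-Auth subprocess_env check in the hunk above)
    SetEnv Proxy-Chain-Auth on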


@@ -1,23 +0,0 @@
From 5efc9507c487c37dfe2a279a4a0335cad701cd5f Mon Sep 17 00:00:00 2001
From: Eric Covener <covener@apache.org>
Date: Tue, 10 Jan 2023 13:19:07 +0000
Subject: [PATCH] cleanup on error
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1906540 13f79535-47bb-0310-9956-ffa450edef68
---
modules/proxy/mod_proxy_ajp.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
index 9cd7adbcbbf..07f37392d88 100644
--- a/modules/proxy/mod_proxy_ajp.c
+++ b/modules/proxy/mod_proxy_ajp.c
@@ -255,6 +255,8 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10396)
"%s Transfer-Encoding is not supported",
tenc);
+ /* We had a failure: Close connection to backend */
+ conn->close = 1;
return HTTP_INTERNAL_SERVER_ERROR;
}
} else {


@@ -1,129 +0,0 @@
From 8b6d55f6a047acf62675e32606b037f5eea8ccc7 Mon Sep 17 00:00:00 2001
From: Eric Covener <covener@apache.org>
Date: Tue, 10 Jan 2023 13:20:09 +0000
Subject: [PATCH] Merge r1906539 from trunk:
fail on bad header
Submitted By: covener
Reviewed By: covener, rpluem, gbechis
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1906541 13f79535-47bb-0310-9956-ffa450edef68
---
modules/proxy/mod_proxy_http.c | 46 ++++++++++++++++++++--------------
server/protocol.c | 2 ++
2 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index d74ae054ac9..ec4e7fb06b5 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -788,7 +788,7 @@ static void process_proxy_header(request_rec *r, proxy_dir_conf *c,
* any sense at all, since we depend on buffer still containing
* what was read by ap_getline() upon return.
*/
-static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
+static apr_status_t ap_proxy_read_headers(request_rec *r, request_rec *rr,
char *buffer, int size,
conn_rec *c, int *pread_len)
{
@@ -820,19 +820,26 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
rc = ap_proxygetline(tmp_bb, buffer, size, rr,
AP_GETLINE_FOLD | AP_GETLINE_NOSPC_EOL, &len);
- if (len <= 0)
- break;
- if (APR_STATUS_IS_ENOSPC(rc)) {
- /* The header could not fit in the provided buffer, warn.
- * XXX: falls through with the truncated header, 5xx instead?
- */
- int trunc = (len > 128 ? 128 : len) / 2;
- ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124)
- "header size is over the limit allowed by "
- "ResponseFieldSize (%d bytes). "
- "Bad response header: '%.*s[...]%s'",
- size, trunc, buffer, buffer + len - trunc);
+ if (rc != APR_SUCCESS) {
+ if (APR_STATUS_IS_ENOSPC(rc)) {
+ int trunc = (len > 128 ? 128 : len) / 2;
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10124)
+ "header size is over the limit allowed by "
+ "ResponseFieldSize (%d bytes). "
+ "Bad response header: '%.*s[...]%s'",
+ size, trunc, buffer, buffer + len - trunc);
+ }
+ else {
+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, rc, r, APLOGNO(10404)
+ "Error reading headers from backend");
+ }
+ r->headers_out = NULL;
+ return rc;
+ }
+
+ if (len <= 0) {
+ break;
}
else {
ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, "%s", buffer);
@@ -855,7 +862,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
if (psc->badopt == bad_error) {
/* Nope, it wasn't even an extra HTTP header. Give up. */
r->headers_out = NULL;
- return;
+ return APR_EINVAL;
}
else if (psc->badopt == bad_body) {
/* if we've already started loading headers_out, then
@@ -869,13 +876,13 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
"in headers returned by %s (%s)",
r->uri, r->method);
*pread_len = len;
- return;
+ return APR_SUCCESS;
}
else {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01099)
"No HTTP headers returned by %s (%s)",
r->uri, r->method);
- return;
+ return APR_SUCCESS;
}
}
}
@@ -905,6 +912,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr,
process_proxy_header(r, dconf, buffer, value);
saw_headers = 1;
}
+ return APR_SUCCESS;
}
@@ -1218,10 +1226,10 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
"Set-Cookie", NULL);
/* shove the headers direct into r->headers_out */
- ap_proxy_read_headers(r, backend->r, buffer, response_field_size,
- origin, &pread_len);
+ rc = ap_proxy_read_headers(r, backend->r, buffer, response_field_size,
+ origin, &pread_len);
- if (r->headers_out == NULL) {
+ if (rc != APR_SUCCESS || r->headers_out == NULL) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106)
"bad HTTP/%d.%d header returned by %s (%s)",
major, minor, r->uri, r->method);
diff --git a/server/protocol.c b/server/protocol.c
index 7adc7f75c10..6f9540ad1de 100644
--- a/server/protocol.c
+++ b/server/protocol.c
@@ -508,6 +508,8 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n,
/* PR#43039: We shouldn't accept NULL bytes within the line */
bytes_handled = strlen(*s);
if (bytes_handled < *read) {
+ ap_log_data(APLOG_MARK, APLOG_DEBUG, ap_server_conf,
+ "NULL bytes in header", *s, *read, 0);
*read = bytes_handled;
if (rv == APR_SUCCESS) {
rv = APR_EINVAL;
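
With the change above, ap_proxy_read_headers reports backend read failures to its caller instead of continuing with a truncated header set. The buffer size and bad-header policy it mentions remain configurable; an assumed tuning example (values illustrative):

    # Per-header buffer for backend response headers (the ResponseFieldSize
    # referenced in APLOGNO 10124 above)
    ResponseFieldSize 16384

    # Treat a malformed response header line as an error (the default policy)
    ProxyBadHeader IsError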


@@ -1,14 +0,0 @@
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index e488aa6..8267f1b 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -3121,7 +3121,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_check_connection(const char *scheme,
"%s: backend socket is disconnected.", scheme);
}
else {
- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, server, APLOGNO(03408)
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, server, APLOGNO(03408)
"%s: reusable backend connection is not empty: "
"forcibly closed", scheme);
}
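
The hunk above lowers the "reusable backend connection is not empty" message (APLOGNO 03408) from warning to info. Administrators who still want it in the error log can raise mod_proxy's per-module log level; an assumed example:

    LogLevel warn proxy:info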


@@ -1,336 +0,0 @@
--- a/server/mpm/event/event.c 2022/05/24 08:59:30 1901198
+++ b/server/mpm/event/event.c 2022/05/24 09:00:19 1901199
@@ -379,7 +379,7 @@
* We use this value to optimize routines that have to scan the entire
* scoreboard.
*/
- int max_daemons_limit;
+ int max_daemon_used;
/*
* All running workers, active and shutting down, including those that
@@ -645,7 +645,7 @@
*rv = APR_SUCCESS;
switch (query_code) {
case AP_MPMQ_MAX_DAEMON_USED:
- *result = retained->max_daemons_limit;
+ *result = retained->max_daemon_used;
break;
case AP_MPMQ_IS_THREADED:
*result = AP_MPMQ_STATIC;
@@ -696,14 +696,32 @@
return OK;
}
-static void event_note_child_killed(int childnum, pid_t pid, ap_generation_t gen)
+static void event_note_child_stopped(int slot, pid_t pid, ap_generation_t gen)
{
- if (childnum != -1) { /* child had a scoreboard slot? */
- ap_run_child_status(ap_server_conf,
- ap_scoreboard_image->parent[childnum].pid,
- ap_scoreboard_image->parent[childnum].generation,
- childnum, MPM_CHILD_EXITED);
- ap_scoreboard_image->parent[childnum].pid = 0;
+ if (slot != -1) { /* child had a scoreboard slot? */
+ process_score *ps = &ap_scoreboard_image->parent[slot];
+ int i;
+
+ pid = ps->pid;
+ gen = ps->generation;
+ for (i = 0; i < threads_per_child; i++) {
+ ap_update_child_status_from_indexes(slot, i, SERVER_DEAD, NULL);
+ }
+ ap_run_child_status(ap_server_conf, pid, gen, slot, MPM_CHILD_EXITED);
+ if (ps->quiescing != 2) { /* vs perform_idle_server_maintenance() */
+ retained->active_daemons--;
+ }
+ retained->total_daemons--;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+ "Child %d stopped: pid %d, gen %d, "
+ "active %d/%d, total %d/%d/%d, quiescing %d",
+ slot, (int)pid, (int)gen,
+ retained->active_daemons, active_daemons_limit,
+ retained->total_daemons, retained->max_daemon_used,
+ server_limit, ps->quiescing);
+ ps->not_accepting = 0;
+ ps->quiescing = 0;
+ ps->pid = 0;
}
else {
ap_run_child_status(ap_server_conf, pid, gen, -1, MPM_CHILD_EXITED);
@@ -713,9 +731,19 @@
static void event_note_child_started(int slot, pid_t pid)
{
ap_generation_t gen = retained->mpm->my_generation;
+
+ retained->total_daemons++;
+ retained->active_daemons++;
ap_scoreboard_image->parent[slot].pid = pid;
ap_scoreboard_image->parent[slot].generation = gen;
ap_run_child_status(ap_server_conf, pid, gen, slot, MPM_CHILD_STARTED);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+ "Child %d started: pid %d, gen %d, "
+ "active %d/%d, total %d/%d/%d",
+ slot, (int)pid, (int)gen,
+ retained->active_daemons, active_daemons_limit,
+ retained->total_daemons, retained->max_daemon_used,
+ server_limit);
}
static const char *event_get_name(void)
@@ -737,7 +765,7 @@
}
if (one_process) {
- event_note_child_killed(/* slot */ 0, 0, 0);
+ event_note_child_stopped(/* slot */ 0, 0, 0);
}
exit(code);
@@ -2712,8 +2740,8 @@
{
int pid;
- if (slot + 1 > retained->max_daemons_limit) {
- retained->max_daemons_limit = slot + 1;
+ if (slot + 1 > retained->max_daemon_used) {
+ retained->max_daemon_used = slot + 1;
}
if (ap_scoreboard_image->parent[slot].pid != 0) {
@@ -2781,11 +2809,7 @@
return -1;
}
- ap_scoreboard_image->parent[slot].quiescing = 0;
- ap_scoreboard_image->parent[slot].not_accepting = 0;
event_note_child_started(slot, pid);
- retained->active_daemons++;
- retained->total_daemons++;
return 0;
}
@@ -2805,7 +2829,8 @@
}
}
-static void perform_idle_server_maintenance(int child_bucket)
+static void perform_idle_server_maintenance(int child_bucket,
+ int *max_daemon_used)
{
int num_buckets = retained->mpm->num_buckets;
int idle_thread_count = 0;
@@ -2821,7 +2846,7 @@
/* We only care about child_bucket in this call */
continue;
}
- if (i >= retained->max_daemons_limit &&
+ if (i >= retained->max_daemon_used &&
free_length == retained->idle_spawn_rate[child_bucket]) {
/* short cut if all active processes have been examined and
* enough empty scoreboard slots have been found
@@ -2835,6 +2860,13 @@
if (ps->quiescing == 1) {
ps->quiescing = 2;
retained->active_daemons--;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
+ "Child %d quiescing: pid %d, gen %d, "
+ "active %d/%d, total %d/%d/%d",
+ i, (int)ps->pid, (int)ps->generation,
+ retained->active_daemons, active_daemons_limit,
+ retained->total_daemons, retained->max_daemon_used,
+ server_limit);
}
for (j = 0; j < threads_per_child; j++) {
int status = ap_scoreboard_image->servers[i][j].status;
@@ -2863,8 +2895,9 @@
free_slots[free_length++] = i;
}
}
-
- retained->max_daemons_limit = last_non_dead + 1;
+ if (*max_daemon_used < last_non_dead + 1) {
+ *max_daemon_used = last_non_dead + 1;
+ }
if (retained->sick_child_detected) {
if (had_healthy_child) {
@@ -2893,6 +2926,10 @@
}
}
+ AP_DEBUG_ASSERT(retained->active_daemons <= retained->total_daemons
+ && retained->total_daemons <= retained->max_daemon_used
+ && retained->max_daemon_used <= server_limit);
+
if (idle_thread_count > max_spare_threads / num_buckets) {
/*
* Child processes that we ask to shut down won't die immediately
@@ -2915,13 +2952,12 @@
active_daemons_limit));
ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, ap_server_conf,
"%shutting down one child: "
- "active daemons %d / active limit %d / "
- "total daemons %d / ServerLimit %d / "
- "idle threads %d / max workers %d",
+ "active %d/%d, total %d/%d/%d, "
+ "idle threads %d, max workers %d",
(do_kill) ? "S" : "Not s",
retained->active_daemons, active_daemons_limit,
- retained->total_daemons, server_limit,
- idle_thread_count, max_workers);
+ retained->total_daemons, retained->max_daemon_used,
+ server_limit, idle_thread_count, max_workers);
if (do_kill) {
ap_mpm_podx_signal(all_buckets[child_bucket].pod,
AP_MPM_PODX_GRACEFUL);
@@ -2970,10 +3006,14 @@
else {
ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
"server is at active daemons limit, spawning "
- "of %d children cancelled: %d/%d active, "
- "rate %d", free_length,
+ "of %d children cancelled: active %d/%d, "
+ "total %d/%d/%d, rate %d", free_length,
retained->active_daemons, active_daemons_limit,
- retained->idle_spawn_rate[child_bucket]);
+ retained->total_daemons, retained->max_daemon_used,
+ server_limit, retained->idle_spawn_rate[child_bucket]);
+ /* reset the spawning rate and prevent its growth below */
+ retained->idle_spawn_rate[child_bucket] = 1;
+ ++retained->hold_off_on_exponential_spawning;
free_length = 0;
}
}
@@ -2989,12 +3029,13 @@
retained->total_daemons);
}
for (i = 0; i < free_length; ++i) {
- ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, ap_server_conf,
- "Spawning new child: slot %d active / "
- "total daemons: %d/%d",
- free_slots[i], retained->active_daemons,
- retained->total_daemons);
- make_child(ap_server_conf, free_slots[i], child_bucket);
+ int slot = free_slots[i];
+ if (make_child(ap_server_conf, slot, child_bucket) < 0) {
+ continue;
+ }
+ if (*max_daemon_used < slot + 1) {
+ *max_daemon_used = slot + 1;
+ }
}
/* the next time around we want to spawn twice as many if this
* wasn't good enough, but not if we've just done a graceful
@@ -3016,6 +3057,7 @@
static void server_main_loop(int remaining_children_to_start)
{
int num_buckets = retained->mpm->num_buckets;
+ int max_daemon_used = 0;
int child_slot;
apr_exit_why_e exitwhy;
int status, processed_status;
@@ -3061,19 +3103,8 @@
}
/* non-fatal death... note that it's gone in the scoreboard. */
if (child_slot >= 0) {
- process_score *ps;
+ event_note_child_stopped(child_slot, 0, 0);
- for (i = 0; i < threads_per_child; i++)
- ap_update_child_status_from_indexes(child_slot, i,
- SERVER_DEAD, NULL);
-
- event_note_child_killed(child_slot, 0, 0);
- ps = &ap_scoreboard_image->parent[child_slot];
- if (ps->quiescing != 2)
- retained->active_daemons--;
- ps->quiescing = 0;
- /* NOTE: We don't dec in the (child_slot < 0) case! */
- retained->total_daemons--;
if (processed_status == APEXIT_CHILDSICK) {
/* resource shortage, minimize the fork rate */
retained->idle_spawn_rate[child_slot % num_buckets] = 1;
@@ -3123,9 +3154,11 @@
continue;
}
+ max_daemon_used = 0;
for (i = 0; i < num_buckets; i++) {
- perform_idle_server_maintenance(i);
+ perform_idle_server_maintenance(i, &max_daemon_used);
}
+ retained->max_daemon_used = max_daemon_used;
}
}
@@ -3213,7 +3246,7 @@
AP_MPM_PODX_RESTART);
}
ap_reclaim_child_processes(1, /* Start with SIGTERM */
- event_note_child_killed);
+ event_note_child_stopped);
if (!child_fatal) {
/* cleanup pid file on normal shutdown */
@@ -3239,7 +3272,7 @@
ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
AP_MPM_PODX_GRACEFUL);
}
- ap_relieve_child_processes(event_note_child_killed);
+ ap_relieve_child_processes(event_note_child_stopped);
if (!child_fatal) {
/* cleanup pid file on normal shutdown */
@@ -3261,10 +3294,10 @@
apr_sleep(apr_time_from_sec(1));
/* Relieve any children which have now exited */
- ap_relieve_child_processes(event_note_child_killed);
+ ap_relieve_child_processes(event_note_child_stopped);
active_children = 0;
- for (index = 0; index < retained->max_daemons_limit; ++index) {
+ for (index = 0; index < retained->max_daemon_used; ++index) {
if (ap_mpm_safe_kill(MPM_CHILD_PID(index), 0) == APR_SUCCESS) {
active_children = 1;
/* Having just one child is enough to stay around */
@@ -3282,7 +3315,7 @@
ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
AP_MPM_PODX_RESTART);
}
- ap_reclaim_child_processes(1, event_note_child_killed);
+ ap_reclaim_child_processes(1, event_note_child_stopped);
return DONE;
}
@@ -3302,8 +3335,7 @@
if (!retained->mpm->is_ungraceful) {
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00493)
- AP_SIG_GRACEFUL_STRING
- " received. Doing graceful restart");
+ AP_SIG_GRACEFUL_STRING " received. Doing graceful restart");
/* wake up the children...time to die. But we'll have more soon */
for (i = 0; i < num_buckets; i++) {
ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
@@ -3316,6 +3348,8 @@
}
else {
+ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00494)
+ "SIGHUP received. Attempting to restart");
/* Kill 'em all. Since the child acts the same on the parents SIGTERM
* and a SIGHUP, we may as well use the same signal, because some user
* pthreads are stealing signals from us left and right.
@@ -3326,9 +3360,7 @@
}
ap_reclaim_child_processes(1, /* Start with SIGTERM */
- event_note_child_killed);
- ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00494)
- "SIGHUP received. Attempting to restart");
+ event_note_child_stopped);
}
return OK;


@ -1,22 +1,8 @@
From d4e5b6e1e5585d341d1e51f1ddc637c099111076 Mon Sep 17 00:00:00 2001
From: Joe Orton <jorton@redhat.com>
Date: Tue, 7 Jul 2020 09:48:01 +0100
Subject: [PATCH] Check and use gettid() directly with glibc 2.30+.
* configure.in: Check for gettid() and define HAVE_SYS_GETTID if
gettid() is only usable via syscall().
* server/log.c (log_tid): Use gettid() directly if available.
---
configure.in | 14 +++++++++-----
server/log.c | 8 ++++++--
2 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/configure.in b/configure.in
index 423d58d4b9a..60cbf7b7f81 100644
--- httpd-2.4.43/configure.in.gettid
+++ httpd-2.4.43/configure.in
@@ -478,7 +500,8 @@
index a3c994b..9a4351a 100644
--- a/configure.in
+++ b/configure.in
@@ -524,7 +524,8 @@ prctl \
timegm \
getpgid \
fopen64 \
@ -26,7 +12,7 @@ index 423d58d4b9a..60cbf7b7f81 100644
)
dnl confirm that a void pointer is large enough to store a long integer
@@ -489,16 +512,19 @@
@@ -535,16 +536,19 @@ AC_CHECK_LIB(selinux, is_selinux_enabled, [
APR_ADDTO(HTTPD_LIBS, [-lselinux])
])
@ -50,8 +36,10 @@ index 423d58d4b9a..60cbf7b7f81 100644
fi
dnl ## Check for the tm_gmtoff field in struct tm to get the timezone diffs
--- httpd-2.4.43/server/log.c.gettid
+++ httpd-2.4.43/server/log.c
diff --git a/server/log.c b/server/log.c
index cc04c38..ed3b920 100644
--- a/server/log.c
+++ b/server/log.c
@@ -55,7 +55,7 @@
#include "ap_mpm.h"
#include "ap_listen.h"
@ -61,7 +49,7 @@ index 423d58d4b9a..60cbf7b7f81 100644
#include <sys/syscall.h>
#include <sys/types.h>
#endif
@@ -625,14 +625,18 @@
@@ -627,14 +627,18 @@ static int log_tid(const ap_errorlog_info *info, const char *arg,
#if APR_HAS_THREADS
int result;
#endif
@ -82,7 +70,7 @@ index 423d58d4b9a..60cbf7b7f81 100644
#if APR_HAS_THREADS
if (ap_mpm_query(AP_MPMQ_IS_THREADED, &result) == APR_SUCCESS
&& result != AP_MPMQ_NOT_SUPPORTED)
@@ -966,7 +970,7 @@
@@ -968,7 +972,7 @@ static int do_errorlog_default(const ap_errorlog_info *info, char *buf,
#if APR_HAS_THREADS
field_start = len;
len += cpystrn(buf + len, ":tid ", buflen - len);
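
For reference, a minimal standalone sketch of the detection pattern this patch describes, assuming HAVE_GETTID and HAVE_SYS_GETTID stand in for the results of the configure checks (illustrative only, not the httpd server/log.c code): prefer the glibc 2.30+ gettid() wrapper, and fall back to syscall(SYS_gettid) only when the wrapper is unavailable.

#define _GNU_SOURCE            /* gettid() and syscall() are GNU extensions */
#include <stdio.h>
#include <unistd.h>            /* gettid() on glibc >= 2.30 */
#include <sys/types.h>
#if !defined(HAVE_GETTID) && defined(HAVE_SYS_GETTID)
#include <sys/syscall.h>       /* syscall(), SYS_gettid */
#endif

/* Hypothetical helper, not part of httpd: returns a thread id via
 * whichever mechanism the (assumed) configure results selected. */
static long current_tid(void)
{
#if defined(HAVE_GETTID)
    return (long)gettid();             /* direct libc wrapper */
#elif defined(HAVE_SYS_GETTID)
    return (long)syscall(SYS_gettid);  /* raw syscall fallback */
#else
    return (long)getpid();             /* no per-thread id available */
#endif
}

int main(void)
{
    printf("tid: %ld\n", current_tid());
    return 0;
}
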


@ -1,8 +1,8 @@
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index e599515..154ab21 100644
index 537c3c2..596320d 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -1200,11 +1200,20 @@ static int proxy_handler(request_rec *r)
@@ -1460,11 +1460,20 @@ static int proxy_handler(request_rec *r)
/* handle the scheme */
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01142)
"Trying to run scheme_handler against proxy");
@ -23,7 +23,7 @@ index e599515..154ab21 100644
/* Did the scheme handler process the request? */
if (access_status != DECLINED) {
const char *cl_a;
@@ -1620,8 +1629,8 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
@@ -1902,8 +1911,8 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
return new;
}
@ -34,7 +34,7 @@ index e599515..154ab21 100644
{
server_rec *s = cmd->server;
proxy_server_conf *conf =
@@ -1679,19 +1688,24 @@ static const char *
@@ -1961,19 +1970,24 @@ static const char *
new->port = port;
new->regexp = reg;
new->use_regex = regex;
@ -65,7 +65,7 @@ index e599515..154ab21 100644
}
PROXY_DECLARE(const char *) ap_proxy_de_socketfy(apr_pool_t *p, const char *url)
@@ -2637,9 +2651,9 @@ static const command_rec proxy_cmds[] =
@@ -3012,9 +3026,9 @@ static const command_rec proxy_cmds[] =
"location, in regular expression syntax"),
AP_INIT_FLAG("ProxyRequests", set_proxy_req, NULL, RSRC_CONF,
"on if the true proxy requests should be accepted"),
@ -78,10 +78,10 @@ index e599515..154ab21 100644
AP_INIT_FLAG("ProxyPassInterpolateEnv", ap_set_flag_slot_char,
(void*)APR_OFFSETOF(proxy_dir_conf, interpolate_env),
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 895b937..538839f 100644
index c51145e..eaf431d 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -116,6 +116,7 @@ struct proxy_remote {
@@ -121,6 +121,7 @@ struct proxy_remote {
const char *protocol; /* the scheme used to talk to this proxy */
const char *hostname; /* the hostname of this proxy */
ap_regex_t *regexp; /* compiled regex (if any) for the remote */
@ -90,10 +90,10 @@ index 895b937..538839f 100644
apr_port_t port; /* the port for this proxy */
};
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index e7ffe33..50561a4 100644
index caafde0..ea36465 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -2474,11 +2474,14 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
@@ -2708,11 +2708,14 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
* So let's make it configurable by env.
* The logic here is the same used in mod_proxy_http.
*/
@ -111,7 +111,7 @@ index e7ffe33..50561a4 100644
forward->proxy_auth = apr_pstrdup(conn->pool, proxy_auth);
}
}
@@ -2714,7 +2717,8 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend,
@@ -2948,7 +2951,8 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend,
nbytes = apr_snprintf(buffer, sizeof(buffer),
"CONNECT %s:%d HTTP/1.0" CRLF,
forward->target_host, forward->target_port);
@ -121,23 +121,23 @@ index e7ffe33..50561a4 100644
if (forward->proxy_auth != NULL) {
nbytes += apr_snprintf(buffer + nbytes, sizeof(buffer) - nbytes,
"Proxy-Authorization: %s" CRLF,
@@ -3627,6 +3631,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
apr_bucket *e;
int do_100_continue;
@@ -3909,6 +3913,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
int force10 = 0, do_100_continue = 0;
conn_rec *origin = p_conn->connection;
const char *host, *val;
+ const char *creds;
proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
/*
@@ -3803,6 +3808,11 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
return HTTP_BAD_REQUEST;
}
@@ -4131,6 +4136,11 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
/* run hook to fixup the request we are about to send */
proxy_run_fixups(r);
+ creds = apr_table_get(r->notes, "proxy-basic-creds");
+ if (creds) {
+ apr_table_mergen(r->headers_in, "Proxy-Authorization", creds);
+ }
+
/* send request headers */
headers_in_array = apr_table_elts(r->headers_in);
headers_in = (const apr_table_entry_t *) headers_in_array->elts;
/* We used to send `Host: ` always first, so let's keep it that
* way. No telling which legacy backend is relying on this.
* If proxy_run_fixups() changed the value, use it (though removal


@ -1,8 +1,8 @@
diff --git a/configure.in b/configure.in
index c8f9aa2..cb43246 100644
index 1e342bb..a3c994b 100644
--- a/configure.in
+++ b/configure.in
@@ -484,6 +484,11 @@ getloadavg
@@ -530,6 +530,11 @@ getloadavg
dnl confirm that a void pointer is large enough to store a long integer
APACHE_CHECK_VOID_PTR_LEN
@ -15,10 +15,10 @@ index c8f9aa2..cb43246 100644
[AC_TRY_RUN(#define _GNU_SOURCE
#include <unistd.h>
diff --git a/server/core.c b/server/core.c
index dc0f17a..7ed9527 100644
index ca33d94..41e9bdc 100644
--- a/server/core.c
+++ b/server/core.c
@@ -59,6 +59,10 @@
@@ -65,6 +65,10 @@
#include <unistd.h>
#endif
@ -28,8 +28,8 @@ index dc0f17a..7ed9527 100644
+
/* LimitRequestBody handling */
#define AP_LIMIT_REQ_BODY_UNSET ((apr_off_t) -1)
#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 0)
@@ -5015,6 +5019,28 @@ static int core_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *pte
#define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 1<<30) /* 1GB */
@@ -5157,6 +5161,28 @@ static int core_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *pte
}
#endif


@ -12,8 +12,8 @@
Summary: Apache HTTP Server
Name: httpd
Version: 2.4.53
Release: 11%{?dist}
Version: 2.4.57
Release: 1%{?dist}
URL: https://httpd.apache.org/
Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2
Source1: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2.asc
@ -73,29 +73,25 @@ Patch21: httpd-2.4.48-r1842929+.patch
Patch22: httpd-2.4.43-mod_systemd.patch
Patch23: httpd-2.4.48-export.patch
Patch24: httpd-2.4.43-corelimit.patch
Patch25: httpd-2.4.43-selinux.patch
Patch26: httpd-2.4.43-gettid.patch
Patch25: httpd-2.4.57-selinux.patch
Patch26: httpd-2.4.57-gettid.patch
Patch27: httpd-2.4.53-icons.patch
Patch30: httpd-2.4.43-cachehardmax.patch
Patch34: httpd-2.4.43-socket-activation.patch
Patch38: httpd-2.4.43-sslciphdefault.patch
Patch39: httpd-2.4.43-sslprotdefault.patch
Patch40: httpd-2.4.43-r1861269.patch
Patch41: httpd-2.4.43-r1861793+.patch
Patch42: httpd-2.4.48-r1828172+.patch
Patch45: httpd-2.4.43-logjournal.patch
Patch46: httpd-2.4.48-proxy-ws-idle-timeout.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1949969
Patch47: httpd-2.4.43-pr37355.patch
Patch47: httpd-2.4.57-pr37355.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1949606
Patch48: httpd-2.4.46-freebind.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1950021
Patch49: httpd-2.4.48-ssl-proxy-chains.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2004143
Patch50: httpd-2.4.48-r1825120.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2079939
# backported regression fix
Patch51: httpd-2.4.53-r1901199.patch
Patch50: httpd-2.4.57-r1825120.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2065677
Patch52: httpd-2.4.53-separate-systemd-fns.patch
@ -114,30 +110,10 @@ Patch66: httpd-2.4.51-r1892413+.patch
Patch67: httpd-2.4.51-r1811831.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2098056
Patch68: httpd-2.4.53-r1878890.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2151313
Patch69: httpd-2.4.53-proxy-util-loglevel.patch
# Security fixes
# https://bugzilla.redhat.com/show_bug.cgi?id=2094997
Patch200: httpd-2.4.53-CVE-2022-26377.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095006
Patch201: httpd-2.4.53-CVE-2022-28615.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095020
Patch202: httpd-2.4.53-CVE-2022-31813.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095002
Patch203: httpd-2.4.53-CVE-2022-28614.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095012
Patch204: httpd-2.4.53-CVE-2022-29404.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095015
Patch205: httpd-2.4.53-CVE-2022-30522.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095018
Patch206: httpd-2.4.53-CVE-2022-30556.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2161773
Patch207: httpd-2.4.53-CVE-2022-37436.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2161774
Patch208: httpd-2.4.53-CVE-2006-20001.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2161777
Patch209: httpd-2.4.53-CVE-2022-36760.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=...
# Patch200: ...
License: ASL 2.0
BuildRequires: gcc, autoconf, pkgconfig, findutils, xmlto
@ -286,7 +262,6 @@ written in the Lua programming language.
%patch34 -p1 -b .socketactivation
%patch38 -p1 -b .sslciphdefault
%patch39 -p1 -b .sslprotdefault
%patch40 -p1 -b .r1861269
%patch41 -p1 -b .r1861793+
%patch42 -p1 -b .r1828172+
%patch45 -p1 -b .logjournal
@ -295,7 +270,6 @@ written in the Lua programming language.
%patch48 -p1 -b .freebind
%patch49 -p1 -b .ssl-proxy-chains
%patch50 -p1 -b .r1825120
%patch51 -p1 -b .r1901199
%patch52 -p1 -b .separatesystemd
%patch60 -p1 -b .enable-sslv3
@ -305,18 +279,6 @@ written in the Lua programming language.
%patch66 -p1 -b .r1892413+
%patch67 -p1 -b .r1811831
%patch68 -p1 -b .r1878890
%patch69 -p1 -b .proxyutil-loglevel
%patch200 -p1 -b .CVE-2022-26377
%patch201 -p1 -b .CVE-2022-28615
%patch202 -p1 -b .CVE-2022-31813
%patch203 -p1 -b .CVE-2022-28614
%patch204 -p1 -b .CVE-2022-29404
%patch205 -p1 -b .CVE-2022-30522
%patch206 -p1 -b .CVE-2022-30556
%patch207 -p1 -b .CVE-2022-37436
%patch208 -p1 -b .CVE-2006-20001
%patch209 -p1 -b .CVE-2022-36760
# Patch in the vendor string
sed -i '/^#define PLATFORM/s/Unix/%{vstring}/' os/unix/os.h
@ -876,6 +838,11 @@ exit $rv
%{_rpmconfigdir}/macros.d/macros.httpd
%changelog
* Tue Apr 11 2023 Luboš Uhliarik <luhliari@redhat.com> - 2.4.57-1
- Resolves: #2184403 - rebase httpd to 2.4.57
- Resolves: #2177753 - CVE-2023-25690 httpd: HTTP request splitting with
mod_rewrite and mod_proxy
* Mon Jan 30 2023 Luboš Uhliarik <luhliari@redhat.com> - 2.4.53-11
- Resolves: #2162500 - CVE-2006-20001 httpd: mod_dav: out-of-bounds read/write
of zero byte


@ -1,3 +1,3 @@
SHA512 (httpd-2.4.53.tar.bz2) = 07ef59594251a30a864cc9cc9a58ab788c2d006cef85b728f29533243927c63cb063e0867f2a306f37324c3adb9cf7dcb2402f3516b05c2c6f32469d475dd756
SHA512 (httpd-2.4.53.tar.bz2.asc) = 553df571cf8edda9146c2aaadce7e5a204f9aa8bd05b165dd81e2339db830c06bdb2b546321d3ab1dcc3133a7d37bbbeb31944b725d8a5fc6b6dc389a5c25686
SHA512 (httpd-2.4.57.tar.bz2) = 4d1e0a274ee90bdfb5f38d4a7d73a7367ed1c6388e26280e640014e49abc0df03683705b88dcfe2ec2da313dda4c7b4a3b86daffa1911f58e224eba89d82d155
SHA512 (httpd-2.4.57.tar.bz2.asc) = 3d40491da7610b91894ea24d011da213c0ba4c04dbf3d5dbefac704ba55d9a56acf375dd61363f50291a748ef5f14e7d2dcba96b15a8ce448267bfeb26bf7ecd
SHA512 (KEYS) = 88c848b7ab9e4915d6625dcad3e8328673b0448f2ce76f2c44eecc612cf6afbce3287a4ee7219a44c6fcc61d5ecb2a1a8545456a4a16b90400263d7249cbf192