Resolves: #2221083 - Apache Bug 57087: mod_proxy_fcgi doesn't send cgi
CONTENT_LENGTH variable when the client request used Transfer-Encoding:chunked
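In short: for a chunked request there is no client Content-Length, so mod_proxy_fcgi used to leave the CGI CONTENT_LENGTH variable unset. The backported upstream revision r1885607 (httpd-2.4.37-r1885607.patch below) makes the handler prefetch up to 16 KiB of the request body before it talks to the backend; if the whole body fits, a real Content-Length is derived from the prefetched bytes (and Transfer-Encoding dropped), otherwise the body is streamed or spooled to a temporary file first. The following is a simplified, standalone sketch of that election logic; the type and function names are illustrative only, not httpd's API (the real code is in proxy_fcgi_handler() inside the patch).

/* Simplified model of the body-handling election added by r1885607.
 * Names here are illustrative; the actual implementation lives in
 * proxy_fcgi_handler() and uses ap_proxy_prefetch_input() /
 * ap_proxy_spool_input() from proxy_util.c. */
#include <stdio.h>

#define MAX_MEM_SPOOL 16384   /* same prefetch/spool threshold as the patch */

enum body_mode {
    SEND_CL_FROM_PREFETCH,    /* whole body prefetched: C-L = bytes read, drop T-E */
    KEEP_CLIENT_CL,           /* stream, client's Content-Length still trustworthy */
    USE_TE_CHUNKED,           /* stream with chunked Transfer-Encoding             */
    SPOOL_THEN_CL             /* spool to memory/disk first, then send C-L         */
};

static enum body_mode elect_body_mode(int whole_body_prefetched,
                                      int client_sent_cl,
                                      int only_protocol_filters,
                                      int proxy_sendcl_set)
{
    if (whole_body_prefetched)
        return SEND_CL_FROM_PREFETCH;
    if (client_sent_cl && only_protocol_filters)
        return KEEP_CLIENT_CL;
    if (!proxy_sendcl_set)
        return USE_TE_CHUNKED;
    return SPOOL_THEN_CL;
}

int main(void)
{
    printf("prefetch/spool threshold: %d bytes\n", MAX_MEM_SPOOL);

    /* A small chunked upload that fits within the prefetch buffer: the
     * FastCGI backend now receives CONTENT_LENGTH instead of nothing. */
    printf("small chunked body -> mode %d\n",
           (int)elect_body_mode(1, 0, 1, 0));

    /* A large chunked upload with "proxy-sendcl" set: spooled first. */
    printf("large chunked body + proxy-sendcl -> mode %d\n",
           (int)elect_body_mode(0, 0, 1, 1));
    return 0;
}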
parent 9c83050d1b
commit 3292c1f84b

clog
@@ -1,3 +0,0 @@
Resolves: #2176723 - CVE-2023-27522 httpd:2.4/httpd: mod_proxy_uwsgi HTTP
response splitting
@@ -75,44 +75,8 @@ index 6bedcac..393343a 100644
#ifdef AP_DEBUG
{
/* Make sure ap_getline() didn't leave any droppings. */
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index 7da9bde..1b7bb81 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -439,13 +439,10 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
apr_bucket *e;
apr_off_t bytes, fsize = 0;
apr_file_t *tmpfile = NULL;
- apr_off_t limit;

body_brigade = apr_brigade_create(p, bucket_alloc);
*bytes_spooled = 0;

- limit = ap_get_limit_req_body(r);
-
do {
if (APR_BRIGADE_EMPTY(input_brigade)) {
rv = stream_reqbody_read(req, input_brigade, 0);
@@ -462,17 +459,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
apr_brigade_length(input_brigade, 1, &bytes);

if (*bytes_spooled + bytes > MAX_MEM_SPOOL) {
- /*
- * LimitRequestBody does not affect Proxy requests (Should it?).
- * Let it take effect if we decide to store the body in a
- * temporary file on disk.
- */
- if (limit && (*bytes_spooled + bytes > limit)) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
- "Request body is larger than the configured "
- "limit of %" APR_OFF_T_FMT, limit);
- return HTTP_REQUEST_ENTITY_TOO_LARGE;
- }
/* can't spool any more in memory; write latest brigade to disk */
if (tmpfile == NULL) {
const char *temp_dir;
diff --git a/server/core.c b/server/core.c
index 09664fc..084e243 100644
index a0bfaad..6556f20 100644
--- a/server/core.c
+++ b/server/core.c
@@ -65,7 +65,7 @@
@@ -1,8 +1,8 @@
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 3d5b220..ec9a414 100644
index efcc6ca..6626ea0 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -3621,12 +3621,14 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3631,12 +3631,14 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
char **old_cl_val,
char **old_te_val)
{
@@ -18,7 +18,7 @@ index 3d5b220..ec9a414 100644
apr_bucket *e;
int do_100_continue;
conn_rec *origin = p_conn->connection;
@@ -3662,6 +3664,52 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3672,6 +3674,52 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
ap_xlate_proto_to_ascii(buf, strlen(buf));
e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
@@ -71,7 +71,7 @@ index 3d5b220..ec9a414 100644
if (dconf->preserve_host == 0) {
if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
@@ -3683,7 +3731,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3693,7 +3741,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
/* don't want to use r->hostname, as the incoming header might have a
* port attached
*/
@@ -80,7 +80,7 @@ index 3d5b220..ec9a414 100644
if (!hostname) {
hostname = r->server->server_hostname;
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092)
@@ -3697,21 +3745,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3707,21 +3755,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
ap_xlate_proto_to_ascii(buf, strlen(buf));
e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
@@ -103,7 +103,7 @@ index 3d5b220..ec9a414 100644

/* handle Via */
if (conf->viaopt == via_block) {
@@ -3778,8 +3812,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3788,8 +3822,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
*/
if (dconf->add_forwarded_headers) {
if (PROXYREQ_REVERSE == r->proxyreq) {
@@ -112,7 +112,7 @@ index 3d5b220..ec9a414 100644
/* Add X-Forwarded-For: so that the upstream has a chance to
* determine, where the original request came from.
*/
@@ -3789,8 +3821,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3799,8 +3831,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
/* Add X-Forwarded-Host: so that upstream knows what the
* original request hostname was.
*/
@@ -124,7 +124,7 @@ index 3d5b220..ec9a414 100644
}

/* Add X-Forwarded-Server: so that upstream knows what the
@@ -3802,10 +3835,27 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3812,10 +3845,27 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
}
}

@@ -155,7 +155,7 @@ index 3d5b220..ec9a414 100644

creds = apr_table_get(r->notes, "proxy-basic-creds");
if (creds) {
@@ -3817,55 +3867,8 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3827,55 +3877,8 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
headers_in = (const apr_table_entry_t *) headers_in_array->elts;
for (counter = 0; counter < headers_in_array->nelts; counter++) {
if (headers_in[counter].key == NULL
@@ -213,7 +213,7 @@ index 3d5b220..ec9a414 100644
}

buf = apr_pstrcat(p, headers_in[counter].key, ": ",
@@ -3876,11 +3879,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
@@ -3886,11 +3889,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
APR_BRIGADE_INSERT_TAIL(header_brigade, e);
}

@@ -226,4 +226,4 @@ index 3d5b220..ec9a414 100644
+ return rc;
}

PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
httpd-2.4.37-r1885607.patch (new file, 849 lines)

@@ -0,0 +1,849 @@
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index fbbd508..8fcd26d 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -1168,6 +1168,55 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
char **old_cl_val,
char **old_te_val);

+/**
+ * Prefetch the client request body (in memory), up to a limit.
+ * Read what's in the client pipe. If nonblocking is set and read is EAGAIN,
+ * pass a FLUSH bucket to the backend and read again in blocking mode.
+ * @param r client request
+ * @param backend backend connection
+ * @param input_brigade input brigade to use/fill
+ * @param block blocking or non-blocking mode
+ * @param bytes_read number of bytes read
+ * @param max_read maximum number of bytes to read
+ * @return OK or HTTP_* error code
+ * @note max_read is rounded up to APR_BUCKET_BUFF_SIZE
+ */
+PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_read_type_e block,
+ apr_off_t *bytes_read,
+ apr_off_t max_read);
+
+/**
+ * Spool the client request body to memory, or disk above given limit.
+ * @param r client request
+ * @param backend backend connection
+ * @param input_brigade input brigade to use/fill
+ * @param bytes_spooled number of bytes spooled
+ * @param max_mem_spool maximum number of in-memory bytes
+ * @return OK or HTTP_* error code
+ */
+PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_off_t *bytes_spooled,
+ apr_off_t max_mem_spool);
+
+/**
+ * Read what's in the client pipe. If the read would block (EAGAIN),
+ * pass a FLUSH bucket to the backend and read again in blocking mode.
+ * @param r client request
+ * @param backend backend connection
+ * @param input_brigade brigade to use/fill
+ * @param max_read maximum number of bytes to read
+ * @return OK or HTTP_* error code
+ */
+PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_off_t max_read);
+
/**
* @param bucket_alloc bucket allocator
* @param r request
diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c
index 2e97408..f9cf716 100644
--- a/modules/proxy/mod_proxy_fcgi.c
+++ b/modules/proxy/mod_proxy_fcgi.c
@@ -521,7 +521,8 @@ static int handle_headers(request_rec *r, int *state,
static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
request_rec *r, apr_pool_t *setaside_pool,
apr_uint16_t request_id, const char **err,
- int *bad_request, int *has_responded)
+ int *bad_request, int *has_responded,
+ apr_bucket_brigade *input_brigade)
{
apr_bucket_brigade *ib, *ob;
int seen_end_of_headers = 0, done = 0, ignore_body = 0;
@@ -583,9 +584,26 @@ static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
int last_stdin = 0;
char *iobuf_cursor;

- rv = ap_get_brigade(r->input_filters, ib,
- AP_MODE_READBYTES, APR_BLOCK_READ,
- iobuf_size);
+ if (APR_BRIGADE_EMPTY(input_brigade)) {
+ rv = ap_get_brigade(r->input_filters, ib,
+ AP_MODE_READBYTES, APR_BLOCK_READ,
+ iobuf_size);
+ }
+ else {
+ apr_bucket *e;
+ APR_BRIGADE_CONCAT(ib, input_brigade);
+ rv = apr_brigade_partition(ib, iobuf_size, &e);
+ if (rv == APR_SUCCESS) {
+ while (e != APR_BRIGADE_SENTINEL(ib)
+ && APR_BUCKET_IS_METADATA(e)) {
+ e = APR_BUCKET_NEXT(e);
+ }
+ apr_brigade_split_ex(ib, e, input_brigade);
+ }
+ else if (rv == APR_INCOMPLETE) {
+ rv = APR_SUCCESS;
+ }
+ }
if (rv != APR_SUCCESS) {
*err = "reading input brigade";
*bad_request = 1;
@@ -924,7 +942,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
conn_rec *origin,
proxy_dir_conf *conf,
apr_uri_t *uri,
- char *url, char *server_portstr)
+ char *url, char *server_portstr,
+ apr_bucket_brigade *input_brigade)
{
/* Request IDs are arbitrary numbers that we assign to a
* single request. This would allow multiplex/pipelining of
@@ -960,7 +979,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,

/* Step 3: Read records from the back end server and handle them. */
rv = dispatch(conn, conf, r, temp_pool, request_id,
- &err, &bad_request, &has_responded);
+ &err, &bad_request, &has_responded,
+ input_brigade);
if (rv != APR_SUCCESS) {
/* If the client aborted the connection during retrieval or (partially)
* sending the response, don't return a HTTP_SERVICE_UNAVAILABLE, since
@@ -996,6 +1016,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,

#define FCGI_SCHEME "FCGI"

+#define MAX_MEM_SPOOL 16384
+
/*
* This handles fcgi:(dest) URLs
*/
@@ -1008,6 +1030,8 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
char server_portstr[32];
conn_rec *origin = NULL;
proxy_conn_rec *backend = NULL;
+ apr_bucket_brigade *input_brigade;
+ apr_off_t input_bytes = 0;
apr_uri_t *uri;

proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
@@ -1050,6 +1074,101 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
goto cleanup;
}

+ /* We possibly reuse input data prefetched in previous call(s), e.g. for a
+ * balancer fallback scenario.
+ */
+ apr_pool_userdata_get((void **)&input_brigade, "proxy-fcgi-input", p);
+ if (input_brigade == NULL) {
+ const char *old_te = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *old_cl = NULL;
+ if (old_te) {
+ apr_table_unset(r->headers_in, "Content-Length");
+ }
+ else {
+ old_cl = apr_table_get(r->headers_in, "Content-Length");
+ }
+
+ input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
+ apr_pool_userdata_setn(input_brigade, "proxy-fcgi-input", NULL, p);
+
+ /* Prefetch (nonlocking) the request body so to increase the chance
+ * to get the whole (or enough) body and determine Content-Length vs
+ * chunked or spooled. By doing this before connecting or reusing the
+ * backend, we want to minimize the delay between this connection is
+ * considered alive and the first bytes sent (should the client's link
+ * be slow or some input filter retain the data). This is a best effort
+ * to prevent the backend from closing (from under us) what it thinks is
+ * an idle connection, hence to reduce to the minimum the unavoidable
+ * local is_socket_connected() vs remote keepalive race condition.
+ */
+ status = ap_proxy_prefetch_input(r, backend, input_brigade,
+ APR_NONBLOCK_READ, &input_bytes,
+ MAX_MEM_SPOOL);
+ if (status != OK) {
+ goto cleanup;
+ }
+
+ /*
+ * The request body is streamed by default, using either C-L or
+ * chunked T-E, like this:
+ *
+ * The whole body (including no body) was received on prefetch, i.e.
+ * the input brigade ends with EOS => C-L = input_bytes.
+ *
+ * C-L is known and reliable, i.e. only protocol filters in the input
+ * chain thus none should change the body => use C-L from client.
+ *
+ * The administrator has not "proxy-sendcl" which prevents T-E => use
+ * T-E and chunks.
+ *
+ * Otherwise we need to determine and set a content-length, so spool
+ * the entire request body to memory/temporary file (MAX_MEM_SPOOL),
+ * such that we finally know its length => C-L = input_bytes.
+ */
+ if (!APR_BRIGADE_EMPTY(input_brigade)
+ && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ /* The whole thing fit, so our decision is trivial, use the input
+ * bytes for the Content-Length. If we expected no body, and read
+ * no body, do not set the Content-Length.
+ */
+ if (old_cl || old_te || input_bytes) {
+ apr_table_setn(r->headers_in, "Content-Length",
+ apr_off_t_toa(p, input_bytes));
+ if (old_te) {
+ apr_table_unset(r->headers_in, "Transfer-Encoding");
+ }
+ }
+ }
+ else if (old_cl && r->input_filters == r->proto_input_filters) {
+ /* Streaming is possible by preserving the existing C-L */
+ }
+ else if (!apr_table_get(r->subprocess_env, "proxy-sendcl")) {
+ /* Streaming is possible using T-E: chunked */
+ }
+ else {
+ /* No streaming, C-L is the only option so spool to memory/file */
+ apr_bucket_brigade *tmp_bb;
+ apr_off_t remaining_bytes = 0;
+
+ AP_DEBUG_ASSERT(MAX_MEM_SPOOL >= input_bytes);
+ tmp_bb = apr_brigade_create(p, r->connection->bucket_alloc);
+ status = ap_proxy_spool_input(r, backend, tmp_bb, &remaining_bytes,
+ MAX_MEM_SPOOL - input_bytes);
+ if (status != OK) {
+ goto cleanup;
+ }
+
+ APR_BRIGADE_CONCAT(input_brigade, tmp_bb);
+ input_bytes += remaining_bytes;
+
+ apr_table_setn(r->headers_in, "Content-Length",
+ apr_off_t_toa(p, input_bytes));
+ if (old_te) {
+ apr_table_unset(r->headers_in, "Transfer-Encoding");
+ }
+ }
+ }
+
/* This scheme handler does not reuse connections by default, to
* avoid tying up a fastcgi that isn't expecting to work on
* parallel requests. But if the user went out of their way to
@@ -1074,7 +1193,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,

/* Step Three: Process the Request */
status = fcgi_do_request(p, r, backend, origin, dconf, uri, url,
- server_portstr);
+ server_portstr, input_brigade);

cleanup:
ap_proxy_release_connection(FCGI_SCHEME, backend, r->server);
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index df10997..7f67f26 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -266,50 +266,6 @@ typedef struct {
prefetch_nonblocking:1;
} proxy_http_req_t;

-/* Read what's in the client pipe. If nonblocking is set and read is EAGAIN,
- * pass a FLUSH bucket to the backend and read again in blocking mode.
- */
-static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb,
- int nonblocking)
-{
- request_rec *r = req->r;
- proxy_conn_rec *p_conn = req->backend;
- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
- apr_read_type_e block = nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ;
- apr_status_t status;
- int rv;
-
- for (;;) {
- status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
- block, HUGE_STRING_LEN);
- if (block == APR_BLOCK_READ
- || (!APR_STATUS_IS_EAGAIN(status)
- && (status != APR_SUCCESS || !APR_BRIGADE_EMPTY(bb)))) {
- break;
- }
-
- /* Flush and retry (blocking) */
- apr_brigade_cleanup(bb);
- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, bb, 1);
- if (rv != OK) {
- return rv;
- }
- block = APR_BLOCK_READ;
- }
-
- if (status != APR_SUCCESS) {
- conn_rec *c = r->connection;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608)
- "read request body failed to %pI (%s)"
- " from %s (%s)", p_conn->addr,
- p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
- }
-
- return OK;
-}
-
static int stream_reqbody(proxy_http_req_t *req)
{
request_rec *r = req->r;
@@ -328,7 +284,8 @@ static int stream_reqbody(proxy_http_req_t *req)
do {
if (APR_BRIGADE_EMPTY(input_brigade)
&& APR_BRIGADE_EMPTY(header_brigade)) {
- rv = stream_reqbody_read(req, input_brigade, 1);
+ rv = ap_proxy_read_input(r, p_conn, input_brigade,
+ HUGE_STRING_LEN);
if (rv != OK) {
return rv;
}
@@ -409,7 +366,7 @@ static int stream_reqbody(proxy_http_req_t *req)
*/
APR_BRIGADE_PREPEND(input_brigade, header_brigade);

- /* Flush here on EOS because we won't stream_reqbody_read() again */
+ /* Flush here on EOS because we won't ap_proxy_read_input() again. */
rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin,
input_brigade, seen_eos);
if (rv != OK) {
@@ -427,137 +384,6 @@ static int stream_reqbody(proxy_http_req_t *req)
return OK;
}

-static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
-{
- apr_pool_t *p = req->p;
- request_rec *r = req->r;
- int seen_eos = 0, rv = OK;
- apr_status_t status = APR_SUCCESS;
- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
- apr_bucket_brigade *input_brigade = req->input_brigade;
- apr_bucket_brigade *body_brigade;
- apr_bucket *e;
- apr_off_t bytes, fsize = 0;
- apr_file_t *tmpfile = NULL;
- apr_off_t limit;
-
- body_brigade = apr_brigade_create(p, bucket_alloc);
- *bytes_spooled = 0;
-
- limit = ap_get_limit_req_body(r);
-
- do {
- if (APR_BRIGADE_EMPTY(input_brigade)) {
- rv = stream_reqbody_read(req, input_brigade, 0);
- if (rv != OK) {
- return rv;
- }
- }
-
- /* If this brigade contains EOS, either stop or remove it. */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
- seen_eos = 1;
- }
-
- apr_brigade_length(input_brigade, 1, &bytes);
-
- if (*bytes_spooled + bytes > MAX_MEM_SPOOL) {
- /*
- * LimitRequestBody does not affect Proxy requests (Should it?).
- * Let it take effect if we decide to store the body in a
- * temporary file on disk.
- */
- if (limit && (*bytes_spooled + bytes > limit)) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
- "Request body is larger than the configured "
- "limit of %" APR_OFF_T_FMT, limit);
- return HTTP_REQUEST_ENTITY_TOO_LARGE;
- }
- /* can't spool any more in memory; write latest brigade to disk */
- if (tmpfile == NULL) {
- const char *temp_dir;
- char *template;
-
- status = apr_temp_dir_get(&temp_dir, p);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089)
- "search for temporary directory failed");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- apr_filepath_merge(&template, temp_dir,
- "modproxy.tmp.XXXXXX",
- APR_FILEPATH_NATIVE, p);
- status = apr_file_mktemp(&tmpfile, template, 0, p);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090)
- "creation of temporary file in directory "
- "%s failed", temp_dir);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- }
- for (e = APR_BRIGADE_FIRST(input_brigade);
- e != APR_BRIGADE_SENTINEL(input_brigade);
- e = APR_BUCKET_NEXT(e)) {
- const char *data;
- apr_size_t bytes_read, bytes_written;
-
- apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ);
- status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written);
- if (status != APR_SUCCESS) {
- const char *tmpfile_name;
-
- if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) {
- tmpfile_name = "(unknown)";
- }
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091)
- "write to temporary file %s failed",
- tmpfile_name);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- AP_DEBUG_ASSERT(bytes_read == bytes_written);
- fsize += bytes_written;
- }
- apr_brigade_cleanup(input_brigade);
- }
- else {
-
- /*
- * Save input_brigade in body_brigade. (At least) in the SSL case
- * input_brigade contains transient buckets whose data would get
- * overwritten during the next call of ap_get_brigade in the loop.
- * ap_save_brigade ensures these buckets to be set aside.
- * Calling ap_save_brigade with NULL as filter is OK, because
- * body_brigade already has been created and does not need to get
- * created by ap_save_brigade.
- */
- status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
- if (status != APR_SUCCESS) {
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-
- }
-
- *bytes_spooled += bytes;
- } while (!seen_eos);
-
- APR_BRIGADE_CONCAT(input_brigade, body_brigade);
- if (tmpfile) {
- apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p);
- }
- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
- e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(input_brigade, e);
- }
- if (tmpfile) {
- /* We dropped metadata buckets when spooling to tmpfile,
- * terminate with EOS for stream_reqbody() to flush the
- * whole in one go.
- */
- e = apr_bucket_eos_create(bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(input_brigade, e);
- }
- return OK;
-}

static int ap_proxy_http_prefetch(proxy_http_req_t *req,
apr_uri_t *uri, char *url)
@@ -569,14 +395,12 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
apr_bucket_brigade *header_brigade = req->header_brigade;
apr_bucket_brigade *input_brigade = req->input_brigade;
- apr_bucket_brigade *temp_brigade;
apr_bucket *e;
- char *buf;
apr_status_t status;
+ char *buf;
apr_off_t bytes_read = 0;
apr_off_t bytes;
int force10, rv;
- apr_read_type_e block;
conn_rec *origin = p_conn->connection;

if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
@@ -641,69 +465,12 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
p_conn->close = 1;
}

- /* Prefetch MAX_MEM_SPOOL bytes
- *
- * This helps us avoid any election of C-L v.s. T-E
- * request bodies, since we are willing to keep in
- * memory this much data, in any case. This gives
- * us an instant C-L election if the body is of some
- * reasonable size.
- */
- temp_brigade = apr_brigade_create(p, bucket_alloc);
- block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ;
-
- /* Account for saved input, if any. */
- apr_brigade_length(input_brigade, 0, &bytes_read);
-
- /* Ensure we don't hit a wall where we have a buffer too small
- * for ap_get_brigade's filters to fetch us another bucket,
- * surrender once we hit 80 bytes less than MAX_MEM_SPOOL
- * (an arbitrary value).
- */
- while (bytes_read < MAX_MEM_SPOOL - 80
- && (APR_BRIGADE_EMPTY(input_brigade)
- || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) {
- status = ap_get_brigade(r->input_filters, temp_brigade,
- AP_MODE_READBYTES, block,
- MAX_MEM_SPOOL - bytes_read);
- /* ap_get_brigade may return success with an empty brigade
- * for a non-blocking read which would block
- */
- if (block == APR_NONBLOCK_READ
- && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade))
- || APR_STATUS_IS_EAGAIN(status))) {
- break;
- }
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095)
- "prefetch request body failed to %pI (%s)"
- " from %s (%s)",
- p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
- }
-
- apr_brigade_length(temp_brigade, 1, &bytes);
- bytes_read += bytes;
-
- /*
- * Save temp_brigade in input_brigade. (At least) in the SSL case
- * temp_brigade contains transient buckets whose data would get
- * overwritten during the next call of ap_get_brigade in the loop.
- * ap_save_brigade ensures these buckets to be set aside.
- * Calling ap_save_brigade with NULL as filter is OK, because
- * input_brigade already has been created and does not need to get
- * created by ap_save_brigade.
- */
- status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p);
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096)
- "processing prefetched request body failed"
- " to %pI (%s) from %s (%s)",
- p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
- c->client_ip, c->remote_host ? c->remote_host: "");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
+ rv = ap_proxy_prefetch_input(r, req->backend, input_brigade,
+ req->prefetch_nonblocking ? APR_NONBLOCK_READ
+ : APR_BLOCK_READ,
+ &bytes_read, MAX_MEM_SPOOL);
+ if (rv != OK) {
+ return rv;
}

/* Use chunked request body encoding or send a content-length body?
@@ -772,7 +539,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
char *endstr;
status = apr_strtoff(&req->cl_val, req->old_cl_val, &endstr, 10);
if (status != APR_SUCCESS || *endstr || req->cl_val < 0) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085)
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01085)
"could not parse request Content-Length (%s)",
req->old_cl_val);
return HTTP_BAD_REQUEST;
@@ -812,7 +579,8 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
/* If we have to spool the body, do it now, before connecting or
* reusing the backend connection.
*/
- rv = spool_reqbody_cl(req, &bytes);
+ rv = ap_proxy_spool_input(r, p_conn, input_brigade,
+ &bytes, MAX_MEM_SPOOL);
if (rv != OK) {
return rv;
}
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index ab88d8f..973aa83 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -3866,6 +3866,268 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
return OK;
}

+PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_read_type_e block,
+ apr_off_t *bytes_read,
+ apr_off_t max_read)
+{
+ apr_pool_t *p = r->pool;
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *temp_brigade;
+ apr_status_t status;
+ apr_off_t bytes;
+
+ *bytes_read = 0;
+ if (max_read < APR_BUCKET_BUFF_SIZE) {
+ max_read = APR_BUCKET_BUFF_SIZE;
+ }
+
+ /* Prefetch max_read bytes
+ *
+ * This helps us avoid any election of C-L v.s. T-E
+ * request bodies, since we are willing to keep in
+ * memory this much data, in any case. This gives
+ * us an instant C-L election if the body is of some
+ * reasonable size.
+ */
+ temp_brigade = apr_brigade_create(p, input_brigade->bucket_alloc);
+
+ /* Account for saved input, if any. */
+ apr_brigade_length(input_brigade, 0, bytes_read);
+
+ /* Ensure we don't hit a wall where we have a buffer too small for
+ * ap_get_brigade's filters to fetch us another bucket, surrender
+ * once we hit 80 bytes (an arbitrary value) less than max_read.
+ */
+ while (*bytes_read < max_read - 80
+ && (APR_BRIGADE_EMPTY(input_brigade)
+ || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) {
+ status = ap_get_brigade(r->input_filters, temp_brigade,
+ AP_MODE_READBYTES, block,
+ max_read - *bytes_read);
+ /* ap_get_brigade may return success with an empty brigade
+ * for a non-blocking read which would block
+ */
+ if (block == APR_NONBLOCK_READ
+ && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade))
+ || APR_STATUS_IS_EAGAIN(status))) {
+ break;
+ }
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095)
+ "prefetch request body failed to %pI (%s)"
+ " from %s (%s)", backend->addr,
+ backend->hostname ? backend->hostname : "",
+ c->client_ip, c->remote_host ? c->remote_host : "");
+ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+ }
+
+ apr_brigade_length(temp_brigade, 1, &bytes);
+ *bytes_read += bytes;
+
+ /*
+ * Save temp_brigade in input_brigade. (At least) in the SSL case
+ * temp_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+ * ap_save_brigade ensures these buckets to be set aside.
+ * Calling ap_save_brigade with NULL as filter is OK, because
+ * input_brigade already has been created and does not need to get
+ * created by ap_save_brigade.
+ */
+ status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096)
+ "processing prefetched request body failed"
+ " to %pI (%s) from %s (%s)", backend->addr,
+ backend->hostname ? backend->hostname : "",
+ c->client_ip, c->remote_host ? c->remote_host : "");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+
+ return OK;
+}
+
+PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *bb,
+ apr_off_t max_read)
+{
+ apr_bucket_alloc_t *bucket_alloc = bb->bucket_alloc;
+ apr_read_type_e block = (backend->connection) ? APR_NONBLOCK_READ
+ : APR_BLOCK_READ;
+ apr_status_t status;
+ int rv;
+
+ for (;;) {
+ apr_brigade_cleanup(bb);
+ status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ block, max_read);
+ if (block == APR_BLOCK_READ
+ || (!(status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb))
+ && !APR_STATUS_IS_EAGAIN(status))) {
+ break;
+ }
+
+ /* Flush and retry (blocking) */
+ apr_brigade_cleanup(bb);
+ rv = ap_proxy_pass_brigade(bucket_alloc, r, backend,
+ backend->connection, bb, 1);
+ if (rv != OK) {
+ return rv;
+ }
+ block = APR_BLOCK_READ;
+ }
+
+ if (status != APR_SUCCESS) {
+ conn_rec *c = r->connection;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608)
+ "read request body failed to %pI (%s)"
+ " from %s (%s)", backend->addr,
+ backend->hostname ? backend->hostname : "",
+ c->client_ip, c->remote_host ? c->remote_host : "");
+ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+ }
+
+ return OK;
+}
+
+PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
+ proxy_conn_rec *backend,
+ apr_bucket_brigade *input_brigade,
+ apr_off_t *bytes_spooled,
+ apr_off_t max_mem_spool)
+{
+ apr_pool_t *p = r->pool;
+ int seen_eos = 0, rv = OK;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket_alloc_t *bucket_alloc = input_brigade->bucket_alloc;
+ apr_bucket_brigade *body_brigade;
+ apr_bucket *e;
+ apr_off_t bytes, fsize = 0;
+ apr_file_t *tmpfile = NULL;
+ apr_off_t limit;
+
+ *bytes_spooled = 0;
+ body_brigade = apr_brigade_create(p, bucket_alloc);
+
+ limit = ap_get_limit_req_body(r);
+
+ do {
+ if (APR_BRIGADE_EMPTY(input_brigade)) {
+ rv = ap_proxy_read_input(r, backend, input_brigade,
+ HUGE_STRING_LEN);
+ if (rv != OK) {
+ return rv;
+ }
+ }
+
+ /* If this brigade contains EOS, either stop or remove it. */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+ seen_eos = 1;
+ }
+
+ apr_brigade_length(input_brigade, 1, &bytes);
+
+ if (*bytes_spooled + bytes > max_mem_spool) {
+ /*
+ * LimitRequestBody does not affect Proxy requests (Should it?).
+ * Let it take effect if we decide to store the body in a
+ * temporary file on disk.
+ */
+ if (limit && (*bytes_spooled + bytes > limit)) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
+ "Request body is larger than the configured "
+ "limit of %" APR_OFF_T_FMT, limit);
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+ /* can't spool any more in memory; write latest brigade to disk */
+ if (tmpfile == NULL) {
+ const char *temp_dir;
+ char *template;
+
+ status = apr_temp_dir_get(&temp_dir, p);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089)
+ "search for temporary directory failed");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ apr_filepath_merge(&template, temp_dir,
+ "modproxy.tmp.XXXXXX",
+ APR_FILEPATH_NATIVE, p);
+ status = apr_file_mktemp(&tmpfile, template, 0, p);
+ if (status != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090)
+ "creation of temporary file in directory "
+ "%s failed", temp_dir);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ for (e = APR_BRIGADE_FIRST(input_brigade);
+ e != APR_BRIGADE_SENTINEL(input_brigade);
+ e = APR_BUCKET_NEXT(e)) {
+ const char *data;
+ apr_size_t bytes_read, bytes_written;
+
+ apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ);
+ status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written);
+ if (status != APR_SUCCESS) {
+ const char *tmpfile_name;
+
+ if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) {
+ tmpfile_name = "(unknown)";
+ }
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091)
+ "write to temporary file %s failed",
+ tmpfile_name);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ AP_DEBUG_ASSERT(bytes_read == bytes_written);
+ fsize += bytes_written;
+ }
+ apr_brigade_cleanup(input_brigade);
+ }
+ else {
+
+ /*
+ * Save input_brigade in body_brigade. (At least) in the SSL case
+ * input_brigade contains transient buckets whose data would get
+ * overwritten during the next call of ap_get_brigade in the loop.
+ * ap_save_brigade ensures these buckets to be set aside.
+ * Calling ap_save_brigade with NULL as filter is OK, because
+ * body_brigade already has been created and does not need to get
+ * created by ap_save_brigade.
+ */
+ status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
+ if (status != APR_SUCCESS) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ }
+
+ *bytes_spooled += bytes;
+ } while (!seen_eos);
+
+ APR_BRIGADE_CONCAT(input_brigade, body_brigade);
+ if (tmpfile) {
+ apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p);
+ }
+ if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
+ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ }
+ if (tmpfile) {
+ /* We dropped metadata buckets when spooling to tmpfile,
+ * terminate with EOS to allow for flushing in a one go.
+ */
+ e = apr_bucket_eos_create(bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+ }
+ return OK;
+}
+
PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
request_rec *r, proxy_conn_rec *p_conn,
conn_rec *origin, apr_bucket_brigade *bb,
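Why the backend cares, in one sketch: a (Fast)CGI application normally sizes its read of the request body from the CONTENT_LENGTH variable, which is exactly what used to be missing for chunked requests. Below is a minimal, hypothetical CGI-style reader (plain CGI over stdin, not the FastCGI wire protocol) showing that dependency; with the patch above applied, mod_proxy_fcgi can hand such a backend a concrete length even when the client sent Transfer-Encoding: chunked.

/* Minimal CGI-style body reader: trusts CONTENT_LENGTH to know how much
 * to read from stdin. Before this fix, a chunked client request proxied
 * through mod_proxy_fcgi arrived with no CONTENT_LENGTH at all. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *cl = getenv("CONTENT_LENGTH");
    long remaining = cl ? strtol(cl, NULL, 10) : 0;
    long total = 0;
    char buf[4096];

    while (remaining > 0) {
        size_t want = remaining < (long)sizeof(buf) ? (size_t)remaining
                                                    : sizeof(buf);
        size_t got = fread(buf, 1, want, stdin);
        if (got == 0)
            break;
        total += (long)got;
        remaining -= (long)got;
    }

    printf("Content-Type: text/plain\r\n\r\n");
    printf("read %ld body bytes\n", total);
    return 0;
}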
@@ -13,7 +13,7 @@
Summary: Apache HTTP Server
Name: httpd
Version: 2.4.37
Release: 61%{?dist}
Release: 62%{?dist}
URL: https://httpd.apache.org/
Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2
Source2: httpd.logrotate
@@ -167,6 +167,8 @@ Patch90: httpd-2.4.37-hcheck-mem-issues.patch
Patch91: httpd-2.4.37-add-SNI-support.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2159603
Patch92: httpd-2.4.37-mod_status-duplicate-key.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2221083
Patch93: httpd-2.4.37-r1885607.patch

# Security fixes
Patch200: httpd-2.4.37-r1851471.patch
@@ -434,6 +436,7 @@ interface for storing and accessing per-user session data.
%patch90 -p1 -b .hcheck-mem-issues
%patch91 -p1 -b .SNI
%patch92 -p1 -b .mod_status-dupl
%patch93 -p1 -b .r1885607

%patch200 -p1 -b .r1851471
%patch201 -p1 -b .CVE-2019-0211
@@ -980,6 +983,10 @@ rm -rf $RPM_BUILD_ROOT
%{_rpmconfigdir}/macros.d/macros.httpd

%changelog
* Thu Jul 27 2023 Luboš Uhliarik <luhliari@redhat.com> - 2.4.37-62
- Resolves: #2221083 - Apache Bug 57087: mod_proxy_fcgi doesn't send cgi
CONTENT_LENGTH variable when the client request used Transfer-Encoding:chunked

* Thu Jul 20 2023 Tomas Korbar <tkorbar@redhat.com> - 2.4.37-61
- Fix issue found by covscan
- Related: #2159603