Import rpm: 8b11eeb3c4e58475c99828fdf3a7875cb95e3188

.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
SOURCES/mod_http2-1.15.7.tar.gz

gating.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
--- !Policy
product_versions:
  - rhel-9
decision_context: osci_compose_gate
rules:
  - !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.tier1.functional}
  - !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.tier2.functional}
  - !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.tier3.functional}
  - !PassingTestCaseRule {test_case_name: baseos-ci.brew-build.acceptance-tier.functional}

mod_http2-1.14.1-buildfix.patch (new file, 14 lines)
@@ -0,0 +1,14 @@
diff -uap mod_http2-1.14.0/mod_http2/h2_from_h1.c.buildfix mod_http2-1.14.0/mod_http2/h2_from_h1.c
--- mod_http2-1.14.0/mod_http2/h2_from_h1.c.buildfix	2019-02-12 13:30:56.000000000 +0000
+++ mod_http2-1.14.0/mod_http2/h2_from_h1.c	2019-03-14 10:35:46.365678537 +0000
@@ -35,6 +35,10 @@
 #include "h2_task.h"
 #include "h2_util.h"
 
+#ifndef AP_STATUS_IS_HEADER_ONLY
+#define AP_STATUS_IS_HEADER_ONLY(x) ((x) == HTTP_NO_CONTENT || \
+                                     (x) == HTTP_NOT_MODIFIED)
+#endif
 
 /* This routine is called by apr_table_do and merges all instances of
  * the passed field values into a single array that will be further
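
The buildfix above is the classic compatibility-macro pattern: define AP_STATUS_IS_HEADER_ONLY only when the httpd headers in the buildroot do not already provide it, so the same source builds against both older and newer httpd 2.4 releases. A minimal standalone sketch of that pattern (not mod_http2 code; the two HTTP_* values here are stand-ins for the definitions httpd.h provides):

```c
#include <stdio.h>

/* stand-ins for the status codes defined in httpd.h */
#define HTTP_NO_CONTENT   204
#define HTTP_NOT_MODIFIED 304

/* Define the macro only if the installed headers lack it, so the module
 * compiles against both old and new httpd releases. */
#ifndef AP_STATUS_IS_HEADER_ONLY
#define AP_STATUS_IS_HEADER_ONLY(x) ((x) == HTTP_NO_CONTENT || \
                                     (x) == HTTP_NOT_MODIFIED)
#endif

int main(void)
{
    int codes[] = { 200, 204, 304, 404 };
    unsigned i;

    for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
        printf("%d is header-only: %s\n", codes[i],
               AP_STATUS_IS_HEADER_ONLY(codes[i]) ? "yes" : "no");
    return 0;
}
```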

mod_http2-1.15.14-openssl30.patch (new file, 65 lines)
@@ -0,0 +1,65 @@
commit 124c2ca0886b05d0871ee09466de555d757b72fc
Author: Joe Orton <jorton@redhat.com>
Date:   Fri May 7 10:58:18 2021 +0100

    Switch to using OpenSSL EVP_* API to avoid deprecation warnings
    with OpenSSL 3.0.

diff --git a/mod_http2/h2_push.c b/mod_http2/h2_push.c
index 8ae0b49..0a90a5d 100644
--- a/mod_http2/h2_push.c
+++ b/mod_http2/h2_push.c
@@ -23,7 +23,7 @@
 #include <apr_time.h>
 
 #ifdef H2_OPENSSL
-#include <openssl/sha.h>
+#include <openssl/evp.h>
 #endif
 
 #include <httpd.h>
@@ -472,27 +472,32 @@ typedef struct h2_push_diary_entry {
 
 
 #ifdef H2_OPENSSL
-static void sha256_update(SHA256_CTX *ctx, const char *s)
+static void sha256_update(EVP_MD_CTX *ctx, const char *s)
 {
-    SHA256_Update(ctx, s, strlen(s));
+    EVP_DigestUpdate(ctx, s, strlen(s));
 }
 
 static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
 {
-    SHA256_CTX sha256;
+    EVP_MD_CTX *md;
     apr_uint64_t val;
-    unsigned char hash[SHA256_DIGEST_LENGTH];
+    unsigned char hash[EVP_MAX_MD_SIZE];
+    unsigned len;
     int i;
-
-    SHA256_Init(&sha256);
-    sha256_update(&sha256, push->req->scheme);
-    sha256_update(&sha256, "://");
-    sha256_update(&sha256, push->req->authority);
-    sha256_update(&sha256, push->req->path);
-    SHA256_Final(hash, &sha256);
+
+    md = EVP_MD_CTX_create();
+    ap_assert(md != NULL);
+
+    i = EVP_DigestInit_ex(md, EVP_sha256(), NULL);
+    ap_assert(i == 1);
+    sha256_update(md, push->req->scheme);
+    sha256_update(md, "://");
+    sha256_update(md, push->req->authority);
+    sha256_update(md, push->req->path);
+    EVP_DigestFinal(md, hash, &len);
 
     val = 0;
-    for (i = 0; i != sizeof(val); ++i)
+    for (i = 0; i != len; ++i)
         val = val * 256 + hash[i];
     *phash = val >> (64 - diary->mask_bits);
 }
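
The patch swaps the deprecated SHA256_Init/SHA256_Update/SHA256_Final calls for OpenSSL's EVP digest API, which remains supported in OpenSSL 3.0. A self-contained sketch of the same EVP flow, hashing URL components piecewise the way the patched calc_sha256_hash() does (not mod_http2 code; the URL components are made-up example inputs; build with -lcrypto):

```c
#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    EVP_MD_CTX *md;
    unsigned char hash[EVP_MAX_MD_SIZE];
    unsigned int len, i;

    md = EVP_MD_CTX_create();            /* compat alias of EVP_MD_CTX_new() */
    if (md == NULL || EVP_DigestInit_ex(md, EVP_sha256(), NULL) != 1)
        return 1;

    /* feed the absolute URL piecewise, as the patched code does */
    EVP_DigestUpdate(md, "https", strlen("https"));
    EVP_DigestUpdate(md, "://", strlen("://"));
    EVP_DigestUpdate(md, "example.com", strlen("example.com"));
    EVP_DigestUpdate(md, "/index.html", strlen("/index.html"));
    EVP_DigestFinal(md, hash, &len);     /* len is 32 for SHA-256 */
    EVP_MD_CTX_destroy(md);

    for (i = 0; i < len; i++)
        printf("%02x", hash[i]);
    putchar('\n');
    return 0;
}
```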

mod_http2-1.15.19-CVE-2021-44224.patch (new file, 13 lines)
@@ -0,0 +1,13 @@
diff --git a/mod_http2/h2_request.c b/mod_http2/h2_request.c
index 45df9b1..70241d4 100644
--- a/mod_http2/h2_request.c
+++ b/mod_http2/h2_request.c
@@ -371,7 +371,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
     ap_add_input_filter_handle(ap_http_input_filter_handle,
                                NULL, r, r->connection);
 
-    if ((access_status = ap_run_post_read_request(r))) {
+    if ((access_status = ap_post_read_request(r))) {
        /* Request check post hooks failed. An example of this would be a
         * request for a vhost where h2 is disabled --> 421.
         */

mod_http2-1.15.7-CVE-2020-11993.patch (new file, 119 lines)
@@ -0,0 +1,119 @@
diff --git a/mod_http2/h2_mplx.c b/mod_http2/h2_mplx.c
index c3d590d..33ea45e 100644
--- a/mod_http2/h2_mplx.c
+++ b/mod_http2/h2_mplx.c
@@ -56,7 +56,7 @@ typedef struct {
     apr_size_t count;
 } stream_iter_ctx;
 
-static apr_status_t mplx_be_happy(h2_mplx *m);
+static apr_status_t mplx_be_happy(h2_mplx *m, h2_task *task);
 static apr_status_t mplx_be_annoyed(h2_mplx *m);
 
 apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
@@ -526,10 +526,10 @@ static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
     stream->output = beam;
 
     if (APLOGctrace2(m->c)) {
-        h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
+        h2_beam_log(beam, stream->task->c, APLOG_TRACE2, "out_open");
     }
     else {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c,
                       "h2_mplx(%s): out open", stream->task->id);
     }
 
@@ -579,10 +579,10 @@ static apr_status_t out_close(h2_mplx *m, h2_task *task)
         return APR_ECONNABORTED;
     }
 
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c,
                   "h2_mplx(%s): close", task->id);
     status = h2_beam_close(task->output.beam);
-    h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
+    h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close");
     output_consumed_signal(m, task);
     check_data_for(m, stream, 1);
     return status;
@@ -782,18 +782,18 @@ static void task_done(h2_mplx *m, h2_task *task)
 {
     h2_stream *stream;
 
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
                   "h2_mplx(%ld): task(%s) done", m->id, task->id);
     out_close(m, task);
 
     task->worker_done = 1;
     task->done_at = apr_time_now();
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
                   "h2_mplx(%s): request done, %f ms elapsed", task->id,
                   (task->done_at - task->started_at) / 1000.0);
 
     if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
-        mplx_be_happy(m);
+        mplx_be_happy(m, task);
     }
 
     ap_assert(task->done_done == 0);
@@ -805,13 +805,13 @@ static void task_done(h2_mplx *m, h2_task *task)
             /* reset and schedule again */
             h2_task_redo(task);
             h2_iq_add(m->q, stream->id, NULL, NULL);
-            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
+            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, task->c,
                           H2_STRM_MSG(stream, "redo, added to q"));
         }
         else {
             /* stream not cleaned up, stay around */
             task->done_done = 1;
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
                           H2_STRM_MSG(stream, "task_done, stream open"));
             if (stream->input) {
                 h2_beam_leave(stream->input);
@@ -824,7 +824,7 @@ static void task_done(h2_mplx *m, h2_task *task)
     else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
         /* stream is done, was just waiting for this. */
         task->done_done = 1;
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
                       H2_STRM_MSG(stream, "task_done, in hold"));
         if (stream->input) {
             h2_beam_leave(stream->input);
@@ -832,12 +832,12 @@ static void task_done(h2_mplx *m, h2_task *task)
         stream_joined(m, stream);
     }
     else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
+        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c,
                       H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
         ap_assert("stream should not be in spurge" == NULL);
     }
     else {
-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
+        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518)
                       "h2_mplx(%s): task_done, stream not found",
                       task->id);
         ap_assert("stream should still be available" == NULL);
@@ -963,7 +963,7 @@ static apr_status_t unschedule_slow_tasks(h2_mplx *m)
     return rv;
 }
 
-static apr_status_t mplx_be_happy(h2_mplx *m)
+static apr_status_t mplx_be_happy(h2_mplx *m, h2_task *task)
 {
     apr_time_t now;
 
@@ -975,7 +975,7 @@ static apr_status_t mplx_be_happy(h2_mplx *m)
         m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
         m->last_mood_change = now;
         m->irritations_since = 0;
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
                       "h2_mplx(%ld): mood update, increasing worker limit to %d",
                       m->id, m->limit_active);
     }
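
The common thread in this patch is moving every log call from m->c (the master connection, shared by all workers) to task->c (the connection owned by the worker doing the logging). APR pools are not thread-safe, so two workers formatting log lines out of the same conn_rec's pool can race and corrupt it, which is the "concurrent pool usage" named in CVE-2020-11993. A minimal sketch of the per-thread-pool rule the fix enforces (plain APR, not mod_http2 code; build against apr-1 with -lapr-1):

```c
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strings.h>
#include <apr_thread_proc.h>
#include <stdio.h>

typedef struct {
    apr_pool_t *own_pool;   /* analogous to task->c->pool: owned by one worker */
    int id;
} worker_ctx;

static void *APR_THREAD_FUNC worker(apr_thread_t *thd, void *data)
{
    worker_ctx *ctx = data;

    /* Safe: this pool belongs to this thread alone. Allocating from a pool
     * shared with other threads (what logging via m->c amounted to) races. */
    char *msg = apr_psprintf(ctx->own_pool, "worker %d: done", ctx->id);
    printf("%s\n", msg);

    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}

int main(void)
{
    apr_pool_t *root;
    apr_thread_t *threads[2];
    worker_ctx ctx[2];
    apr_status_t rv;
    int i;

    apr_initialize();
    apr_pool_create(&root, NULL);

    for (i = 0; i < 2; i++) {
        ctx[i].id = i;
        /* one subpool per worker, created serially before the thread starts */
        apr_pool_create(&ctx[i].own_pool, root);
        apr_thread_create(&threads[i], NULL, worker, &ctx[i], root);
    }
    for (i = 0; i < 2; i++)
        apr_thread_join(&rv, threads[i]);

    apr_pool_destroy(root);
    apr_terminate();
    return 0;
}
```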

mod_http2-1.15.7-CVE-2020-9490.patch (new file, 391 lines)
@@ -0,0 +1,391 @@
From b8a8c5061eada0ce3339b24ba1d587134552bc0c Mon Sep 17 00:00:00 2001
From: Stefan Eissing <stefan.eissing@greenbytes.de>
Date: Wed, 29 Jul 2020 14:41:38 +0200
Subject: [PATCH] * Removing support for abandoned draft of http-wg regarding
 cache-digests.

---

diff --git a/mod_http2/h2_push.c b/mod_http2/h2_push.c
index 4a70674..8ae0b49 100644
--- a/mod_http2/h2_push.c
+++ b/mod_http2/h2_push.c
@@ -464,33 +464,6 @@ apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
     return NULL;
 }
 
-/*******************************************************************************
- * push diary
- *
- * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
- *   connection. It records a hash value from the absolute URL of the resource
- *   pushed.
- * - Lacking openssl, it uses 'apr_hashfunc_default' for the value
- * - with openssl, it uses SHA256 to calculate the hash value
- * - whatever the method to generate the hash, the diary keeps a maximum of 64
- *   bits per hash, limiting the memory consumption to about
- *   H2PushDiarySize * 8
- *   bytes. Entries are sorted by most recently used and oldest entries are
- *   forgotten first.
- * - Clients can initialize/replace the push diary by sending a 'Cache-Digest'
- *   header. Currently, this is the base64url encoded value of the cache digest
- *   as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
- *   This draft can be expected to evolve and the definition of the header
- *   will be added there and refined.
- * - The cache digest header is a Golomb Coded Set of hash values, but it may
- *   limit the amount of bits per hash value even further. For a good description
- *   of GCS, read here:
- *   http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
- * - The means that the push diary might be initialized with hash values of much
- *   less than 64 bits, leading to more false positives, but smaller digest size.
- ******************************************************************************/
-
-
 #define GCSLOG_LEVEL APLOG_TRACE1
 
 typedef struct h2_push_diary_entry {
@@ -618,38 +591,48 @@ static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash)
     return -1;
 }
 
-static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
+static void move_to_last(h2_push_diary *diary, apr_size_t idx)
 {
     h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
     h2_push_diary_entry e;
-    apr_size_t lastidx = (apr_size_t)diary->entries->nelts;
+    int lastidx;
 
+    /* Move an existing entry to the last place */
+    if (diary->entries->nelts <= 0)
+        return;
+
     /* move entry[idx] to the end */
-    if (idx+1 < lastidx) {
+    lastidx = diary->entries->nelts - 1;
+    if (idx < lastidx) {
         e = entries[idx];
-        memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
+        memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx));
         entries[lastidx] = e;
     }
-    return &entries[lastidx];
 }
 
-static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+static void remove_first(h2_push_diary *diary)
 {
-    h2_push_diary_entry *ne;
+    h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+    int lastidx;
 
-    if (diary->entries->nelts < diary->N) {
-        /* append a new diary entry at the end */
-        APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
-        ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
+    /* move remaining entries to index 0 */
+    lastidx = diary->entries->nelts - 1;
+    if (lastidx > 0) {
+        --diary->entries->nelts;
+        memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts);
     }
-    else {
-        /* replace content with new digest. keeps memory usage constant once diary is full */
-        ne = move_to_last(diary, 0);
-        *ne = *e;
+}
+
+static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
+{
+    while (diary->entries->nelts >= diary->N) {
+        remove_first(diary);
     }
+    /* append a new diary entry at the end */
+    APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
     /* Intentional no APLOGNO */
     ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
-                  "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
+                  "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash);
 }
 
 apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
@@ -692,30 +675,12 @@ apr_array_header_t *h2_push_collect_update(h2_stream *stream,
                                            const struct h2_request *req,
                                            const struct h2_headers *res)
 {
-    h2_session *session = stream->session;
-    const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
     apr_array_header_t *pushes;
-    apr_status_t status;
 
-    if (cache_digest && session->push_diary) {
-        status = h2_push_diary_digest64_set(session->push_diary, req->authority,
-                                            cache_digest, stream->pool);
-        if (status != APR_SUCCESS) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
-                          H2_SSSN_LOG(APLOGNO(03057), session,
-                          "push diary set from Cache-Digest: %s"), cache_digest);
-        }
-    }
     pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
     return h2_push_diary_update(stream->session, pushes);
 }
 
-static apr_int32_t h2_log2inv(unsigned char log2)
-{
-    return log2? (1 << log2) : 1;
-}
-
-
 typedef struct {
     h2_push_diary *diary;
     unsigned char log2p;
@@ -830,11 +795,6 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
     apr_size_t hash_count;
 
     nelts = diary->entries->nelts;
-
-    if ((apr_uint32_t)nelts > APR_UINT32_MAX) {
-        /* should not happen */
-        return APR_ENOTIMPL;
-    }
     N = ceil_power_of_2(nelts);
     log2n = h2_log2(N);
 
@@ -896,166 +856,3 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
     return APR_SUCCESS;
 }
 
-typedef struct {
-    h2_push_diary *diary;
-    apr_pool_t *pool;
-    unsigned char log2p;
-    const unsigned char *data;
-    apr_size_t datalen;
-    apr_size_t offset;
-    unsigned int bit;
-    apr_uint64_t last_val;
-} gset_decoder;
-
-static int gset_decode_next_bit(gset_decoder *decoder)
-{
-    if (++decoder->bit >= 8) {
-        if (++decoder->offset >= decoder->datalen) {
-            return -1;
-        }
-        decoder->bit = 0;
-    }
-    return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
-}
-
-static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
-{
-    apr_uint64_t flex = 0, fixed = 0, delta;
-    int i;
-
-    /* read 1 bits until we encounter 0, then read log2n(diary-P) bits.
-     * On a malformed bit-string, this will not fail, but produce results
-     * which are pbly too large. Luckily, the diary will modulo the hash.
-     */
-    while (1) {
-        int bit = gset_decode_next_bit(decoder);
-        if (bit == -1) {
-            return APR_EINVAL;
-        }
-        if (!bit) {
-            break;
-        }
-        ++flex;
-    }
-
-    for (i = 0; i < decoder->log2p; ++i) {
-        int bit = gset_decode_next_bit(decoder);
-        if (bit == -1) {
-            return APR_EINVAL;
-        }
-        fixed = (fixed << 1) | bit;
-    }
-
-    delta = (flex << decoder->log2p) | fixed;
-    *phash = delta + decoder->last_val;
-    decoder->last_val = *phash;
-
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
-                  "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
-                  APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT,
-                  *phash, delta, (int)flex, fixed);
-
-    return APR_SUCCESS;
-}
-
-/**
- * Initialize the push diary by a cache digest as described in
- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
- * .
- * @param diary the diary to set the digest into
- * @param data the binary cache digest
- * @param len the length of the cache digest
- * @return APR_EINVAL if digest was not successfully parsed
- */
-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
-                                      const char *data, apr_size_t len)
-{
-    gset_decoder decoder;
-    unsigned char log2n, log2p;
-    int N, i;
-    apr_pool_t *pool = diary->entries->pool;
-    h2_push_diary_entry e;
-    apr_status_t status = APR_SUCCESS;
-
-    if (len < 2) {
-        /* at least this should be there */
-        return APR_EINVAL;
-    }
-    log2n = data[0];
-    log2p = data[1];
-    diary->mask_bits = log2n + log2p;
-    if (diary->mask_bits > 64) {
-        /* cannot handle */
-        return APR_ENOTIMPL;
-    }
-
-    /* whatever is in the digest, it replaces the diary entries */
-    apr_array_clear(diary->entries);
-    if (!authority || !strcmp("*", authority)) {
-        diary->authority = NULL;
-    }
-    else if (!diary->authority || strcmp(diary->authority, authority)) {
-        diary->authority = apr_pstrdup(diary->entries->pool, authority);
-    }
-
-    N = h2_log2inv(log2n + log2p);
-
-    decoder.diary = diary;
-    decoder.pool = pool;
-    decoder.log2p = log2p;
-    decoder.data = (const unsigned char*)data;
-    decoder.datalen = len;
-    decoder.offset = 1;
-    decoder.bit = 8;
-    decoder.last_val = 0;
-
-    diary->N = N;
-    /* Determine effective N we use for storage */
-    if (!N) {
-        /* a totally empty cache digest. someone tells us that she has no
-         * entries in the cache at all. Use our own preferences for N+mask
-         */
-        diary->N = diary->NMax;
-        return APR_SUCCESS;
-    }
-    else if (N > diary->NMax) {
-        /* Store not more than diary is configured to hold. We open us up
-         * to DOS attacks otherwise. */
-        diary->N = diary->NMax;
-    }
-
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-                  "h2_push_diary_digest_set: N=%d, log2n=%d, "
-                  "diary->mask_bits=%d, dec.log2p=%d",
-                  (int)diary->N, (int)log2n, diary->mask_bits,
-                  (int)decoder.log2p);
-
-    for (i = 0; i < diary->N; ++i) {
-        if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
-            /* the data may have less than N values */
-            break;
-        }
-        h2_push_diary_append(diary, &e);
-    }
-
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-                  "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
-                  (int)diary->entries->nelts, diary->mask_bits);
-    return status;
-}
-
-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
-                                        const char *data64url, apr_pool_t *pool)
-{
-    const char *data;
-    apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
-    /* Intentional no APLOGNO */
-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
-                  "h2_push_diary_digest64_set: digest=%s, dlen=%d",
-                  data64url, (int)len);
-    return h2_push_diary_digest_set(diary, authority, data, len);
-}
-
diff --git a/mod_http2/h2_push.h b/mod_http2/h2_push.h
index 0533853..5dc189f 100644
--- a/mod_http2/h2_push.h
+++ b/mod_http2/h2_push.h
@@ -35,6 +35,44 @@ typedef enum {
     H2_PUSH_DIGEST_SHA256
 } h2_push_digest_type;
 
+/*******************************************************************************
+ * push diary
+ *
+ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
+ *   connection. It records a hash value from the absolute URL of the resource
+ *   pushed.
+ * - Lacking openssl,
+ * - with openssl, it uses SHA256 to calculate the hash value, otherwise it
+ *   falls back to apr_hashfunc_default()
+ * - whatever the method to generate the hash, the diary keeps a maximum of 64
+ *   bits per hash, limiting the memory consumption to about
+ *   H2PushDiarySize * 8
+ *   bytes. Entries are sorted by most recently used and oldest entries are
+ *   forgotten first.
+ * - While useful by itself to avoid duplicated PUSHes on the same connection,
+ *   the original idea was that clients provided a 'Cache-Digest' header with
+ *   the values of *their own* cached resources. This was described in
+ *   <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
+ *   and some subsequent revisions that tweaked values but kept the overall idea.
+ * - The draft was abandoned by the IETF http-wg, as support from major clients,
+ *   e.g. browsers, was lacking for various reasons.
+ * - For these reasons, mod_h2 abandoned its support for client supplied values
+ *   but keeps the diary. It seems to provide value for applications using PUSH,
+ *   is configurable in size and defaults to a very moderate amount of memory
+ *   used.
+ * - The cache digest header is a Golomb Coded Set of hash values, but it may
+ *   limit the amount of bits per hash value even further. For a good description
+ *   of GCS, read here:
+ *   <http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters>
+ ******************************************************************************/
+
+
+/*
+ * The push diary is based on the abandoned draft
+ * <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
+ * that describes how to use golomb filters.
+ */
+
 typedef struct h2_push_diary h2_push_diary;
 
 typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
@@ -101,20 +139,4 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
                                       int maxP, const char *authority,
                                       const char **pdata, apr_size_t *plen);
 
-/**
- * Initialize the push diary by a cache digest as described in
- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
- * .
- * @param diary the diary to set the digest into
- * @param authority the authority to set the data for
- * @param data the binary cache digest
- * @param len the length of the cache digest
- * @return APR_EINVAL if digest was not successfully parsed
- */
-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
-                                      const char *data, apr_size_t len);
-
-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
-                                        const char *data64url, apr_pool_t *pool);
-
 #endif /* defined(__mod_h2__h2_push__) */
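
Besides dropping the Cache-Digest parser entirely, the h2_push.c half of this patch replaces the old "overwrite the slot returned by move_to_last()" logic with a simpler scheme: evict from the front until there is room, then append at the tail, so the oldest entries are forgotten first and the array is never addressed past nelts. A minimal standalone sketch of that eviction scheme (plain C stand-in for the APR array; capacity and inputs are made up):

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define N 4  /* diary capacity, the H2PushDiarySize analogue */

static uint64_t entries[N];
static int nelts;

/* drop the oldest entry and shift the rest to index 0 */
static void remove_first(void)
{
    if (nelts > 0) {
        --nelts;
        memmove(entries, entries + 1, sizeof(uint64_t) * nelts);
    }
}

/* evict from the front until there is room, then append at the tail */
static void diary_append(uint64_t hash)
{
    while (nelts >= N)
        remove_first();
    entries[nelts++] = hash;
}

int main(void)
{
    int i;

    for (i = 0; i < 6; i++)
        diary_append(100 + i);  /* 6 inserts into a 4-slot diary */
    for (i = 0; i < nelts; i++)
        printf("%llu ", (unsigned long long)entries[i]);
    putchar('\n');              /* prints: 102 103 104 105 */
    return 0;
}
```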

mod_http2-1.15.7-CVE-2021-33193.patch (new file, 81 lines)
@@ -0,0 +1,81 @@
diff --git a/mod_http2/h2_request.c b/mod_http2/h2_request.c
index 5893c8b..1131440 100644
--- a/mod_http2/h2_request.c
+++ b/mod_http2/h2_request.c
@@ -206,75 +206,13 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
     return dst;
 }
 
-#if !AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
-static request_rec *my_ap_create_request(conn_rec *c)
-{
-    apr_pool_t *p;
-    request_rec *r;
-
-    apr_pool_create(&p, c->pool);
-    apr_pool_tag(p, "request");
-    r = apr_pcalloc(p, sizeof(request_rec));
-    AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)c);
-    r->pool = p;
-    r->connection = c;
-    r->server = c->base_server;
-
-    r->user = NULL;
-    r->ap_auth_type = NULL;
-
-    r->allowed_methods = ap_make_method_list(p, 2);
-
-    r->headers_in = apr_table_make(r->pool, 5);
-    r->trailers_in = apr_table_make(r->pool, 5);
-    r->subprocess_env = apr_table_make(r->pool, 25);
-    r->headers_out = apr_table_make(r->pool, 12);
-    r->err_headers_out = apr_table_make(r->pool, 5);
-    r->trailers_out = apr_table_make(r->pool, 5);
-    r->notes = apr_table_make(r->pool, 5);
-
-    r->request_config = ap_create_request_config(r->pool);
-    /* Must be set before we run create request hook */
-
-    r->proto_output_filters = c->output_filters;
-    r->output_filters = r->proto_output_filters;
-    r->proto_input_filters = c->input_filters;
-    r->input_filters = r->proto_input_filters;
-    ap_run_create_request(r);
-    r->per_dir_config = r->server->lookup_defaults;
-
-    r->sent_bodyct = 0; /* bytect isn't for body */
-
-    r->read_length = 0;
-    r->read_body = REQUEST_NO_BODY;
-
-    r->status = HTTP_OK; /* Until further notice */
-    r->header_only = 0;
-    r->the_request = NULL;
-
-    /* Begin by presuming any module can make its own path_info assumptions,
-     * until some module interjects and changes the value.
-     */
-    r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
-
-    r->useragent_addr = c->client_addr;
-    r->useragent_ip = c->client_ip;
-
-    return r;
-}
-#endif
-
 request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
 {
-    int access_status = HTTP_OK;
+    int access_status = HTTP_OK;
     const char *rpath;
     const char *s;
 
-#if AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
     request_rec *r = ap_create_request(c);
-#else
-    request_rec *r = my_ap_create_request(c);
-#endif
 
     r->headers_in = apr_table_clone(r->pool, req->headers);
 
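
The fallback removed here existed only for httpd builds whose Module Magic Number predates ap_create_request(); on RHEL 9 the bundled httpd is new enough, so the guard and the private my_ap_create_request() copy can go, and request creation is left to the core (which is what makes the CVE-2021-33193 hardening effective). For reference, a tiny sketch of the MMN feature-test pattern itself (assumes the httpd-devel headers are on the include path):

```c
#include <ap_mmn.h>   /* AP_MODULE_MAGIC_AT_LEAST, from httpd-devel */
#include <stdio.h>

int main(void)
{
#if AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
    /* headers new enough: ap_create_request() is available */
    puts("using ap_create_request()");
#else
    /* older httpd: a module would have to carry its own request_rec setup */
    puts("falling back to a private my_ap_create_request()");
#endif
    return 0;
}
```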

mod_http2-1.15.7-CVE-2021-44224.patch (new file, 13 lines)
@@ -0,0 +1,13 @@
diff --git a/mod_http2/h2_request.c b/mod_http2/h2_request.c
index 1131440..89a0b47 100644
--- a/mod_http2/h2_request.c
+++ b/mod_http2/h2_request.c
@@ -267,7 +267,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
                                NULL, r, r->connection);
 
     if (access_status != HTTP_OK
-        || (access_status = ap_run_post_read_request(r))) {
+        || (access_status = ap_post_read_request(r))) {
        /* Request check post hooks failed. An example of this would be a
         * request for a vhost where h2 is disabled --> 421.
         */

mod_http2.spec (new file, 138 lines)
@@ -0,0 +1,138 @@
# Module Magic Number
%{!?_httpd_mmn: %global _httpd_mmn %(cat %{_includedir}/httpd/.mmn 2>/dev/null || echo 0-0)}

Name:           mod_http2
Version:        1.15.7
Release:        5%{?dist}
Summary:        module implementing HTTP/2 for Apache 2
Group:          System Environment/Daemons
License:        ASL 2.0
URL:            https://icing.github.io/mod_h2/
Source0:        https://github.com/icing/mod_h2/releases/download/v%{version}/mod_http2-%{version}.tar.gz
Patch1:         mod_http2-1.15.7-CVE-2020-9490.patch
Patch2:         mod_http2-1.15.7-CVE-2020-11993.patch
Patch3:         mod_http2-1.15.7-CVE-2021-33193.patch
Patch4:         mod_http2-1.15.7-CVE-2021-44224.patch
BuildRequires:  pkgconfig, httpd-devel >= 2.4.20, libnghttp2-devel >= 1.7.0, openssl-devel >= 1.0.2
Requires:       httpd-mmn = %{_httpd_mmn}
Conflicts:      httpd < 2.4.25-8


%description
The mod_h2 Apache httpd module implements the HTTP2 protocol (h2+h2c) on
top of libnghttp2 for httpd 2.4 servers.

%prep
%setup -q
%patch1 -p1 -b .CVE-2020-9490
%patch2 -p1 -b .CVE-2020-11993
%patch3 -p1 -b .CVE-2021-33193
%patch4 -p1 -b .CVE-2021-44224

%build
%configure
make %{?_smp_mflags} V=1

%install
make DESTDIR=%{buildroot} install
rm -rf %{buildroot}/etc/httpd/share/doc/

# create configuration
mkdir -p %{buildroot}%{_httpd_modconfdir}
echo "LoadModule http2_module modules/mod_http2.so" > %{buildroot}%{_httpd_modconfdir}/10-h2.conf
echo "LoadModule proxy_http2_module modules/mod_proxy_http2.so" > %{buildroot}%{_httpd_modconfdir}/10-proxy_h2.conf

%check
make check

%files
%doc README README.md ChangeLog AUTHORS
%license LICENSE
%config(noreplace) %{_httpd_modconfdir}/10-h2.conf
%config(noreplace) %{_httpd_modconfdir}/10-proxy_h2.conf
%{_httpd_moddir}/mod_http2.so
%{_httpd_moddir}/mod_proxy_http2.so

%changelog
* Mon Jan 24 2022 Luboš Uhliarik <luhliari@redhat.com> - 1.15.7-5
- Resolves: #2035030 - CVE-2021-44224 httpd:2.4/httpd: possible NULL dereference
  or SSRF in forward proxy configurations

* Thu Jan 06 2022 Luboš Uhliarik <luhliari@redhat.com> - 1.15.7-4
- Resolves: #1966728 - CVE-2021-33193 httpd:2.4/mod_http2: httpd:
  Request splitting via HTTP/2 method injection and mod_proxy

* Fri Oct 30 2020 Lubos Uhliarik <luhliari@redhat.com> - 1.15.7-3
- Resolves: #1869077 - CVE-2020-11993 httpd:2.4/mod_http2: httpd:
  mod_http2 concurrent pool usage

* Mon Aug 17 2020 Lubos Uhliarik <luhliari@redhat.com> - 1.15.7-2
- Resolves: #1869073 - CVE-2020-9490 httpd:2.4/mod_http2: httpd:
  Push diary crash on specifically crafted HTTP/2 header

* Tue Apr 14 2020 Lubos Uhliarik <luhliari@redhat.com> - 1.15.7-1
- new version 1.15.7
- Resolves: #1814236 - RFE: mod_http2 rebase
- Resolves: #1747289 - CVE-2019-10082 httpd:2.4/mod_http2: httpd:
  read-after-free in h2 connection shutdown
- Resolves: #1696099 - CVE-2019-0197 httpd:2.4/mod_http2: httpd:
  mod_http2: possible crash on late upgrade
- Resolves: #1696094 - CVE-2019-0196 httpd:2.4/mod_http2: httpd:
  mod_http2: read-after-free on a string compare
- Resolves: #1677591 - CVE-2018-17189 httpd:2.4/mod_http2: httpd:
  mod_http2: DoS via slow, unneeded request bodies

* Thu Aug 29 2019 Lubos Uhliarik <luhliari@redhat.com> - 1.11.3-3
- Resolves: #1744999 - CVE-2019-9511 httpd:2.4/mod_http2: HTTP/2: large amount
  of data request leads to denial of service
- Resolves: #1745086 - CVE-2019-9516 httpd:2.4/mod_http2: HTTP/2: 0-length
  headers leads to denial of service
- Resolves: #1745154 - CVE-2019-9517 httpd:2.4/mod_http2: HTTP/2: request for
  large response leads to denial of service

* Thu Apr 4 2019 Joe Orton <jorton@redhat.com> - 1.11.3-2
- update release (#1695587)

* Tue Oct 16 2018 Lubos Uhliarik <luhliari@redhat.com> - 1.11.3-1
- new version 1.11.3
- Resolves: #1633401 - CVE-2018-11763 mod_http2: httpd: DoS for HTTP/2
  connections by continuous SETTINGS

* Wed May 2 2018 Joe Orton <jorton@redhat.com> - 1.10.20-1
- update to 1.10.20

* Wed Apr 18 2018 Joe Orton <jorton@redhat.com> - 1.10.18-1
- update to 1.10.18

* Thu Mar 29 2018 Joe Orton <jorton@redhat.com> - 1.10.16-1
- update to 1.10.16 (CVE-2018-1302)

* Thu Feb 08 2018 Fedora Release Engineering <releng@fedoraproject.org> - 1.10.13-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild

* Tue Nov 7 2017 Joe Orton <jorton@redhat.com> - 1.10.13-1
- update to 1.10.13

* Fri Oct 20 2017 Joe Orton <jorton@redhat.com> - 1.10.12-1
- update to 1.10.12

* Thu Aug 03 2017 Fedora Release Engineering <releng@fedoraproject.org> - 1.10.10-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild

* Mon Jul 31 2017 Joe Orton <jorton@redhat.com> - 1.10.10-1
- update to 1.10.10

* Wed Jul 26 2017 Fedora Release Engineering <releng@fedoraproject.org> - 1.10.7-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild

* Thu Jul 6 2017 Joe Orton <jorton@redhat.com> - 1.10.7-1
- update to 1.10.7

* Mon Jun 12 2017 Joe Orton <jorton@redhat.com> - 1.10.6-1
- update to 1.10.6

* Tue May 16 2017 Joe Orton <jorton@redhat.com> - 1.10.5-1
- update to 1.10.5

* Mon Apr 10 2017 Luboš Uhliarik <luhliari@redhat.com> - 1.10.1-1
- Initial import (#1440780).