import mod_http2-1.11.3-3.module+el8.1.0+4134+e6bad0ed

commit 4a8c1a1438

.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
SOURCES/mod_http2-1.11.3.tar.gz

.mod_http2.metadata (new file, 1 line)
@@ -0,0 +1 @@
24e1a05b7952a6afa85b76b0a477664d0b20734f SOURCES/mod_http2-1.11.3.tar.gz

SOURCES/httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch (new file, 833 lines)
@@ -0,0 +1,833 @@
diff --git a/mod_http2/h2_conn.c b/mod_http2/h2_conn.c
index 00d23c3..cb5bde7 100644
--- a/mod_http2/h2_conn.c
+++ b/mod_http2/h2_conn.c
@@ -235,6 +235,13 @@ apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
case H2_SESSION_ST_BUSY:
case H2_SESSION_ST_WAIT:
c->cs->state = CONN_STATE_WRITE_COMPLETION;
+ if (c->cs && (session->open_streams || !session->remote.emitted_count)) {
+ /* let the MPM know that we are not done and want
+ * the Timeout behaviour instead of a KeepAliveTimeout
+ * See PR 63534.
+ */
+ c->cs->sense = CONN_SENSE_WANT_READ;
+ }
break;
case H2_SESSION_ST_CLEANUP:
case H2_SESSION_ST_DONE:
diff --git a/mod_http2/h2_filter.c b/mod_http2/h2_filter.c
index 241dcf0..05a470e 100644
--- a/mod_http2/h2_filter.c
+++ b/mod_http2/h2_filter.c
@@ -497,6 +497,52 @@ static apr_status_t status_event(void *ctx, h2_bucket_event event,
return APR_SUCCESS;
}

+static apr_status_t discard_body(request_rec *r, apr_off_t maxlen)
+{
+ apr_bucket_brigade *bb;
+ int seen_eos;
+ apr_status_t rv;
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ seen_eos = 0;
+ do {
+ apr_bucket *bucket;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return rv;
+ }
+
+ for (bucket = APR_BRIGADE_FIRST(bb);
+ bucket != APR_BRIGADE_SENTINEL(bb);
+ bucket = APR_BUCKET_NEXT(bucket))
+ {
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ seen_eos = 1;
+ break;
+ }
+ if (bucket->length == 0) {
+ continue;
+ }
+ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return rv;
+ }
+ maxlen -= bucket->length;
+ }
+ apr_brigade_cleanup(bb);
+ } while (!seen_eos && maxlen >= 0);
+
+ return APR_SUCCESS;
+}
+
int h2_filter_h2_status_handler(request_rec *r)
{
h2_ctx *ctx = h2_ctx_rget(r);
@@ -515,8 +561,10 @@ int h2_filter_h2_status_handler(request_rec *r)

task = ctx? h2_ctx_get_task(ctx) : NULL;
if (task) {
-
- if ((status = ap_discard_request_body(r)) != OK) {
+ /* In this handler, we do some special sauce to send footers back,
+ * IFF we received footers in the request. This is used in our test
+ * cases, since CGI has no way of handling those. */
+ if ((status = discard_body(r, 1024)) != OK) {
return status;
}
diff --git a/mod_http2/h2_mplx.c b/mod_http2/h2_mplx.c
index a917077..e44d9ed 100644
--- a/mod_http2/h2_mplx.c
+++ b/mod_http2/h2_mplx.c
@@ -54,8 +54,12 @@ typedef struct {
h2_mplx *m;
h2_stream *stream;
apr_time_t now;
+ apr_size_t count;
} stream_iter_ctx;

+static apr_status_t mplx_be_happy(h2_mplx *m);
+static apr_status_t mplx_be_annoyed(h2_mplx *m);
+
apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
{
(void)pool;
@@ -110,7 +114,7 @@ static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t len

static void stream_joined(h2_mplx *m, h2_stream *stream)
{
- ap_assert(!stream->task || stream->task->worker_done);
+ ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);

h2_ihash_remove(m->shold, stream->id);
h2_ihash_add(m->spurge, stream);
@@ -136,7 +140,7 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream)
h2_ififo_remove(m->readyq, stream->id);
h2_ihash_add(m->shold, stream);

- if (!stream->task || stream->task->worker_done) {
+ if (!h2_task_has_started(stream->task) || stream->task->done_done) {
stream_joined(m, stream);
}
else if (stream->task) {
@@ -219,7 +223,6 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
m->stream_max_mem = (apr_size_t)h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);

m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
m->q = h2_iq_create(m->pool, m->max_streams);
@@ -233,8 +236,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
m->workers = workers;
m->max_active = workers->max_workers;
m->limit_active = 6; /* the original h1 max parallel connections */
- m->last_limit_change = m->last_idle_block = apr_time_now();
- m->limit_change_interval = apr_time_from_msec(100);
+ m->last_mood_change = apr_time_now();
+ m->mood_update_interval = apr_time_from_msec(100);

m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));

@@ -453,6 +456,10 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)

/* How to shut down a h2 connection:
* 1. cancel all streams still active */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks",
+ m->id, (int)h2_ihash_count(m->streams),
+ (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
/* until empty */
}
@@ -496,8 +503,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)

H2_MPLX_LEAVE(m);

- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): released", m->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
}

apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
@@ -736,7 +742,6 @@ static h2_task *next_stream_task(h2_mplx *m)
}

if (!stream->task) {
-
if (sid > m->max_stream_started) {
m->max_stream_started = sid;
}
@@ -755,9 +760,9 @@ static h2_task *next_stream_task(h2_mplx *m)
"create task"));
return NULL;
}
-
}

+ stream->task->started_at = apr_time_now();
++m->tasks_active;
return stream->task;
}
@@ -796,6 +801,8 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
/* this task was handed over to an engine for processing
* and the original worker has finished. That means the
* engine may start processing now. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): task(%s) done (frozen)", m->id, task->id);
h2_task_thaw(task);
apr_thread_cond_broadcast(m->task_thawed);
return;
@@ -835,35 +842,28 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
"h2_mplx(%s): request done, %f ms elapsed", task->id,
(task->done_at - task->started_at) / 1000.0);

- if (task->started_at > m->last_idle_block) {
- /* this task finished without causing an 'idle block', e.g.
- * a block by flow control.
- */
- if (task->done_at- m->last_limit_change >= m->limit_change_interval
- && m->limit_active < m->max_active) {
- /* Well behaving stream, allow it more workers */
- m->limit_active = H2MIN(m->limit_active * 2,
- m->max_active);
- m->last_limit_change = task->done_at;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): increase worker limit to %d",
- m->id, m->limit_active);
- }
+ if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
+ mplx_be_happy(m);
}

+ ap_assert(task->done_done == 0);
+
stream = h2_ihash_get(m->streams, task->stream_id);
if (stream) {
/* stream not done yet. */
- if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
+ if (!m->aborted && task->redo) {
/* reset and schedule again */
+ task->worker_done = 0;
h2_task_redo(task);
- h2_ihash_remove(m->sredo, stream->id);
h2_iq_add(m->q, stream->id, NULL, NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
+ H2_STRM_MSG(stream, "redo, added to q"));
}
else {
/* stream not cleaned up, stay around */
+ task->done_done = 1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- H2_STRM_MSG(stream, "task_done, stream open"));
+ H2_STRM_MSG(stream, "task_done, stream open"));
if (stream->input) {
h2_beam_leave(stream->input);
}
@@ -874,6 +874,7 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
}
else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
/* stream is done, was just waiting for this. */
+ task->done_done = 1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
H2_STRM_MSG(stream, "task_done, in hold"));
if (stream->input) {
@@ -898,8 +899,8 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
{
H2_MPLX_ENTER_ALWAYS(m);

- task_done(m, task, NULL);
--m->tasks_active;
+ task_done(m, task, NULL);

if (m->join_wait) {
apr_thread_cond_signal(m->join_wait);
@@ -917,91 +918,161 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
* h2_mplx DoS protection
******************************************************************************/

-static int latest_repeatable_unsubmitted_iter(void *data, void *val)
+static int timed_out_busy_iter(void *data, void *val)
{
stream_iter_ctx *ctx = data;
h2_stream *stream = val;

- if (stream->task && !stream->task->worker_done
- && h2_task_can_redo(stream->task)
- && !h2_ihash_get(ctx->m->sredo, stream->id)) {
- if (!h2_stream_is_ready(stream)) {
- /* this task occupies a worker, the response has not been submitted
- * yet, not been cancelled and it is a repeatable request
- * -> it can be re-scheduled later */
- if (!ctx->stream
- || (ctx->stream->task->started_at < stream->task->started_at)) {
- /* we did not have one or this one was started later */
- ctx->stream = stream;
- }
- }
+ if (h2_task_has_started(stream->task) && !stream->task->worker_done
+ && (ctx->now - stream->task->started_at) > stream->task->timeout) {
+ /* timed out stream occupying a worker, found */
+ ctx->stream = stream;
+ return 0;
}
return 1;
}

-static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m)
+static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
{
stream_iter_ctx ctx;
ctx.m = m;
ctx.stream = NULL;
- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+ ctx.now = apr_time_now();
+ h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
return ctx.stream;
}

-static int timed_out_busy_iter(void *data, void *val)
+static int latest_repeatable_unsubmitted_iter(void *data, void *val)
{
stream_iter_ctx *ctx = data;
h2_stream *stream = val;
- if (stream->task && !stream->task->worker_done
- && (ctx->now - stream->task->started_at) > stream->task->timeout) {
- /* timed out stream occupying a worker, found */
- ctx->stream = stream;
- return 0;
- }
+ if (!stream->task) goto leave;
+ if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave;
+ if (h2_stream_is_ready(stream)) goto leave;
+ if (stream->task->redo) {
+ ++ctx->count;
+ goto leave;
+ }
+ if (h2_task_can_redo(stream->task)) {
+ /* this task occupies a worker, the response has not been submitted
+ * yet, not been cancelled and it is a repeatable request
+ * -> we could redo it later */
+ if (!ctx->stream
+ || (ctx->stream->task->started_at < stream->task->started_at)) {
+ /* we did not have one or this one was started later */
+ ctx->stream = stream;
+ }
+ }
+leave:
return 1;
}

-static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
+static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
{
stream_iter_ctx ctx;
+
+ /* count the running tasks already marked for redo and get one that could
+ * be throttled */
+ *ptask = NULL;
+
ctx.m = m;
ctx.stream = NULL;
- ctx.now = apr_time_now();
- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
- return ctx.stream;
+
+ ctx.count = 0;
+ h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
+ if (m->tasks_active - ctx.count > m->limit_active) {
+ /* we are above the limit of running tasks, accounting for the ones
+ * already throttled. */
+ if (ctx.stream && ctx.stream->task) {
+ *ptask = ctx.stream->task;
+ return APR_EAGAIN;
+ }
+ /* above limit, be seeing no candidate for easy throttling */
+ if (get_timed_out_busy_stream(m)) {
+ /* Too many busy workers, unable to cancel enough streams
+ * and with a busy, timed out stream, we tell the client
+ * to go away... */
+ return APR_TIMEUP;
+ }
+ }
+ return APR_SUCCESS;
}

static apr_status_t unschedule_slow_tasks(h2_mplx *m)
{
- h2_stream *stream;
- long n;
+ h2_task *task;
+ apr_status_t rv;

/* Try to get rid of streams that occupy workers. Look for safe requests
* that are repeatable. If none found, fail the connection.
*/
- n = (m->tasks_active - m->limit_active - h2_ihash_count(m->sredo));
- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
- h2_task_rst(stream->task, H2_ERR_CANCEL);
- h2_ihash_add(m->sredo, stream);
- --n;
+ while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) {
+ task->redo = 1;
+ h2_task_rst(task, H2_ERR_CANCEL);
}

- if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
- stream = get_timed_out_busy_stream(m);
- if (stream) {
- /* Too many busy workers, unable to cancel enough streams
- * and with a busy, timed out stream, we tell the client
- * to go away... */
- return APR_TIMEUP;
- }
+ return rv;
+}
+
+static apr_status_t mplx_be_happy(h2_mplx *m)
+{
+ apr_time_t now;
+
+ --m->irritations_since;
+ now = apr_time_now();
+ if (m->limit_active < m->max_active
+ && (now - m->last_mood_change >= m->mood_update_interval
+ || m->irritations_since < -m->limit_active)) {
+ m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
+ m->last_mood_change = now;
+ m->irritations_since = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): mood update, increasing worker limit to %d",
+ m->id, m->limit_active);
}
return APR_SUCCESS;
}

+static apr_status_t mplx_be_annoyed(h2_mplx *m)
+{
+ apr_status_t status = APR_SUCCESS;
+ apr_time_t now;
+
+ ++m->irritations_since;
+ now = apr_time_now();
+ if (m->limit_active > 2 &&
+ ((now - m->last_mood_change >= m->mood_update_interval)
+ || (m->irritations_since >= m->limit_active))) {
+
+ if (m->limit_active > 16) {
+ m->limit_active = 16;
+ }
+ else if (m->limit_active > 8) {
+ m->limit_active = 8;
+ }
+ else if (m->limit_active > 4) {
+ m->limit_active = 4;
+ }
+ else if (m->limit_active > 2) {
+ m->limit_active = 2;
+ }
+ m->last_mood_change = now;
+ m->irritations_since = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ "h2_mplx(%ld): mood update, decreasing worker limit to %d",
+ m->id, m->limit_active);
+ }
+
+ if (m->tasks_active > m->limit_active) {
+ status = unschedule_slow_tasks(m);
+ }
+
+ return status;
+}
+
apr_status_t h2_mplx_idle(h2_mplx *m)
{
apr_status_t status = APR_SUCCESS;
- apr_time_t now;
apr_size_t scount;

H2_MPLX_ENTER(m);
@@ -1021,31 +1092,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
* of busy workers we allow for this connection until it
* well behaves.
*/
- now = apr_time_now();
- m->last_idle_block = now;
- if (m->limit_active > 2
- && now - m->last_limit_change >= m->limit_change_interval) {
- if (m->limit_active > 16) {
- m->limit_active = 16;
- }
- else if (m->limit_active > 8) {
- m->limit_active = 8;
- }
- else if (m->limit_active > 4) {
- m->limit_active = 4;
- }
- else if (m->limit_active > 2) {
- m->limit_active = 2;
- }
- m->last_limit_change = now;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): decrease worker limit to %d",
- m->id, m->limit_active);
- }
-
- if (m->tasks_active > m->limit_active) {
- status = unschedule_slow_tasks(m);
- }
+ status = mplx_be_annoyed(m);
}
else if (!h2_iq_empty(m->q)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
@@ -1209,8 +1256,8 @@ void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,

if (status != APR_SUCCESS && stream
&& h2_task_can_redo(task)
- && !h2_ihash_get(m->sredo, stream->id)) {
- h2_ihash_add(m->sredo, stream);
+ && !task->redo) {
+ task->redo = 1;
}

if (task->engine) {
@@ -1276,11 +1323,24 @@ int h2_mplx_awaits_data(h2_mplx *m)
if (h2_ihash_empty(m->streams)) {
waiting = 0;
}
- else if (!m->tasks_active && !h2_ififo_count(m->readyq)
- && h2_iq_empty(m->q)) {
+ else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) {
waiting = 0;
}

H2_MPLX_LEAVE(m);
return waiting;
}
+
+apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id)
+{
+ h2_stream *stream;
+ apr_status_t status = APR_SUCCESS;
+
+ H2_MPLX_ENTER_ALWAYS(m);
+ stream = h2_ihash_get(m->streams, stream_id);
+ if (stream && stream->task) {
+ status = mplx_be_annoyed(m);
+ }
+ H2_MPLX_LEAVE(m);
+ return status;
+}
diff --git a/mod_http2/h2_mplx.h b/mod_http2/h2_mplx.h
index c267647..94e2cc8 100644
--- a/mod_http2/h2_mplx.h
+++ b/mod_http2/h2_mplx.h
@@ -65,7 +65,6 @@ struct h2_mplx {
unsigned int is_registered; /* is registered at h2_workers */

struct h2_ihash_t *streams; /* all streams currently processing */
- struct h2_ihash_t *sredo; /* all streams that need to be re-started */
struct h2_ihash_t *shold; /* all streams done with task ongoing */
struct h2_ihash_t *spurge; /* all streams done, ready for destroy */

@@ -79,10 +78,10 @@ struct h2_mplx {
apr_uint32_t tasks_active; /* # of tasks being processed from this mplx */
apr_uint32_t limit_active; /* current limit on active tasks, dynamic */
apr_uint32_t max_active; /* max, hard limit # of active tasks in a process */
- apr_time_t last_idle_block; /* last time, this mplx entered IDLE while
- * streams were ready */
- apr_time_t last_limit_change; /* last time, worker limit changed */
- apr_interval_time_t limit_change_interval;
+ apr_time_t last_mood_change; /* last time, we worker limit changed */
+ apr_interval_time_t mood_update_interval; /* how frequent we update at most */
+ int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
+

apr_thread_mutex_t *lock;
struct apr_thread_cond_t *added_output;
@@ -211,6 +210,8 @@ typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);

apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);

+apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id);
+
/*******************************************************************************
* Output handling of streams.
******************************************************************************/
diff --git a/mod_http2/h2_session.c b/mod_http2/h2_session.c
index ed96cf0..6a33d69 100644
--- a/mod_http2/h2_session.c
+++ b/mod_http2/h2_session.c
@@ -390,9 +390,14 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
(int)frame->rst_stream.error_code);
stream = h2_session_stream_get(session, frame->hd.stream_id);
if (stream && stream->initiated_on) {
+ /* A stream reset on a request we sent it. Normal, when the
+ * client does not want it. */
++session->pushes_reset;
}
else {
+ /* A stream reset on a request it sent us. Could happen in a browser
+ * when the user navigates away or cancels loading - maybe. */
+ h2_mplx_client_rst(session->mplx, frame->hd.stream_id);
++session->streams_reset;
}
break;
@@ -1680,7 +1685,7 @@ static void transit(h2_session *session, const char *action, h2_session_state ns
* that already served requests - not fair. */
session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
s = "timeout";
- timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
+ timeout = session->s->timeout;
update_child_status(session, SERVER_BUSY_READ, "idle");
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
@@ -1688,8 +1693,8 @@ static void transit(h2_session *session, const char *action, h2_session_state ns
}
else if (session->open_streams) {
s = "timeout";
- timeout = session->s->keep_alive_timeout;
- update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
+ timeout = session->s->timeout;
+ update_child_status(session, SERVER_BUSY_READ, "idle");
}
else {
/* normal keepalive setup */
@@ -2146,6 +2151,14 @@ apr_status_t h2_session_process(h2_session *session, int async)
session->have_read = 1;
}
else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
+ status = h2_mplx_idle(session->mplx);
+ if (status == APR_EAGAIN) {
+ break;
+ }
+ else if (status != APR_SUCCESS) {
+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_ENHANCE_YOUR_CALM, "less is more");
+ }
status = APR_EAGAIN;
goto out;
}
diff --git a/mod_http2/h2_stream.c b/mod_http2/h2_stream.c
index 24ebc56..8d7322a 100644
--- a/mod_http2/h2_stream.c
+++ b/mod_http2/h2_stream.c
@@ -398,13 +398,8 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
/* start pushed stream */
ap_assert(stream->request == NULL);
ap_assert(stream->rtmp != NULL);
- status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
- if (status != APR_SUCCESS) {
- return status;
- }
- set_policy_for(stream, stream->rtmp);
- stream->request = stream->rtmp;
- stream->rtmp = NULL;
+ status = h2_stream_end_headers(stream, 1, 0);
+ if (status != APR_SUCCESS) goto leave;
break;

default:
@@ -416,6 +411,7 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
if (status == APR_SUCCESS && eos) {
status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
}
+leave:
return status;
}

@@ -456,13 +452,8 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
* to abort the connection here, since this is clearly a protocol error */
return APR_EINVAL;
}
- status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
- if (status != APR_SUCCESS) {
- return status;
- }
- set_policy_for(stream, stream->rtmp);
- stream->request = stream->rtmp;
- stream->rtmp = NULL;
+ status = h2_stream_end_headers(stream, eos, frame_len);
+ if (status != APR_SUCCESS) goto leave;
}
break;

@@ -473,6 +464,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
if (status == APR_SUCCESS && eos) {
status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
}
+leave:
return status;
}

@@ -684,6 +676,8 @@ static apr_status_t add_trailer(h2_stream *stream,
hvalue = apr_pstrndup(stream->pool, value, vlen);
h2_util_camel_case_header(hname, nlen);
apr_table_mergen(stream->trailers, hname, hvalue);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);

return APR_SUCCESS;
}
@@ -703,15 +697,19 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
if (name[0] == ':') {
if ((vlen) > session->s->limit_req_line) {
/* pseudo header: approximation of request line size check */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_STRM_MSG(stream, "pseudo %s too long"), name);
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+ H2_STRM_LOG(APLOGNO(10178), stream,
+ "Request pseudo header exceeds "
+ "LimitRequestFieldSize: %s"), name);
error = HTTP_REQUEST_URI_TOO_LARGE;
}
}
else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
/* header too long */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_STRM_MSG(stream, "header %s too long"), name);
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+ H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
+ "LimitRequestFieldSize: %.*s"),
+ (int)H2MIN(nlen, 80), name);
error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
}

@@ -723,8 +721,9 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
return APR_ECONNRESET;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_STRM_MSG(stream, "too many header lines"));
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+ H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
+ "exceeds LimitRequestFields"));
error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
}

@@ -755,6 +754,47 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
return status;
}

+typedef struct {
+ apr_size_t maxlen;
+ const char *failed_key;
+} val_len_check_ctx;
+
+static int table_check_val_len(void *baton, const char *key, const char *value)
+{
+ val_len_check_ctx *ctx = baton;
+
+ if (strlen(value) <= ctx->maxlen) return 1;
+ ctx->failed_key = key;
+ return 0;
+}
+
+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
+{
+ apr_status_t status;
+ val_len_check_ctx ctx;
+
+ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, raw_bytes);
+ if (APR_SUCCESS == status) {
+ set_policy_for(stream, stream->rtmp);
+ stream->request = stream->rtmp;
+ stream->rtmp = NULL;
+
+ ctx.maxlen = stream->session->s->limit_req_fieldsize;
+ ctx.failed_key = NULL;
+ apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
+ if (ctx.failed_key) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,
+ H2_STRM_LOG(APLOGNO(), stream,"Request header exceeds "
+ "LimitRequestFieldSize: %.*s"),
+ (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
+ set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
+ /* keep on returning APR_SUCCESS, so that we send a HTTP response and
+ * do not RST the stream. */
+ }
+ }
+ return status;
+}
+
static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
{
if (bb) {
diff --git a/mod_http2/h2_stream.h b/mod_http2/h2_stream.h
index 7ecc0ad..79cb39d 100644
--- a/mod_http2/h2_stream.h
+++ b/mod_http2/h2_stream.h
@@ -198,6 +198,10 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
apr_status_t h2_stream_add_header(h2_stream *stream,
const char *name, size_t nlen,
const char *value, size_t vlen);
+
+/* End the contruction of request headers */
+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
+

apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
diff --git a/mod_http2/h2_task.c b/mod_http2/h2_task.c
index 86fb026..1c01830 100644
--- a/mod_http2/h2_task.c
+++ b/mod_http2/h2_task.c
@@ -425,8 +425,15 @@ int h2_task_can_redo(h2_task *task) {
|| !strcmp("OPTIONS", task->request->method));
}

+int h2_task_has_started(h2_task *task)
+{
+ return task && task->started_at != 0;
+}
+
void h2_task_redo(h2_task *task)
{
+ task->started_at = 0;
+ task->worker_done = 0;
task->rst_error = 0;
}

@@ -565,7 +572,6 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
ap_assert(task);
c = task->c;
task->worker_started = 1;
- task->started_at = apr_time_now();

if (c->master) {
/* Each conn_rec->id is supposed to be unique at a point in time. Since
diff --git a/mod_http2/h2_task.h b/mod_http2/h2_task.h
index ab6a746..56f6e0f 100644
--- a/mod_http2/h2_task.h
+++ b/mod_http2/h2_task.h
@@ -83,7 +83,10 @@ struct h2_task {
unsigned int frozen : 1;
unsigned int thawed : 1;
unsigned int worker_started : 1; /* h2_worker started processing */
- unsigned int worker_done : 1; /* h2_worker finished */
+ unsigned int redo : 1; /* was throttled, should be restarted later */
+
+ int worker_done; /* h2_worker finished */
+ int done_done; /* task_done has been handled */

apr_time_t started_at; /* when processing started */
apr_time_t done_at; /* when processing was done */
@@ -105,6 +108,7 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id);

void h2_task_redo(h2_task *task);
int h2_task_can_redo(h2_task *task);
+int h2_task_has_started(h2_task *task);

/**
* Reset the task with the given error code, resets all input/output.
SPECS/mod_http2.spec (new file, 108 lines)
@@ -0,0 +1,108 @@
# Module Magic Number
%{!?_httpd_mmn: %global _httpd_mmn %(cat %{_includedir}/httpd/.mmn 2>/dev/null || echo 0-0)}

Name: mod_http2
Version: 1.11.3
Release: 3%{?dist}
Summary: module implementing HTTP/2 for Apache 2
Group: System Environment/Daemons
License: ASL 2.0
URL: https://icing.github.io/mod_h2/
Source0: https://github.com/icing/mod_h2/releases/download/v%{version}/mod_http2-%{version}.tar.gz
BuildRequires: pkgconfig, httpd-devel >= 2.4.20, libnghttp2-devel >= 1.7.0, openssl-devel >= 1.0.2
Requires: httpd-mmn = %{_httpd_mmn}
Conflicts: httpd < 2.4.25-8

# https://bugzilla.redhat.com/show_bug.cgi?id=1741860
# https://bugzilla.redhat.com/show_bug.cgi?id=1741864
# https://bugzilla.redhat.com/show_bug.cgi?id=1741868
Patch200: httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch

%description
The mod_h2 Apache httpd module implements the HTTP2 protocol (h2+h2c) on
top of libnghttp2 for httpd 2.4 servers.

%prep
%setup -q

%patch200 -p1 -b .CVE-2019-9511-and-9516-and-9517

%build
%configure
make %{?_smp_mflags} V=1

%install
make DESTDIR=%{buildroot} install
rm -rf %{buildroot}/etc/httpd/share/doc/

# create configuration
mkdir -p %{buildroot}%{_httpd_modconfdir}
echo "LoadModule http2_module modules/mod_http2.so" > %{buildroot}%{_httpd_modconfdir}/10-h2.conf
echo "LoadModule proxy_http2_module modules/mod_proxy_http2.so" > %{buildroot}%{_httpd_modconfdir}/10-proxy_h2.conf

%check
make check

%files
%doc README README.md ChangeLog AUTHORS
%license LICENSE
%config(noreplace) %{_httpd_modconfdir}/10-h2.conf
%config(noreplace) %{_httpd_modconfdir}/10-proxy_h2.conf
%{_httpd_moddir}/mod_http2.so
%{_httpd_moddir}/mod_proxy_http2.so

%changelog
* Thu Aug 29 2019 Lubos Uhliarik <luhliari@redhat.com> - 1.11.3-3
- Resolves: #1744999 - CVE-2019-9511 httpd:2.4/mod_http2: HTTP/2: large amount
of data request leads to denial of service
- Resolves: #1745086 - CVE-2019-9516 httpd:2.4/mod_http2: HTTP/2: 0-length
headers leads to denial of service
- Resolves: #1745154 - CVE-2019-9517 httpd:2.4/mod_http2: HTTP/2: request for
large response leads to denial of service

* Thu Apr 4 2019 Joe Orton <jorton@redhat.com> - 1.11.3-2
- update release (#1695587)

* Tue Oct 16 2018 Lubos Uhliarik <luhliari@redhat.com> - 1.11.3-1
- new version 1.11.3
- Resolves: #1633401 - CVE-2018-11763 mod_http2: httpd: DoS for HTTP/2
connections by continuous SETTINGS

* Wed May 2 2018 Joe Orton <jorton@redhat.com> - 1.10.20-1
- update to 1.10.20

* Wed Apr 18 2018 Joe Orton <jorton@redhat.com> - 1.10.18-1
- update to 1.10.18

* Thu Mar 29 2018 Joe Orton <jorton@redhat.com> - 1.10.16-1
- update to 1.10.16 (CVE-2018-1302)

* Thu Feb 08 2018 Fedora Release Engineering <releng@fedoraproject.org> - 1.10.13-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild

* Tue Nov 7 2017 Joe Orton <jorton@redhat.com> - 1.10.13-1
- update to 1.10.13

* Fri Oct 20 2017 Joe Orton <jorton@redhat.com> - 1.10.12-1
- update to 1.10.12

* Thu Aug 03 2017 Fedora Release Engineering <releng@fedoraproject.org> - 1.10.10-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild

* Mon Jul 31 2017 Joe Orton <jorton@redhat.com> - 1.10.10-1
- update to 1.10.10

* Wed Jul 26 2017 Fedora Release Engineering <releng@fedoraproject.org> - 1.10.7-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild

* Thu Jul 6 2017 Joe Orton <jorton@redhat.com> - 1.10.7-1
- update to 1.10.7

* Mon Jun 12 2017 Joe Orton <jorton@redhat.com> - 1.10.6-1
- update to 1.10.6

* Tue May 16 2017 Joe Orton <jorton@redhat.com> - 1.10.5-1
- update to 1.10.5

* Mon Apr 10 2017 Luboš Uhliarik <luhliari@redhat.com> - 1.10.1-1
- Initial import (#1440780).