Import of kernel-4.18.0-553.94.1.el8_10

parent 8b5ed42031
commit 4934e7026b
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.92.1
+RHEL_RELEASE = 553.94.1
 
 #
 # ZSTREAM

@@ -56,6 +56,7 @@ struct intel_breadcrumbs;
 
 typedef u32 intel_engine_mask_t;
 #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
 
 struct intel_hw_status_page {
 	struct list_head timelines;

@@ -5354,6 +5354,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
 
 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
 
+	BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+	ve->base.mask = VIRTUAL_ENGINES;
+
 	intel_context_init(&ve->context, &ve->base);
 
 	for (n = 0; n < count; n++) {

@@ -135,9 +135,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	i915_sw_fence_fini(&rq->semaphore);
 
 	/*
-	 * Keep one request on each engine for reserved use under mempressure
-	 * do not use with virtual engines as this really is only needed for
-	 * kernel contexts.
+	 * Keep one request on each engine for reserved use under mempressure.
 	 *
 	 * We do not hold a reference to the engine here and so have to be
 	 * very careful in what rq->engine we poke. The virtual engine is

@@ -167,8 +165,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * know that if the rq->execution_mask is a single bit, rq->engine
 	 * can be a physical engine with the exact corresponding mask.
 	 */
-	if (!intel_engine_is_virtual(rq->engine) &&
-	    is_power_of_2(rq->execution_mask) &&
+	if (is_power_of_2(rq->execution_mask) &&
 	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
 		return;
 

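Note: the four i915 hunks above are one fix. A virtual engine now advertises a dedicated, reserved mask bit (the top bit of intel_engine_mask_t) rather than borrowing its siblings' bits, the BUILD_BUG_ON pins that bit above every physical engine, and i915_fence_release() can then conclude from is_power_of_2(rq->execution_mask) alone that rq->engine is a safe physical engine to poke, instead of calling intel_engine_is_virtual() on an engine it holds no reference to. Below is a toy illustration of the mask arithmetic only; the names, the engine count, and main() are made up and this is not the driver's actual submission flow:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's engine-mask scheme (hypothetical). */
typedef uint32_t engine_mask_t;
#define NUM_ENGINES	10u				/* pretend count */
#define VIRTUAL_BIT	((engine_mask_t)1u << 31)	/* reserved top bit */

/* Same spirit as the BUILD_BUG_ON above: the reserved bit must sit
 * above every physical engine bit so the two can never collide. */
_Static_assert(31 >= NUM_ENGINES, "virtual bit overlaps physical engines");

static int is_power_of_2(engine_mask_t m)
{
	return m != 0 && (m & (m - 1)) == 0;
}

int main(void)
{
	engine_mask_t phys = 1u << 3;			/* one physical engine */
	engine_mask_t span = (1u << 1) | (1u << 2);	/* several engines */

	/* A single-bit mask identifies exactly one physical engine, and the
	 * reserved virtual bit can never equal a physical engine's mask. */
	printf("phys single-bit: %d\n", is_power_of_2(phys));		/* 1 */
	printf("span single-bit: %d\n", is_power_of_2(span));		/* 0 */
	printf("virtual aliases phys: %d\n", VIRTUAL_BIT == phys);	/* 0 */
	return 0;
}
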
@@ -742,13 +742,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
 	dma_resv_assert_held(resv);
 
 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
-		/* Make sure to grab an additional ref on the added fence */
-		dma_fence_get(fence);
-		ret = drm_sched_job_add_dependency(job, fence);
-		if (ret) {
-			dma_fence_put(fence);
+		/*
+		 * As drm_sched_job_add_dependency always consumes the fence
+		 * reference (even when it fails), and dma_resv_for_each_fence
+		 * is not obtaining one, we need to grab one before calling.
+		 */
+		ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
+		if (ret)
 			return ret;
-		}
 	}
 	return 0;
 }

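Note: the scheduler hunk pins down an ownership rule stated in its new comment: drm_sched_job_add_dependency() consumes the fence reference it is handed, success or failure, while dma_resv_for_each_fence() lends fences without taking references. Grabbing the reference inline and dropping the error-path put avoids both a leak and a double put. A minimal refcount sketch of that "callee consumes the reference, even on failure" convention, using toy types rather than the dma_fence API:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted "fence" (hypothetical stand-in for struct dma_fence). */
struct fence {
	int refcount;
};

static struct fence *fence_get(struct fence *f)
{
	f->refcount++;
	return f;	/* returning the pointer allows the inline style below */
}

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

/* Consumes the caller's reference even when it fails, mirroring the
 * documented behaviour of drm_sched_job_add_dependency(). */
static int add_dependency(struct fence *f, int simulate_error)
{
	if (simulate_error) {
		fence_put(f);	/* failure path still releases the ref */
		return -1;
	}
	/* On success the "job" keeps the reference and puts it later. */
	return 0;
}

int main(void)
{
	struct fence *f = calloc(1, sizeof(*f));

	f->refcount = 1;	/* reference owned by the reservation object */

	/* The iterator lends us the fence without a reference, so take one
	 * at the call site; on error no cleanup is needed, the callee
	 * already dropped it. */
	if (add_dependency(fence_get(f), 1))
		fprintf(stderr, "add failed, no put required here\n");

	fence_put(f);		/* drop the original reference */
	return 0;
}
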
@@ -364,6 +364,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 
 		if (!buff->is_eop) {
+			unsigned int frag_cnt = 0U;
+
+			/* There will be an extra fragment */
+			if (buff->len > AQ_CFG_RX_HDR_SIZE)
+				frag_cnt++;
 
 			buff_ = buff;
 			do {
 				bool is_rsc_completed = true;

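Note: the atlantic hunk starts counting fragments of a multi-descriptor RX packet up front, including the extra fragment needed when the head buffer carries more payload than the copied header area. Only the counting is visible here; the limit check it presumably feeds (e.g. against the fragment capacity of the skb) lies outside this hunk. A toy sketch of counting fragments before committing to assemble a packet, with made-up sizes and structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical descriptor-ring fragment counting; the driver's real
 * structures and limits differ. */
#define RX_HDR_SIZE	256u	/* bytes copied into the header area */
#define MAX_FRAGS	4u	/* capacity of the fragment array */

struct rx_buff {
	unsigned int len;
	bool is_eop;		/* last descriptor of the packet? */
};

/* Count how many page fragments a multi-descriptor packet will need,
 * including the extra one when the head buffer holds more payload than
 * fits in the copied header, and reject oversized packets up front. */
static bool rx_packet_fits(const struct rx_buff *ring, unsigned int head,
			   unsigned int size)
{
	unsigned int i = head, frag_cnt = 0;

	/* There will be an extra fragment for the head's overflow bytes. */
	if (ring[head].len > RX_HDR_SIZE)
		frag_cnt++;

	while (!ring[i].is_eop) {
		i = (i + 1) % size;	/* follow the descriptor chain */
		frag_cnt++;
	}
	return frag_cnt <= MAX_FRAGS;
}

int main(void)
{
	struct rx_buff ring[8] = {
		{ .len = 1024, .is_eop = false },	/* head, overflows hdr */
		{ .len = 1024, .is_eop = false },
		{ .len = 512,  .is_eop = true },	/* tail */
	};

	printf("fits: %d\n", rx_packet_fits(ring, 0, 8)); /* 3 frags <= 4: 1 */
	return 0;
}
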
@@ -785,11 +785,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 			rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
 							 search_flags,
 							 &cfile->srch_inf);
-			if (rc)
-				return -ENOENT;
 			/* FindFirst/Next set last_entry to NULL on malformed reply */
 			if (cfile->srch_inf.last_entry)
 				cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
+			if (rc)
+				return -ENOENT;
 		}
 		if (index_to_find < cfile->srch_inf.index_of_last_entry) {
 			/* we found the buffer that contains the entry */

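Note: the CIFS hunk moves the error return after the resume-key update, so whatever resume state a reply carried is preserved before find_cifs_entry() bails out, while the existing NULL guard still protects against a malformed reply that leaves last_entry unset. A toy sketch of that save-progress-then-check-status ordering in a paged enumeration; the structures and return codes are stand-ins, not the CIFS ones:

#include <stdio.h>

/* Toy paged-enumeration state (hypothetical; not the CIFS structures). */
struct search_info {
	const char *last_entry;	/* NULL when the reply was malformed */
	char resume_key[32];
};

static int query_next_page(struct search_info *info, int simulate)
{
	switch (simulate) {
	case 0: info->last_entry = "entry-42"; return 0;	/* good reply */
	case 1: info->last_entry = NULL;       return 0;	/* malformed */
	default: info->last_entry = "partial"; return -1;	/* error reply */
	}
}

static int advance(struct search_info *info, int simulate)
{
	int rc = query_next_page(info, simulate);

	/* Save whatever resume state the reply carried before acting on
	 * the status, and never dereference a NULL last_entry. */
	if (info->last_entry)
		snprintf(info->resume_key, sizeof(info->resume_key), "%s",
			 info->last_entry);
	if (rc)
		return -2;	/* stand-in for -ENOENT */
	return 0;
}

int main(void)
{
	struct search_info info = { 0 };

	advance(&info, 1);	/* malformed reply: guarded, key unchanged */
	advance(&info, 2);	/* error reply: progress saved before bailing */
	printf("resume key after error: %s\n", info.resume_key); /* partial */
	return 0;
}
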
@@ -455,7 +455,9 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct smbd_response *response =
 		container_of(wc->wr_cqe, struct smbd_response, cqe);
 	struct smbd_connection *info = response->info;
-	int data_length = 0;
+	u32 data_offset = 0;
+	u32 data_length = 0;
+	u32 remaining_data_length = 0;
 
 	log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n",
 		      response, response->type, wc->status, wc->opcode,
@@ -487,7 +489,22 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	/* SMBD data transfer packet */
 	case SMBD_TRANSFER_DATA:
 		data_transfer = smbd_response_payload(response);
+
+		if (wc->byte_len <
+		    offsetof(struct smbd_data_transfer, padding))
+			goto error;
+
+		remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
+		data_offset = le32_to_cpu(data_transfer->data_offset);
 		data_length = le32_to_cpu(data_transfer->data_length);
+		if (wc->byte_len < data_offset ||
+		    (u64)wc->byte_len < (u64)data_offset + data_length)
+			goto error;
+
+		if (remaining_data_length > info->max_fragmented_recv_size ||
+		    data_length > info->max_fragmented_recv_size ||
+		    (u64)remaining_data_length + (u64)data_length > (u64)info->max_fragmented_recv_size)
+			goto error;
 
 		/*
 		 * If this is a packet with data playload place the data in

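Note: together with the u32 declarations added in the previous hunk, recv_done() now validates an SMB Direct data-transfer packet before trusting it: the wire length must cover the fixed header, the declared payload must lie inside the received bytes, and the sums are computed in 64-bit so a crafted data_offset/data_length pair cannot wrap a 32-bit addition past the check. A standalone sketch of that widen-before-compare pattern; the packet layout and names below are hypothetical, not the real struct smbd_data_transfer:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical packet header (the real SMBD header differs). */
struct data_packet {
	uint32_t remaining_data_length;
	uint32_t data_offset;
	uint32_t data_length;
};

#define HDR_SIZE ((uint32_t)sizeof(struct data_packet))

/* Validate untrusted 32-bit lengths by doing the arithmetic in 64 bits,
 * so data_offset + data_length cannot wrap around and pass the check. */
static bool packet_sane(const struct data_packet *p, uint32_t byte_len,
			uint32_t max_recv_size)
{
	if (byte_len < HDR_SIZE)
		return false;	/* wire data too short for the header */

	if (byte_len < p->data_offset ||
	    (uint64_t)byte_len < (uint64_t)p->data_offset + p->data_length)
		return false;	/* payload would run past the received bytes */

	if (p->remaining_data_length > max_recv_size ||
	    p->data_length > max_recv_size ||
	    (uint64_t)p->remaining_data_length + (uint64_t)p->data_length >
	    (uint64_t)max_recv_size)
		return false;	/* fragmented message would exceed the cap */

	return true;
}

int main(void)
{
	/* 0xfffffff0 + 0x20 wraps to 0x10 in 32-bit math and would slip
	 * past a narrow check; the 64-bit comparison still catches it. */
	struct data_packet evil = {
		.remaining_data_length = 0,
		.data_offset = 0xfffffff0u,
		.data_length = 0x20u,
	};

	printf("evil packet accepted? %d\n",
	       packet_sane(&evil, 0xfffffff8u, 1u << 20));	/* 0 */
	return 0;
}
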
@@ -2510,6 +2510,14 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 	 */
 	WRITE_ONCE(current->task_struct_rh->capture_control, NULL);
 	*capture = READ_ONCE(capc.page);
+	/*
+	 * Technically, it is also possible that compaction is skipped but
+	 * the page is still captured out of luck(IRQ came and freed the page).
+	 * Returning COMPACT_SUCCESS in such cases helps in properly accounting
+	 * the COMPACT[STALL|FAIL] when compaction is skipped.
+	 */
+	if (*capture)
+		ret = COMPACT_SUCCESS;
 
 	return ret;
 }

@@ -4160,6 +4160,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	memalloc_noreclaim_restore(noreclaim_flag);
 	psi_memstall_leave(&pflags);
 
+	if (*compact_result == COMPACT_SKIPPED)
+		return NULL;
 	/*
 	 * At least in one zone compaction wasn't deferred or skipped, so let's
 	 * count a compaction stall

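Note: the last two hunks pair up. compact_zone_order() now reports COMPACT_SUCCESS whenever a page was captured, even if the compaction run was nominally skipped (an IRQ may have freed a suitable page in the meantime), and __alloc_pages_direct_compact() bails out early on COMPACT_SKIPPED so a compaction stall is only counted when compaction actually ran. A schematic model of that accounting with hypothetical counters, not the kernel's vm_event machinery:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's compaction results/counters. */
enum compact_result { COMPACT_SKIPPED, COMPACT_SUCCESS, COMPACT_COMPLETE };

static unsigned long compactstall, compactfail;

/* Pretend compaction pass: may capture a page out of luck even when the
 * scan itself is skipped (e.g. an IRQ freed a suitable page meanwhile). */
static enum compact_result compact(void **capture, bool skipped, bool lucky)
{
	enum compact_result ret = skipped ? COMPACT_SKIPPED : COMPACT_COMPLETE;
	static char page;

	*capture = lucky ? &page : NULL;
	if (*capture)
		ret = COMPACT_SUCCESS;	/* a captured page is a success */
	return ret;
}

static void *alloc_via_compaction(bool skipped, bool lucky)
{
	void *page = NULL;
	enum compact_result result = compact(&page, skipped, lucky);

	/* Nothing was attempted and nothing captured: no stall to count. */
	if (result == COMPACT_SKIPPED)
		return NULL;

	compactstall++;			/* compaction genuinely stalled us */
	if (!page)
		compactfail++;
	return page;
}

int main(void)
{
	alloc_via_compaction(true, false);	/* skipped: no counters */
	alloc_via_compaction(true, true);	/* skipped but captured */
	alloc_via_compaction(false, false);	/* ran and failed */
	printf("stall=%lu fail=%lu\n", compactstall, compactfail); /* 2 1 */
	return 0;
}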