Import of kernel-5.14.0-570.32.1.el9_6

This commit is contained in:
eabdullin 2025-09-05 14:36:07 +00:00
parent 038d8766a2
commit f7f44ba607
16 changed files with 198 additions and 71 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 6
# #
# Use this spot to avoid future merge conflicts. # Use this spot to avoid future merge conflicts.
# Do not trim this comment. # Do not trim this comment.
RHEL_RELEASE = 570.30.1 RHEL_RELEASE = 570.32.1
# #
# ZSTREAM # ZSTREAM

View File

@ -2908,22 +2908,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err); bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
} }
/* /* Return: 0 on success, negative errno on failure. */
* ==0: not a dump pkt.
* < 0: fails to handle a dump pkt
* > 0: otherwise.
*/
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{ {
int ret = 1; int ret = 0;
u8 pkt_type; u8 pkt_type;
u8 *sk_ptr; u8 *sk_ptr;
unsigned int sk_len; unsigned int sk_len;
u16 seqno; u16 seqno;
u32 dump_size; u32 dump_size;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr; struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev); struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev; struct usb_device *udev = btdata->udev;
@ -2933,30 +2927,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
sk_len = skb->len; sk_len = skb->len;
if (pkt_type == HCI_ACLDATA_PKT) { if (pkt_type == HCI_ACLDATA_PKT) {
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
return 0;
sk_ptr += HCI_ACL_HDR_SIZE; sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE; sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
} else {
event_hdr = hci_event_hdr(skb);
} }
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return 0;
sk_ptr += HCI_EVENT_HDR_SIZE; sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE; sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr; dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data))
|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
|| (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return 0;
/*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno); seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) { if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@ -3030,17 +3008,84 @@ out:
return ret; return ret;
} }
/* Return: true if the ACL packet carries a QCA memdump fragment. */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *acl;
	struct hci_event_hdr *evt;
	struct qca_dump_hdr *dump;
	u8 *p = skb->data;
	unsigned int rem = skb->len;

	/* Dump fragments are delivered on the dedicated memdump ACL handle. */
	acl = hci_acl_hdr(skb);
	if (le16_to_cpu(acl->handle) != QCA_MEMDUMP_ACL_HANDLE)
		return false;

	p += HCI_ACL_HDR_SIZE;
	rem -= HCI_ACL_HDR_SIZE;

	/* The ACL payload must wrap a vendor-specific HCI event. */
	evt = (struct hci_event_hdr *)p;
	if (evt->evt != HCI_VENDOR_PKT)
		return false;
	if (evt->plen != rem - HCI_EVENT_HDR_SIZE)
		return false;

	p += HCI_EVENT_HDR_SIZE;
	rem -= HCI_EVENT_HDR_SIZE;

	/* The event payload must hold a well-formed QCA dump header. */
	dump = (struct qca_dump_hdr *)p;
	if (rem < offsetof(struct qca_dump_hdr, data))
		return false;
	if (dump->vse_class != QCA_MEMDUMP_VSE_CLASS)
		return false;
	if (dump->msg_type != QCA_MEMDUMP_MSG_TYPE)
		return false;

	return true;
}
/* Return: true if the event packet carries a QCA memdump fragment. */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *evt;
	struct qca_dump_hdr *dump;
	u8 *p = skb->data;
	unsigned int rem = skb->len;

	/* Dump fragments arrive as vendor-specific HCI events. */
	evt = hci_event_hdr(skb);
	if (evt->evt != HCI_VENDOR_PKT)
		return false;
	if (evt->plen != rem - HCI_EVENT_HDR_SIZE)
		return false;

	p += HCI_EVENT_HDR_SIZE;
	rem -= HCI_EVENT_HDR_SIZE;

	/* The event payload must hold a well-formed QCA dump header. */
	dump = (struct qca_dump_hdr *)p;
	if (rem < offsetof(struct qca_dump_hdr, data))
		return false;
	if (dump->vse_class != QCA_MEMDUMP_VSE_CLASS)
		return false;
	if (dump->msg_type != QCA_MEMDUMP_MSG_TYPE)
		return false;

	return true;
}
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{ {
if (handle_dump_pkt_qca(hdev, skb)) if (acl_pkt_is_dump_qca(hdev, skb))
return 0; return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb); return hci_recv_frame(hdev, skb);
} }
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{ {
if (handle_dump_pkt_qca(hdev, skb)) if (evt_pkt_is_dump_qca(hdev, skb))
return 0; return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb); return hci_recv_frame(hdev, skb);
} }

View File

@ -833,9 +833,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl); hid_ishtp_cl);
dev_dbg(ishtp_device(cl_device), "%s\n", __func__); dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
hid_ishtp_cl_deinit(hid_ishtp_cl);
ishtp_put_device(cl_device); ishtp_put_device(cl_device);
ishtp_hid_remove(client_data); ishtp_hid_remove(client_data);
hid_ishtp_cl_deinit(hid_ishtp_cl);
hid_ishtp_cl = NULL; hid_ishtp_cl = NULL;

View File

@ -261,12 +261,14 @@ err_hid_data:
*/ */
void ishtp_hid_remove(struct ishtp_cl_data *client_data) void ishtp_hid_remove(struct ishtp_cl_data *client_data)
{ {
void *data;
int i; int i;
for (i = 0; i < client_data->num_hid_devices; ++i) { for (i = 0; i < client_data->num_hid_devices; ++i) {
if (client_data->hid_sensor_hubs[i]) { if (client_data->hid_sensor_hubs[i]) {
kfree(client_data->hid_sensor_hubs[i]->driver_data); data = client_data->hid_sensor_hubs[i]->driver_data;
hid_destroy_device(client_data->hid_sensor_hubs[i]); hid_destroy_device(client_data->hid_sensor_hubs[i]);
kfree(data);
client_data->hid_sensor_hubs[i] = NULL; client_data->hid_sensor_hubs[i] = NULL;
} }
} }

View File

@ -2174,14 +2174,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (!rdev_set_badblocks(rdev, sect, s, 0)) if (!rdev_set_badblocks(rdev, sect, s, 0))
abort = 1; abort = 1;
} }
if (abort) { if (abort)
conf->recovery_disabled =
mddev->recovery_disabled;
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_done_sync(mddev, r1_bio->sectors, 0);
put_buf(r1_bio);
return 0; return 0;
}
/* Try next page */ /* Try next page */
sectors -= s; sectors -= s;
sect += s; sect += s;
@ -2320,10 +2315,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
int disks = conf->raid_disks * 2; int disks = conf->raid_disks * 2;
struct bio *wbio; struct bio *wbio;
if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
/* ouch - failed to read all of that. */ /*
if (!fix_sync_read_error(r1_bio)) * ouch - failed to read all of that.
* No need to fix read error for check/repair
* because all member disks are read.
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
!fix_sync_read_error(r1_bio)) {
conf->recovery_disabled = mddev->recovery_disabled;
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_done_sync(mddev, r1_bio->sectors, 0);
put_buf(r1_bio);
return; return;
}
}
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
process_checks(r1_bio); process_checks(r1_bio);

View File

@ -813,6 +813,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
host->eject = true; host->eject = true;
cancel_work_sync(&host->handle_req); cancel_work_sync(&host->handle_req);
cancel_delayed_work_sync(&host->poll_card);
mutex_lock(&host->host_mutex); mutex_lock(&host->host_mutex);
if (host->req) { if (host->req) {

View File

@ -854,6 +854,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
u32 context_id = vmci_get_context_id(); u32 context_id = vmci_get_context_id();
struct vmci_event_qp ev; struct vmci_event_qp ev;
memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID); VMCI_CONTEXT_RESOURCE_ID);
@ -1467,6 +1468,7 @@ static int qp_notify_peer(bool attach,
* kernel. * kernel.
*/ */
memset(&ev, 0, sizeof(ev));
ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
VMCI_CONTEXT_RESOURCE_ID); VMCI_CONTEXT_RESOURCE_ID);

View File

@ -292,7 +292,7 @@ static int packet_read_list(char *data, size_t * pread_length)
remaining_bytes = *pread_length; remaining_bytes = *pread_length;
bytes_read = rbu_data.packet_read_count; bytes_read = rbu_data.packet_read_count;
list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) { list_for_each_entry(newpacket, &packet_data_head.list, list) {
bytes_copied = do_packet_read(pdest, newpacket, bytes_copied = do_packet_read(pdest, newpacket,
remaining_bytes, bytes_read, &temp_count); remaining_bytes, bytes_read, &temp_count);
remaining_bytes -= bytes_copied; remaining_bytes -= bytes_copied;
@ -315,7 +315,7 @@ static void packet_empty_list(void)
{ {
struct packet_data *newpacket, *tmp; struct packet_data *newpacket, *tmp;
list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) { list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) {
list_del(&newpacket->list); list_del(&newpacket->list);
/* /*

View File

@ -1480,8 +1480,11 @@ nfsd4_run_cb_work(struct work_struct *work)
nfsd4_process_cb_update(cb); nfsd4_process_cb_update(cb);
clnt = clp->cl_cb_client; clnt = clp->cl_cb_client;
if (!clnt) { if (!clnt || clp->cl_state == NFSD4_COURTESY) {
/* Callback channel broken, or client killed; give up: */ /*
* Callback channel broken, client killed or
* nfs4_client in courtesy state; give up.
*/
nfsd41_destroy_cb(cb); nfsd41_destroy_cb(cb);
return; return;
} }

View File

@ -1385,11 +1385,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; ctx->acdirmax = ctx->acregmax = HZ * result.uint_32;
break; break;
case Opt_closetimeo: case Opt_closetimeo:
ctx->closetimeo = HZ * result.uint_32; if (result.uint_32 > SMB3_MAX_DCLOSETIMEO / HZ) {
if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) {
cifs_errorf(fc, "closetimeo too large\n"); cifs_errorf(fc, "closetimeo too large\n");
goto cifs_parse_mount_err; goto cifs_parse_mount_err;
} }
ctx->closetimeo = HZ * result.uint_32;
break; break;
case Opt_echo_interval: case Opt_echo_interval:
ctx->echo_interval = result.uint_32; ctx->echo_interval = result.uint_32;

View File

@ -47,6 +47,22 @@ struct padata_mt_job_state {
static void padata_free_pd(struct parallel_data *pd); static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work); static void __init padata_mt_helper(struct work_struct *work);
/* Take an additional reference on @pd. */
static inline void padata_get_pd(struct parallel_data *pd)
{
	refcount_inc(&pd->refcnt);
}
/* Drop @cnt references on @pd; free it when the count reaches zero. */
static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
{
	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}
/* Drop a single reference on @pd. */
static inline void padata_put_pd(struct parallel_data *pd)
{
	padata_put_pd_cnt(pd, 1);
}
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{ {
int cpu, target_cpu; int cpu, target_cpu;
@ -206,7 +222,7 @@ int padata_do_parallel(struct padata_shell *ps,
if ((pinst->flags & PADATA_RESET)) if ((pinst->flags & PADATA_RESET))
goto out; goto out;
refcount_inc(&pd->refcnt); padata_get_pd(pd);
padata->pd = pd; padata->pd = pd;
padata->cb_cpu = *cb_cpu; padata->cb_cpu = *cb_cpu;
@ -336,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd)
smp_mb(); smp_mb();
reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
if (!list_empty(&reorder->list) && padata_find_next(pd, false)) if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
/*
* Other context(eg. the padata_serial_worker) can finish the request.
* To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
*/
padata_get_pd(pd);
queue_work(pinst->serial_wq, &pd->reorder_work); queue_work(pinst->serial_wq, &pd->reorder_work);
}
} }
static void invoke_padata_reorder(struct work_struct *work) static void invoke_padata_reorder(struct work_struct *work)
@ -348,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
pd = container_of(work, struct parallel_data, reorder_work); pd = container_of(work, struct parallel_data, reorder_work);
padata_reorder(pd); padata_reorder(pd);
local_bh_enable(); local_bh_enable();
/* Pairs with putting the reorder_work in the serial_wq */
padata_put_pd(pd);
} }
static void padata_serial_worker(struct work_struct *serial_work) static void padata_serial_worker(struct work_struct *serial_work)
@ -380,8 +404,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
} }
local_bh_enable(); local_bh_enable();
if (refcount_sub_and_test(cnt, &pd->refcnt)) padata_put_pd_cnt(pd, cnt);
padata_free_pd(pd);
} }
/** /**
@ -521,13 +544,6 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
ps.chunk_size = max(ps.chunk_size, 1ul); ps.chunk_size = max(ps.chunk_size, 1ul);
ps.chunk_size = roundup(ps.chunk_size, job->align); ps.chunk_size = roundup(ps.chunk_size, job->align);
/*
* chunk_size can be 0 if the caller sets min_chunk to 0. So force it
* to at least 1 to prevent divide-by-0 panic in padata_mt_helper().`
*/
if (!ps.chunk_size)
ps.chunk_size = 1U;
list_for_each_entry(pw, &works, pw_list) list_for_each_entry(pw, &works, pw_list)
if (job->numa_aware) { if (job->numa_aware) {
int old_node = atomic_read(&last_used_nid); int old_node = atomic_read(&last_used_nid);
@ -688,8 +704,7 @@ static int padata_replace(struct padata_instance *pinst)
synchronize_rcu(); synchronize_rcu();
list_for_each_entry_continue_reverse(ps, &pinst->pslist, list) list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
if (refcount_dec_and_test(&ps->opd->refcnt)) padata_put_pd(ps->opd);
padata_free_pd(ps->opd);
pinst->flags &= ~PADATA_RESET; pinst->flags &= ~PADATA_RESET;
@ -977,7 +992,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
pinst = kobj2pinst(kobj); pinst = kobj2pinst(kobj);
pentry = attr2pentry(attr); pentry = attr2pentry(attr);
if (pentry->show) if (pentry->store)
ret = pentry->store(pinst, attr, buf, count); ret = pentry->store(pinst, attr, buf, count);
return ret; return ret;
@ -1128,11 +1143,16 @@ void padata_free_shell(struct padata_shell *ps)
if (!ps) if (!ps)
return; return;
/*
* Wait for all _do_serial calls to finish to avoid touching
* freed pd's and ps's.
*/
synchronize_rcu();
mutex_lock(&ps->pinst->lock); mutex_lock(&ps->pinst->lock);
list_del(&ps->list); list_del(&ps->list);
pd = rcu_dereference_protected(ps->pd, 1); pd = rcu_dereference_protected(ps->pd, 1);
if (refcount_dec_and_test(&pd->refcnt)) padata_put_pd(pd);
padata_free_pd(pd);
mutex_unlock(&ps->pinst->lock); mutex_unlock(&ps->pinst->lock);
kfree(ps); kfree(ps);

View File

@ -175,6 +175,11 @@ struct hfsc_sched {
#define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */ #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */
/*
 * Return true when @cl is already linked into the eligible tree or the
 * virtual-time tree, i.e. it currently participates in scheduling.
 */
static bool cl_in_el_or_vttree(struct hfsc_class *cl)
{
	bool in_vttree = (cl->cl_flags & HFSC_FSC) && cl->cl_nactive;
	bool in_eltree = (cl->cl_flags & HFSC_RSC) &&
			 !RB_EMPTY_NODE(&cl->el_node);

	return in_vttree || in_eltree;
}
/* /*
* eligible tree holds backlogged classes being sorted by their eligible times. * eligible tree holds backlogged classes being sorted by their eligible times.
@ -203,7 +208,10 @@ eltree_insert(struct hfsc_class *cl)
static inline void static inline void
eltree_remove(struct hfsc_class *cl) eltree_remove(struct hfsc_class *cl)
{ {
rb_erase(&cl->el_node, &cl->sched->eligible); if (!RB_EMPTY_NODE(&cl->el_node)) {
rb_erase(&cl->el_node, &cl->sched->eligible);
RB_CLEAR_NODE(&cl->el_node);
}
} }
static inline void static inline void
@ -1037,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL) if (cl == NULL)
return -ENOBUFS; return -ENOBUFS;
RB_CLEAR_NODE(&cl->el_node);
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) { if (err) {
kfree(cl); kfree(cl);
@ -1225,7 +1235,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0) /* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
* needs to be called explicitly to remove a class from vttree. * needs to be called explicitly to remove a class from vttree.
*/ */
update_vf(cl, 0, 0); if (cl->cl_nactive)
update_vf(cl, 0, 0);
if (cl->cl_flags & HFSC_RSC) if (cl->cl_flags & HFSC_RSC)
eltree_remove(cl); eltree_remove(cl);
} }
@ -1565,7 +1576,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err; return err;
} }
if (first) { sch->qstats.backlog += len;
sch->q.qlen++;
if (first && !cl_in_el_or_vttree(cl)) {
if (cl->cl_flags & HFSC_RSC) if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len); init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC) if (cl->cl_flags & HFSC_FSC)
@ -1580,9 +1594,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
} }
sch->qstats.backlog += len;
sch->q.qlen++;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }

View File

@ -1328,13 +1328,15 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
stab = rtnl_dereference(q->root->stab); stab = rtnl_dereference(q->root->stab);
oper = rtnl_dereference(q->oper_sched); rcu_read_lock();
oper = rcu_dereference(q->oper_sched);
if (oper) if (oper)
taprio_update_queue_max_sdu(q, oper, stab); taprio_update_queue_max_sdu(q, oper, stab);
admin = rtnl_dereference(q->admin_sched); admin = rcu_dereference(q->admin_sched);
if (admin) if (admin)
taprio_update_queue_max_sdu(q, admin, stab); taprio_update_queue_max_sdu(q, admin, stab);
rcu_read_unlock();
break; break;
} }

View File

@ -817,12 +817,20 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
goto exit; goto exit;
} }
/* Get net to avoid freed tipc_crypto when delete namespace */
if (!maybe_get_net(aead->crypto->net)) {
tipc_bearer_put(b);
rc = -ENODEV;
goto exit;
}
/* Now, do encrypt */ /* Now, do encrypt */
rc = crypto_aead_encrypt(req); rc = crypto_aead_encrypt(req);
if (rc == -EINPROGRESS || rc == -EBUSY) if (rc == -EINPROGRESS || rc == -EBUSY)
return rc; return rc;
tipc_bearer_put(b); tipc_bearer_put(b);
put_net(aead->crypto->net);
exit: exit:
kfree(ctx); kfree(ctx);
@ -860,6 +868,7 @@ static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
kfree(tx_ctx); kfree(tx_ctx);
tipc_bearer_put(b); tipc_bearer_put(b);
tipc_aead_put(aead); tipc_aead_put(aead);
put_net(net);
} }
/** /**

View File

@ -1,3 +1,29 @@
* Mon Jul 28 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.32.1.el9_6]
- net_sched: hfsc: Address reentrant enqueue adding class to eltree twice (Davide Caratti) [RHEL-97522] {CVE-2025-38001 CVE-2025-37890}
- sch_hfsc: Fix qlen accounting bug when using peek in hfsc_enqueue() (Davide Caratti) [RHEL-97522] {CVE-2025-38000}
- net_sched: hfsc: Fix a UAF vulnerability in class with netem as child qdisc (Davide Caratti) [RHEL-97522] {CVE-2025-37890}
- sch_hfsc: make hfsc_qlen_notify() idempotent (Ivan Vecera) [RHEL-97522]
- HID: intel-ish-hid: Fix use-after-free issue in ishtp_hid_remove() (CKI Backport Bot) [RHEL-98847] {CVE-2025-21928}
- HID: intel-ish-hid: Fix use-after-free issue in hid_ishtp_cl_remove() (CKI Backport Bot) [RHEL-98871] {CVE-2025-21929}
Resolves: RHEL-97522, RHEL-98847, RHEL-98871
* Sat Jul 26 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.31.1.el9_6]
- Bluetooth: btusb: avoid NULL pointer dereference in skb_dequeue() (David Marlin) [RHEL-95324] {CVE-2025-37918}
- memstick: rtsx_usb_ms: Fix slab-use-after-free in rtsx_usb_ms_drv_remove (Desnes Nunes) [RHEL-99029] {CVE-2025-22020}
- misc/vmw_vmci: fix an infoleak in vmci_host_do_receive_datagram() (John W. Linville) [RHEL-97499] {CVE-2022-49788}
- net: tipc: fix refcount warning in tipc_aead_encrypt (Xin Long) [RHEL-103087]
- net/tipc: fix slab-use-after-free Read in tipc_aead_encrypt_done (CKI Backport Bot) [RHEL-103087] {CVE-2025-38052}
- md/raid1: Add check for missing source disk in process_checks() (CKI Backport Bot) [RHEL-97439]
- net/sched: fix use-after-free in taprio_dev_notifier (CKI Backport Bot) [RHEL-101317] {CVE-2025-38087}
- padata: avoid UAF for reorder_work (Rafael Aquini) [RHEL-97031] {CVE-2025-21727 CVE-2025-21726}
- padata: fix UAF in padata_reorder (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
- padata: add pd get/put refcnt helper (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
- padata: fix sysfs store callback check (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
- padata: Clean up in padata_do_multithreaded() (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
- platform/x86: dell_rbu: Fix list usage (David Arcari) [RHEL-100908]
- cifs: Fix integer overflow while processing closetimeo mount option (CKI Backport Bot) [RHEL-87900] {CVE-2025-21962}
Resolves: RHEL-100908, RHEL-101317, RHEL-103087, RHEL-87900, RHEL-95324, RHEL-97031, RHEL-97439, RHEL-97499, RHEL-99029
* Thu Jul 24 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.30.1.el9_6] * Thu Jul 24 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.30.1.el9_6]
- net_sched: hfsc: Fix a UAF vulnerability in class handling (Davide Caratti) [RHEL-95853] {CVE-2025-37797} - net_sched: hfsc: Fix a UAF vulnerability in class handling (Davide Caratti) [RHEL-95853] {CVE-2025-37797}
Resolves: RHEL-95853 Resolves: RHEL-95853