Import of kernel-5.14.0-570.32.1.el9_6
parent 038d8766a2
commit f7f44ba607
@@ -12,7 +12,7 @@ RHEL_MINOR = 6
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 570.30.1
+RHEL_RELEASE = 570.32.1
 
 #
 # ZSTREAM
@@ -2908,22 +2908,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
 		bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
 }
 
-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
 static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	int ret = 1;
+	int ret = 0;
 	u8 pkt_type;
 	u8 *sk_ptr;
 	unsigned int sk_len;
 	u16 seqno;
 	u32 dump_size;
 
-	struct hci_event_hdr *event_hdr;
-	struct hci_acl_hdr *acl_hdr;
 	struct qca_dump_hdr *dump_hdr;
 	struct btusb_data *btdata = hci_get_drvdata(hdev);
 	struct usb_device *udev = btdata->udev;
@@ -2933,30 +2927,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 	sk_len = skb->len;
 
 	if (pkt_type == HCI_ACLDATA_PKT) {
-		acl_hdr = hci_acl_hdr(skb);
-		if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
-			return 0;
 		sk_ptr += HCI_ACL_HDR_SIZE;
 		sk_len -= HCI_ACL_HDR_SIZE;
-		event_hdr = (struct hci_event_hdr *)sk_ptr;
-	} else {
-		event_hdr = hci_event_hdr(skb);
 	}
 
-	if ((event_hdr->evt != HCI_VENDOR_PKT)
-		|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
-		return 0;
-
 	sk_ptr += HCI_EVENT_HDR_SIZE;
 	sk_len -= HCI_EVENT_HDR_SIZE;
 
 	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
-	if ((sk_len < offsetof(struct qca_dump_hdr, data))
-		|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
-		|| (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
-		return 0;
-
-	/*it is dump pkt now*/
 	seqno = le16_to_cpu(dump_hdr->seqno);
 	if (seqno == 0) {
 		set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3030,17 +3008,84 @@ out:
 	return ret;
 }
 
+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	u8 *sk_ptr;
+	unsigned int sk_len;
+
+	struct hci_event_hdr *event_hdr;
+	struct hci_acl_hdr *acl_hdr;
+	struct qca_dump_hdr *dump_hdr;
+
+	sk_ptr = skb->data;
+	sk_len = skb->len;
+
+	acl_hdr = hci_acl_hdr(skb);
+	if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
+		return false;
+
+	sk_ptr += HCI_ACL_HDR_SIZE;
+	sk_len -= HCI_ACL_HDR_SIZE;
+	event_hdr = (struct hci_event_hdr *)sk_ptr;
+
+	if ((event_hdr->evt != HCI_VENDOR_PKT) ||
+	    (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+		return false;
+
+	sk_ptr += HCI_EVENT_HDR_SIZE;
+	sk_len -= HCI_EVENT_HDR_SIZE;
+
+	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+		return false;
+
+	return true;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	u8 *sk_ptr;
+	unsigned int sk_len;
+
+	struct hci_event_hdr *event_hdr;
+	struct qca_dump_hdr *dump_hdr;
+
+	sk_ptr = skb->data;
+	sk_len = skb->len;
+
+	event_hdr = hci_event_hdr(skb);
+
+	if ((event_hdr->evt != HCI_VENDOR_PKT)
+	    || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+		return false;
+
+	sk_ptr += HCI_EVENT_HDR_SIZE;
+	sk_len -= HCI_EVENT_HDR_SIZE;
+
+	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+		return false;
+
+	return true;
+}
+
 static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	if (handle_dump_pkt_qca(hdev, skb))
-		return 0;
+	if (acl_pkt_is_dump_qca(hdev, skb))
+		return handle_dump_pkt_qca(hdev, skb);
 	return hci_recv_frame(hdev, skb);
 }
 
 static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	if (handle_dump_pkt_qca(hdev, skb))
-		return 0;
+	if (evt_pkt_is_dump_qca(hdev, skb))
+		return handle_dump_pkt_qca(hdev, skb);
 	return hci_recv_frame(hdev, skb);
 }
 
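The three btusb hunks above (tied to the CVE-2025-37918 entry in the changelog below) split dump-packet detection out of handle_dump_pkt_qca() into the side-effect-free predicates acl_pkt_is_dump_qca() and evt_pkt_is_dump_qca(); the receive paths now check first and only then hand the frame to the dump handler, so ordinary traffic reaches hci_recv_frame() intact. A minimal stand-alone C sketch of that check-then-consume shape (all names below are illustrative stand-ins, not the btusb API):

/* Illustration only: the check-then-consume dispatch shape.
 * Hypothetical stand-ins, not the btusb API. */
#include <stdbool.h>
#include <stdio.h>

struct frame { const char *tag; };

/* pure predicate: inspects the frame, never modifies it */
static bool frame_is_dump(const struct frame *f)
{
	return f->tag[0] == 'D';
}

/* consumer: only ever called on frames the predicate approved */
static int consume_dump(struct frame *f)
{
	printf("dump: %s\n", f->tag);
	return 0;
}

static int deliver(struct frame *f)
{
	printf("deliver: %s\n", f->tag);
	return 0;
}

static int recv_frame(struct frame *f)
{
	/* Old shape: one function both detected and consumed, so a frame
	 * that failed detection partway could already be half-parsed.
	 * New shape: detect read-only, then hand off whole frames. */
	if (frame_is_dump(f))
		return consume_dump(f);
	return deliver(f);
}

int main(void)
{
	struct frame a = { "Dump#0" }, b = { "Audio" };
	recv_frame(&a);
	recv_frame(&b);
	return 0;
}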
@@ -833,9 +833,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
 					       hid_ishtp_cl);
 
 	dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
-	hid_ishtp_cl_deinit(hid_ishtp_cl);
 	ishtp_put_device(cl_device);
 	ishtp_hid_remove(client_data);
+	hid_ishtp_cl_deinit(hid_ishtp_cl);
 
 	hid_ishtp_cl = NULL;
 
@@ -261,12 +261,14 @@ err_hid_data:
  */
 void ishtp_hid_remove(struct ishtp_cl_data *client_data)
 {
+	void *data;
 	int i;
 
 	for (i = 0; i < client_data->num_hid_devices; ++i) {
 		if (client_data->hid_sensor_hubs[i]) {
-			kfree(client_data->hid_sensor_hubs[i]->driver_data);
+			data = client_data->hid_sensor_hubs[i]->driver_data;
 			hid_destroy_device(client_data->hid_sensor_hubs[i]);
+			kfree(data);
 			client_data->hid_sensor_hubs[i] = NULL;
 		}
 	}
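Both intel-ish-hid hunks above (CVE-2025-21929 and CVE-2025-21928 in the changelog below) are teardown-ordering fixes: free a member only after the object that can still reach it has been destroyed, and deinit the client only after ishtp_hid_remove() has finished with it. A minimal stand-alone sketch of the save-destroy-free ordering (hypothetical names, not the ISHTP API):

/* Illustration only: stash a member pointer, destroy the owner, then
 * free the member. Freeing first invites a use-after-free if teardown
 * callbacks still reach driver_data. Hypothetical names. */
#include <stdlib.h>

struct device { void *driver_data; };

static void device_destroy(struct device *dev)
{
	/* in a real driver, callbacks here may still read driver_data */
	free(dev);
}

static void remove_device(struct device *dev)
{
	void *data = dev->driver_data; /* save first */
	device_destroy(dev);           /* owner fully torn down */
	free(data);                    /* member freed last: no UAF window */
}

int main(void)
{
	struct device *dev = calloc(1, sizeof(*dev));
	dev->driver_data = malloc(16);
	remove_device(dev);
	return 0;
}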
@@ -2174,14 +2174,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 			if (!rdev_set_badblocks(rdev, sect, s, 0))
 				abort = 1;
 		}
-		if (abort) {
-			conf->recovery_disabled =
-				mddev->recovery_disabled;
-			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_done_sync(mddev, r1_bio->sectors, 0);
-			put_buf(r1_bio);
+		if (abort)
 			return 0;
-		}
+
 		/* Try next page */
 		sectors -= s;
 		sect += s;
@@ -2320,10 +2315,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 	int disks = conf->raid_disks * 2;
 	struct bio *wbio;
 
-	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
-		/* ouch - failed to read all of that. */
-		if (!fix_sync_read_error(r1_bio))
+	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+		/*
+		 * ouch - failed to read all of that.
+		 * No need to fix read error for check/repair
+		 * because all member disks are read.
+		 */
+		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
+		    !fix_sync_read_error(r1_bio)) {
+			conf->recovery_disabled = mddev->recovery_disabled;
+			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+			md_done_sync(mddev, r1_bio->sectors, 0);
+			put_buf(r1_bio);
 			return;
+		}
+	}
 
 	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		process_checks(r1_bio);
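The two raid1 hunks above hoist the abort cleanup out of fix_sync_read_error() into its caller sync_request_write(), which additionally skips the read-error repair for check/repair passes (MD_RECOVERY_REQUESTED), where every member disk has already been read. A minimal sketch of the hoist-cleanup-to-the-caller shape (illustrative names only):

/* Illustration only: the helper just reports failure; the one caller
 * that owns the resources performs the abort cleanup, and check/repair
 * passes skip the repair attempt entirely. Illustrative names. */
#include <stdbool.h>
#include <stdio.h>

static bool repair_read_error(int unreadable)
{
	return !unreadable; /* no cleanup here anymore, just a verdict */
}

static void finish_sync(bool requested_check, int unreadable)
{
	if (unreadable &&
	    (requested_check || !repair_read_error(unreadable))) {
		/* single cleanup site, previously duplicated in the helper */
		printf("sync aborted, recovery disabled\n");
		return;
	}
	printf("sync continues\n");
}

int main(void)
{
	finish_sync(false, 1); /* normal sync: repair fails, abort */
	finish_sync(true, 1);  /* check/repair: abort without repairing */
	finish_sync(false, 0); /* clean read */
	return 0;
}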
@@ -813,6 +813,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
 
 	host->eject = true;
 	cancel_work_sync(&host->handle_req);
+	cancel_delayed_work_sync(&host->poll_card);
 
 	mutex_lock(&host->host_mutex);
 	if (host->req) {
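The one-line rtsx_usb_ms hunk above (the CVE-2025-22020 entry below) cancels the delayed poll_card work in the remove path; without it, the delayed work could still fire after the host structure was freed. A stand-alone userspace analogue of cancel-before-free (the *_sync_ helper is a stand-in for the kernel workqueue API):

/* Illustration only, userspace analogue: teardown must cancel every
 * pending callback, delayed ones included, before freeing their state. */
#include <stdio.h>
#include <stdlib.h>

struct host { int alive; };

struct work {
	void (*fn)(struct host *);
	int cancelled;
};

static void poll_card(struct host *h)
{
	printf("polling, alive=%d\n", h->alive); /* UAF if h were freed */
}

static void cancel_delayed_work_sync_(struct work *w)
{
	w->cancelled = 1; /* the real API also waits for a running instance */
}

static void run_if_pending(struct work *w, struct host *h)
{
	if (!w->cancelled)
		w->fn(h);
}

int main(void)
{
	struct host *h = calloc(1, sizeof(*h));
	struct work delayed = { poll_card, 0 };

	h->alive = 1;
	cancel_delayed_work_sync_(&delayed); /* cancel before freeing */
	free(h);
	h = NULL;
	run_if_pending(&delayed, h); /* cancelled: freed host never touched */
	return 0;
}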
@@ -854,6 +854,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
 	u32 context_id = vmci_get_context_id();
 	struct vmci_event_qp ev;
 
+	memset(&ev, 0, sizeof(ev));
 	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
 	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 					  VMCI_CONTEXT_RESOURCE_ID);
@@ -1467,6 +1468,7 @@ static int qp_notify_peer(bool attach,
 	 * kernel.
 	 */
 
+	memset(&ev, 0, sizeof(ev));
 	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
 	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 					  VMCI_CONTEXT_RESOURCE_ID);
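Both vmw_vmci hunks above (the CVE-2022-49788 infoleak entry below) zero the whole vmci_event_qp before filling individual fields: a struct assigned field-by-field leaves padding bytes and unset members holding stale stack data, which then leaks when the struct is copied out. A compilable demonstration of the pattern:

/* Compilable demo: padding and unset members hold stale bytes unless
 * the struct is zeroed first; 'struct event' is a made-up stand-in for
 * vmci_event_qp. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event {
	uint8_t  type;    /* 3+ padding bytes follow on most ABIs */
	uint32_t id;
	uint64_t payload; /* not always assigned */
};

static void fill(struct event *ev, int set_payload)
{
	memset(ev, 0, sizeof(*ev)); /* the fix: nothing stale survives */
	ev->type = 1;
	ev->id = 42;
	if (set_payload)
		ev->payload = 7;
}

int main(void)
{
	struct event ev;

	fill(&ev, 0);
	/* every byte, padding included, is now deterministic */
	for (size_t i = 0; i < sizeof(ev); i++)
		printf("%02x", ((unsigned char *)&ev)[i]);
	printf("\n");
	return 0;
}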
@@ -292,7 +292,7 @@ static int packet_read_list(char *data, size_t * pread_length)
 	remaining_bytes = *pread_length;
 	bytes_read = rbu_data.packet_read_count;
 
-	list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) {
+	list_for_each_entry(newpacket, &packet_data_head.list, list) {
 		bytes_copied = do_packet_read(pdest, newpacket,
 				remaining_bytes, bytes_read, &temp_count);
 		remaining_bytes -= bytes_copied;
@@ -315,7 +315,7 @@ static void packet_empty_list(void)
 {
 	struct packet_data *newpacket, *tmp;
 
-	list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) {
+	list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) {
 		list_del(&newpacket->list);
 
 		/*
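The two dell_rbu hunks above fix a classic list API misuse: list_for_each_entry() and list_for_each_entry_safe() must be given the list head itself, not head->next; passing head->next makes the loop treat the first element as the sentinel, skipping it and ultimately casting the head as an entry. A minimal userspace rendition of the idiom (the helpers below imitate, but are not, the kernel's list.h):

/* Illustration only: a doubly linked circular list where the head is a
 * sentinel. The iterator must receive the head itself; head->next is
 * already the first entry. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct packet { int id; struct list_head list; };

#define container_of_(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_packet(pos, head)                              \
	for (pos = container_of_((head)->next, struct packet, list); \
	     &pos->list != (head);                                    \
	     pos = container_of_(pos->list.next, struct packet, list))

static void list_add_tail_(struct list_head *new, struct list_head *head)
{
	new->next = head;
	new->prev = head->prev;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct packet a = { 1, { 0, 0 } }, b = { 2, { 0, 0 } };
	struct packet *p;

	list_add_tail_(&a.list, &head);
	list_add_tail_(&b.list, &head);

	/* pass &head, never head.next: both entries are visited */
	list_for_each_packet(p, &head)
		printf("packet %d\n", p->id);
	return 0;
}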
@@ -1480,8 +1480,11 @@ nfsd4_run_cb_work(struct work_struct *work)
 		nfsd4_process_cb_update(cb);
 
 	clnt = clp->cl_cb_client;
-	if (!clnt) {
-		/* Callback channel broken, or client killed; give up: */
+	if (!clnt || clp->cl_state == NFSD4_COURTESY) {
+		/*
+		 * Callback channel broken, client killed or
+		 * nfs4_client in courtesy state; give up.
+		 */
 		nfsd41_destroy_cb(cb);
 		return;
 	}
@@ -1385,11 +1385,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
 		ctx->acdirmax = ctx->acregmax = HZ * result.uint_32;
 		break;
 	case Opt_closetimeo:
-		ctx->closetimeo = HZ * result.uint_32;
-		if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) {
+		if (result.uint_32 > SMB3_MAX_DCLOSETIMEO / HZ) {
 			cifs_errorf(fc, "closetimeo too large\n");
 			goto cifs_parse_mount_err;
 		}
+		ctx->closetimeo = HZ * result.uint_32;
 		break;
 	case Opt_echo_interval:
 		ctx->echo_interval = result.uint_32;
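The cifs hunk above (CVE-2025-21962 below) validates before multiplying: HZ * result.uint_32 can wrap for large mount-option values and sail past a range check performed on the product, while comparing the raw input against the limit divided by HZ cannot overflow. A small compilable sketch (HZ and MAX_DCLOSETIMEO here are made-up stand-ins for the kernel's HZ and SMB3_MAX_DCLOSETIMEO):

/* Compilable sketch: check the operand, not the product. */
#include <stdint.h>
#include <stdio.h>

#define HZ               1000u
#define MAX_DCLOSETIMEO  (1u << 30)

static int parse_closetimeo(uint32_t user_val, uint32_t *out)
{
	if (user_val > MAX_DCLOSETIMEO / HZ)
		return -1;    /* would wrap or exceed the cap */
	*out = HZ * user_val; /* safe: product fits by construction */
	return 0;
}

int main(void)
{
	uint32_t t;

	printf("%d\n", parse_closetimeo(5, &t));        /* 0: accepted */
	printf("%d\n", parse_closetimeo(4294967u, &t)); /* -1: rejected */
	return 0;
}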
@@ -47,6 +47,22 @@ struct padata_mt_job_state {
 static void padata_free_pd(struct parallel_data *pd);
 static void __init padata_mt_helper(struct work_struct *work);
 
+static inline void padata_get_pd(struct parallel_data *pd)
+{
+	refcount_inc(&pd->refcnt);
+}
+
+static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
+{
+	if (refcount_sub_and_test(cnt, &pd->refcnt))
+		padata_free_pd(pd);
+}
+
+static inline void padata_put_pd(struct parallel_data *pd)
+{
+	padata_put_pd_cnt(pd, 1);
+}
+
 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 {
 	int cpu, target_cpu;
@@ -206,7 +222,7 @@ int padata_do_parallel(struct padata_shell *ps,
 	if ((pinst->flags & PADATA_RESET))
 		goto out;
 
-	refcount_inc(&pd->refcnt);
+	padata_get_pd(pd);
 	padata->pd = pd;
 	padata->cb_cpu = *cb_cpu;
 
@@ -336,9 +352,15 @@ static void padata_reorder(struct parallel_data *pd)
 	smp_mb();
 
 	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
-	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
+	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+		/*
+		 * Other context(eg. the padata_serial_worker) can finish the request.
+		 * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
+		 */
+		padata_get_pd(pd);
 		queue_work(pinst->serial_wq, &pd->reorder_work);
+	}
 }
 
 static void invoke_padata_reorder(struct work_struct *work)
 {
@@ -348,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
 	pd = container_of(work, struct parallel_data, reorder_work);
 	padata_reorder(pd);
 	local_bh_enable();
+	/* Pairs with putting the reorder_work in the serial_wq */
+	padata_put_pd(pd);
 }
 
 static void padata_serial_worker(struct work_struct *serial_work)
@@ -380,8 +404,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
 	}
 	local_bh_enable();
 
-	if (refcount_sub_and_test(cnt, &pd->refcnt))
-		padata_free_pd(pd);
+	padata_put_pd_cnt(pd, cnt);
 }
 
 /**
@@ -521,13 +544,6 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
 	ps.chunk_size = max(ps.chunk_size, 1ul);
 	ps.chunk_size = roundup(ps.chunk_size, job->align);
 
-	/*
-	 * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
-	 * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().`
-	 */
-	if (!ps.chunk_size)
-		ps.chunk_size = 1U;
-
 	list_for_each_entry(pw, &works, pw_list)
 		if (job->numa_aware) {
 			int old_node = atomic_read(&last_used_nid);
@@ -688,8 +704,7 @@ static int padata_replace(struct padata_instance *pinst)
 	synchronize_rcu();
 
 	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
-		if (refcount_dec_and_test(&ps->opd->refcnt))
-			padata_free_pd(ps->opd);
+		padata_put_pd(ps->opd);
 
 	pinst->flags &= ~PADATA_RESET;
 
@@ -977,7 +992,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
 
 	pinst = kobj2pinst(kobj);
 	pentry = attr2pentry(attr);
-	if (pentry->show)
+	if (pentry->store)
 		ret = pentry->store(pinst, attr, buf, count);
 
 	return ret;
@@ -1128,11 +1143,16 @@ void padata_free_shell(struct padata_shell *ps)
 	if (!ps)
 		return;
 
+	/*
+	 * Wait for all _do_serial calls to finish to avoid touching
+	 * freed pd's and ps's.
+	 */
+	synchronize_rcu();
+
 	mutex_lock(&ps->pinst->lock);
 	list_del(&ps->list);
 	pd = rcu_dereference_protected(ps->pd, 1);
-	if (refcount_dec_and_test(&pd->refcnt))
-		padata_free_pd(pd);
+	padata_put_pd(pd);
 	mutex_unlock(&ps->pinst->lock);
 
 	kfree(ps);
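The padata hunks above wrap the refcount_t operations in padata_get_pd()/padata_put_pd()/padata_put_pd_cnt() and, crucially, take an extra reference before queueing reorder_work, dropping it only when the work has finished, so the worker can never outlive the parallel_data it touches (the CVE-2025-21726/21727 entries below). A userspace analogue with a plain counter standing in for refcount_t:

/* Userspace analogue: the point is the extra get before queueing async
 * work, paired with a put when the work finishes. */
#include <stdio.h>
#include <stdlib.h>

struct pd { int refcnt; };

static void pd_free(struct pd *p)
{
	printf("freed\n");
	free(p);
}

static void pd_put_cnt(struct pd *p, int cnt)
{
	p->refcnt -= cnt;
	if (p->refcnt == 0)
		pd_free(p);
}

static void pd_get(struct pd *p) { p->refcnt++; }
static void pd_put(struct pd *p) { pd_put_cnt(p, 1); }

static void reorder_work(struct pd *p)
{
	/* ... reorder using p ... */
	pd_put(p); /* pairs with the pd_get() taken at queue time */
}

int main(void)
{
	struct pd *p = calloc(1, sizeof(*p));

	p->refcnt = 1;   /* owner's reference */
	pd_get(p);       /* pin p across the queued work */
	pd_put(p);       /* owner may drop its reference early... */
	reorder_work(p); /* ...the work still holds one: no UAF */
	return 0;
}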
@@ -175,6 +175,11 @@ struct hfsc_sched {
 
 #define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
 
+static bool cl_in_el_or_vttree(struct hfsc_class *cl)
+{
+	return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) ||
+		((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node));
+}
+
 /*
  * eligible tree holds backlogged classes being sorted by their eligible times.
@@ -203,7 +208,10 @@ eltree_insert(struct hfsc_class *cl)
 static inline void
 eltree_remove(struct hfsc_class *cl)
 {
-	rb_erase(&cl->el_node, &cl->sched->eligible);
+	if (!RB_EMPTY_NODE(&cl->el_node)) {
+		rb_erase(&cl->el_node, &cl->sched->eligible);
+		RB_CLEAR_NODE(&cl->el_node);
+	}
 }
 
 static inline void
@@ -1037,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	if (cl == NULL)
 		return -ENOBUFS;
 
+	RB_CLEAR_NODE(&cl->el_node);
+
 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 	if (err) {
 		kfree(cl);
@@ -1225,6 +1235,7 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
 	/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
 	 * needs to be called explicitly to remove a class from vttree.
 	 */
-	update_vf(cl, 0, 0);
+	if (cl->cl_nactive)
+		update_vf(cl, 0, 0);
 	if (cl->cl_flags & HFSC_RSC)
 		eltree_remove(cl);
@@ -1565,7 +1576,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 		return err;
 	}
 
-	if (first) {
+	sch->qstats.backlog += len;
+	sch->q.qlen++;
+
+	if (first && !cl_in_el_or_vttree(cl)) {
 		if (cl->cl_flags & HFSC_RSC)
 			init_ed(cl, len);
 		if (cl->cl_flags & HFSC_FSC)
@@ -1580,9 +1594,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
 	}
 
-	sch->qstats.backlog += len;
-	sch->q.qlen++;
-
 	return NET_XMIT_SUCCESS;
 }
 
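The sch_hfsc hunks above make eligible-tree membership explicit and removal idempotent: RB_CLEAR_NODE() marks a class as "not in the tree" at creation and after erase, eltree_remove() becomes a safe no-op on a second call, and the enqueue path consults cl_in_el_or_vttree() before re-inserting (the CVE-2025-38001/38000/37890 entries below). A minimal stand-alone analogue with a boolean standing in for RB_EMPTY_NODE():

/* Stand-alone analogue: removal is idempotent and insertion is guarded
 * by membership, mirroring the RB_EMPTY_NODE/RB_CLEAR_NODE discipline. */
#include <stdbool.h>
#include <stdio.h>

struct node { bool linked; };

static bool node_empty(const struct node *n) { return !n->linked; }
static void node_clear(struct node *n)       { n->linked = false; }
static void tree_insert(struct node *n)      { n->linked = true; }

static void tree_remove(struct node *n)
{
	if (!node_empty(n)) {  /* second call is a no-op, not corruption */
		/* rb_erase(...) would go here */
		node_clear(n); /* RB_CLEAR_NODE equivalent */
	}
}

int main(void)
{
	struct node n;

	node_clear(&n);     /* RB_CLEAR_NODE at class-creation time */
	tree_insert(&n);
	tree_remove(&n);
	tree_remove(&n);    /* safe: already marked out of the tree */
	if (node_empty(&n)) /* enqueue re-inserts only when not queued */
		tree_insert(&n);
	printf("linked=%d\n", n.linked);
	return 0;
}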
@@ -1328,13 +1328,15 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
 
 		stab = rtnl_dereference(q->root->stab);
 
-		oper = rtnl_dereference(q->oper_sched);
+		rcu_read_lock();
+		oper = rcu_dereference(q->oper_sched);
 		if (oper)
 			taprio_update_queue_max_sdu(q, oper, stab);
 
-		admin = rtnl_dereference(q->admin_sched);
+		admin = rcu_dereference(q->admin_sched);
 		if (admin)
 			taprio_update_queue_max_sdu(q, admin, stab);
+		rcu_read_unlock();
 
 		break;
 	}
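The taprio hunk above (CVE-2025-38087 below) switches the notifier from rtnl_dereference(), which is only valid while holding the RTNL lock, to an explicit rcu_read_lock()/rcu_dereference() pair held across both uses of the schedule pointers. A loose userspace analogue using C11 atomics in place of the RCU primitives:

/* Loose userspace analogue: the reader pins a consistent view for the
 * whole window it dereferences shared pointers, rather than assuming a
 * lock it does not hold. */
#include <stdatomic.h>
#include <stdio.h>

struct sched { int max_sdu; };

static _Atomic(struct sched *) oper_sched;

static void notifier(void)
{
	/* rcu_read_lock() would open the critical section here */
	struct sched *s = atomic_load_explicit(&oper_sched,
					       memory_order_acquire);
	if (s)
		printf("max_sdu=%d\n", s->max_sdu);
	/* rcu_read_unlock() would close it here */
}

int main(void)
{
	static struct sched s = { 1500 };

	atomic_store(&oper_sched, &s);
	notifier();
	return 0;
}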
@@ -817,12 +817,20 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
 		goto exit;
 	}
 
+	/* Get net to avoid freed tipc_crypto when delete namespace */
+	if (!maybe_get_net(aead->crypto->net)) {
+		tipc_bearer_put(b);
+		rc = -ENODEV;
+		goto exit;
+	}
+
 	/* Now, do encrypt */
 	rc = crypto_aead_encrypt(req);
 	if (rc == -EINPROGRESS || rc == -EBUSY)
 		return rc;
 
 	tipc_bearer_put(b);
+	put_net(aead->crypto->net);
 
 exit:
 	kfree(ctx);
@@ -860,6 +868,7 @@ static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
 	kfree(tx_ctx);
 	tipc_bearer_put(b);
 	tipc_aead_put(aead);
+	put_net(net);
 }
 
 /**
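The two tipc hunks above (CVE-2025-38052 plus the refcount-warning fix below) pin the network namespace with maybe_get_net() before submitting async crypto, fail with -ENODEV if the namespace is already being torn down, and drop the pin with put_net() either on the synchronous path or in the completion callback. A stand-alone sketch of the pin-across-async pattern (the helpers imitate, but are not, the kernel's maybe_get_net()/put_net()):

/* Stand-alone sketch: refuse to pin a dying owner, and release the pin
 * exactly once, at sync completion or in the async callback. */
#include <stdbool.h>
#include <stdio.h>

struct net { int refs; bool dying; };

static bool maybe_get(struct net *n)
{
	if (n->dying)
		return false; /* namespace teardown already started */
	n->refs++;
	return true;
}

static void put(struct net *n) { n->refs--; }

static void encrypt_done(struct net *n)
{
	put(n); /* async completion releases the pin */
}

static int encrypt(struct net *n, bool async)
{
	if (!maybe_get(n))
		return -1; /* the hunk above returns -ENODEV here */
	if (async)
		return 1;  /* like -EINPROGRESS: encrypt_done() will put */
	put(n);            /* synchronous completion drops the pin now */
	return 0;
}

int main(void)
{
	struct net n = { 1, false };

	printf("rc=%d refs=%d\n", encrypt(&n, false), n.refs);
	printf("rc=%d ", encrypt(&n, true));
	encrypt_done(&n);
	printf("refs=%d\n", n.refs);
	return 0;
}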
@@ -1,3 +1,29 @@
+* Mon Jul 28 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.32.1.el9_6]
+- net_sched: hfsc: Address reentrant enqueue adding class to eltree twice (Davide Caratti) [RHEL-97522] {CVE-2025-38001 CVE-2025-37890}
+- sch_hfsc: Fix qlen accounting bug when using peek in hfsc_enqueue() (Davide Caratti) [RHEL-97522] {CVE-2025-38000}
+- net_sched: hfsc: Fix a UAF vulnerability in class with netem as child qdisc (Davide Caratti) [RHEL-97522] {CVE-2025-37890}
+- sch_hfsc: make hfsc_qlen_notify() idempotent (Ivan Vecera) [RHEL-97522]
+- HID: intel-ish-hid: Fix use-after-free issue in ishtp_hid_remove() (CKI Backport Bot) [RHEL-98847] {CVE-2025-21928}
+- HID: intel-ish-hid: Fix use-after-free issue in hid_ishtp_cl_remove() (CKI Backport Bot) [RHEL-98871] {CVE-2025-21929}
+Resolves: RHEL-97522, RHEL-98847, RHEL-98871
+
+* Sat Jul 26 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.31.1.el9_6]
+- Bluetooth: btusb: avoid NULL pointer dereference in skb_dequeue() (David Marlin) [RHEL-95324] {CVE-2025-37918}
+- memstick: rtsx_usb_ms: Fix slab-use-after-free in rtsx_usb_ms_drv_remove (Desnes Nunes) [RHEL-99029] {CVE-2025-22020}
+- misc/vmw_vmci: fix an infoleak in vmci_host_do_receive_datagram() (John W. Linville) [RHEL-97499] {CVE-2022-49788}
+- net: tipc: fix refcount warning in tipc_aead_encrypt (Xin Long) [RHEL-103087]
+- net/tipc: fix slab-use-after-free Read in tipc_aead_encrypt_done (CKI Backport Bot) [RHEL-103087] {CVE-2025-38052}
+- md/raid1: Add check for missing source disk in process_checks() (CKI Backport Bot) [RHEL-97439]
+- net/sched: fix use-after-free in taprio_dev_notifier (CKI Backport Bot) [RHEL-101317] {CVE-2025-38087}
+- padata: avoid UAF for reorder_work (Rafael Aquini) [RHEL-97031] {CVE-2025-21727 CVE-2025-21726}
+- padata: fix UAF in padata_reorder (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
+- padata: add pd get/put refcnt helper (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
+- padata: fix sysfs store callback check (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
+- padata: Clean up in padata_do_multithreaded() (Rafael Aquini) [RHEL-97031] {CVE-2025-21727}
+- platform/x86: dell_rbu: Fix list usage (David Arcari) [RHEL-100908]
+- cifs: Fix integer overflow while processing closetimeo mount option (CKI Backport Bot) [RHEL-87900] {CVE-2025-21962}
+Resolves: RHEL-100908, RHEL-101317, RHEL-103087, RHEL-87900, RHEL-95324, RHEL-97031, RHEL-97439, RHEL-97499, RHEL-99029
+
 * Thu Jul 24 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.30.1.el9_6]
 - net_sched: hfsc: Fix a UAF vulnerability in class handling (Davide Caratti) [RHEL-95853] {CVE-2025-37797}
 Resolves: RHEL-95853