Import of kernel-5.14.0-611.49.1.el9_7

This commit is contained in:
almalinux-bot-kernel 2026-04-23 05:05:58 +00:00
parent 18acec0308
commit 917cb6988b
16 changed files with 105 additions and 95 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 7
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 611.47.1
RHEL_RELEASE = 611.49.1
#
# ZSTREAM

View File

@ -3886,7 +3886,7 @@ static int btusb_probe(struct usb_interface *intf,
return -ENODEV;
}
data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@ -3909,8 +3909,10 @@ static int btusb_probe(struct usb_interface *intf,
}
}
if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
kfree(data);
return -ENODEV;
}
if (id->driver_info & BTUSB_AMP) {
data->cmdreq_type = USB_TYPE_CLASS | 0x01;
@ -3965,8 +3967,10 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_acl = hci_recv_frame;
hdev = hci_alloc_dev_priv(priv_size);
if (!hdev)
if (!hdev) {
kfree(data);
return -ENOMEM;
}
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, data);
@ -4239,6 +4243,7 @@ out_free_dev:
if (data->reset_gpio)
gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
kfree(data);
return err;
}
@ -4266,6 +4271,11 @@ static void btusb_disconnect(struct usb_interface *intf)
hci_unregister_dev(hdev);
if (data->oob_wake_irq)
device_init_wakeup(&data->udev->dev, false);
if (data->reset_gpio)
gpiod_put(data->reset_gpio);
if (intf == data->intf) {
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
@ -4276,18 +4286,13 @@ static void btusb_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&btusb_driver, data->diag);
usb_driver_release_interface(&btusb_driver, data->intf);
} else if (intf == data->diag) {
usb_driver_release_interface(&btusb_driver, data->intf);
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
usb_driver_release_interface(&btusb_driver, data->intf);
}
if (data->oob_wake_irq)
device_init_wakeup(&data->udev->dev, false);
if (data->reset_gpio)
gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
kfree(data);
}
#ifdef CONFIG_PM

View File

@ -685,6 +685,8 @@ static int hci_uart_register_dev(struct hci_uart *hu)
return err;
}
set_bit(HCI_UART_PROTO_INIT, &hu->flags);
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
@ -712,8 +714,6 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
set_bit(HCI_UART_PROTO_INIT, &hu->flags);
err = hci_uart_register_dev(hu);
if (err) {
return err;

View File

@ -423,22 +423,6 @@ static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_
domain->qi_batch);
}
/*
 * Flush the entire device TLB (ATS translation cache) for the device
 * associated with @tag. Queues full-range dev-IOTLB invalidation
 * descriptors into the domain's qi_batch; note this variant carries no
 * PASID, which per the changelog (RHEL-144218) is why it is being
 * removed in favor of cache_tag_flush_range(domain, 0, ULONG_MAX, 0).
 */
static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag)
{
struct intel_iommu *iommu = tag->iommu;
struct device_domain_info *info;
u16 sid;
/* Resolve the PCI requester ID (bus/devfn) for the tagged device. */
info = dev_iommu_priv_get(tag->dev);
sid = PCI_DEVID(info->bus, info->devfn);
/* Size 0 with MAX_AGAW_PFN_WIDTH mask => invalidate the whole address space. */
qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
MAX_AGAW_PFN_WIDTH, domain->qi_batch);
/* Some devices need a second identical invalidation (hardware quirk flag). */
if (info->dtlb_extra_inval)
qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
MAX_AGAW_PFN_WIDTH, domain->qi_batch);
}
/*
* Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
* when the memory mappings in the target domain have been modified.
@ -451,7 +435,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
struct cache_tag *tag;
unsigned long flags;
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
if (start == 0 && end == ULONG_MAX) {
addr = 0;
pages = -1;
mask = MAX_AGAW_PFN_WIDTH;
} else {
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
}
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
@ -492,31 +482,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
*/
void cache_tag_flush_all(struct dmar_domain *domain)
{
struct intel_iommu *iommu = NULL;
struct cache_tag *tag;
unsigned long flags;
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
if (iommu && iommu != tag->iommu)
qi_batch_flush_descs(iommu, domain->qi_batch);
iommu = tag->iommu;
switch (tag->type) {
case CACHE_TAG_IOTLB:
case CACHE_TAG_NESTING_IOTLB:
cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
break;
case CACHE_TAG_DEVTLB:
case CACHE_TAG_NESTING_DEVTLB:
cache_tag_flush_devtlb_all(domain, tag);
break;
}
trace_cache_tag_flush_all(tag);
}
qi_batch_flush_descs(iommu, domain->qi_batch);
spin_unlock_irqrestore(&domain->cache_lock, flags);
cache_tag_flush_range(domain, 0, ULONG_MAX, 0);
}
/*

View File

@ -130,11 +130,6 @@ DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
TP_ARGS(tag)
);
DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
TP_PROTO(struct cache_tag *tag),
TP_ARGS(tag)
);
DECLARE_EVENT_CLASS(cache_tag_flush,
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
unsigned long addr, unsigned long pages, unsigned long mask),

View File

@ -1413,4 +1413,15 @@ static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
}
/*
 * i40e_get_max_num_descriptors - maximum ring length for this adapter
 * @pf: board-private structure (read-only)
 *
 * Returns the hardware-specific upper bound on Tx/Rx descriptor ring
 * length: XL710 MACs support a larger maximum than other i40e parts.
 * Shared so both ethtool ringparam and VF ring_len validation agree
 * (see the i40e_config_vsi_tx/rx_queue hunks in this commit).
 */
static inline u32 i40e_get_max_num_descriptors(const struct i40e_pf *pf)
{
const struct i40e_hw *hw = &pf->hw;
switch (hw->mac.type) {
case I40E_MAC_XL710:
return I40E_MAX_NUM_DESCRIPTORS_XL710;
default:
return I40E_MAX_NUM_DESCRIPTORS;
}
}
#endif /* _I40E_H_ */

View File

@ -2012,18 +2012,6 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
/*
 * Old file-local copy of the max-descriptor lookup, removed from
 * i40e_ethtool.c by this commit in favor of the shared static inline
 * added to i40e.h (same switch on hw->mac.type, XL710 vs. default).
 */
static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
switch (hw->mac.type) {
case I40E_MAC_XL710:
return I40E_MAX_NUM_DESCRIPTORS_XL710;
default:
return I40E_MAX_NUM_DESCRIPTORS;
}
}
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,

View File

@ -656,7 +656,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 8 */
if (!IS_ALIGNED(info->ring_len, 8) ||
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_context;
}
@ -726,7 +726,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 32 */
if (!IS_ALIGNED(info->ring_len, 32) ||
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_param;
}

View File

@ -1765,6 +1765,9 @@ static int netvsc_set_rxfh(struct net_device *dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!ndc->rx_table_sz)
return -EOPNOTSUPP;
rndis_dev = ndev->extension;
if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)

View File

@ -1292,7 +1292,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
kfree(item);
qla24xx_free_purex_item(item);
goto out;
}

View File

@ -306,7 +306,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
if (delegation->inode &&
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);

View File

@ -1949,6 +1949,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
}
mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
mgmt_pending_free(cmd);
return;
}
@ -1967,6 +1968,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
sock_put(match.sk);
hci_update_eir_sync(hdev);
mgmt_pending_free(cmd);
}
static int set_ssp_sync(struct hci_dev *hdev, void *data)
@ -6350,6 +6352,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
hci_dev_clear_flag(hdev, HCI_ADVERTISING);
settings_rsp(cmd, &match);
mgmt_pending_free(cmd);
new_settings(hdev, match.sk);

View File

@ -1005,6 +1005,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
/* IFLA_VF_STATS_TX_DROPPED */
nla_total_size_64bit(sizeof(__u64)));
}
if (dev->netdev_ops->ndo_get_vf_guid)
size += num_vfs * 2 *
nla_total_size(sizeof(struct ifla_vf_guid));
return size;
} else
return 0;

View File

@ -1594,7 +1594,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
sch->q.qlen--;
qdisc_tree_reduce_backlog(sch, 1, len);
cake_heapify(q, 0);
@ -1740,14 +1739,14 @@ static void cake_reconfigure(struct Qdisc *sch);
static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
u32 idx, tin, prev_qlen, prev_backlog, drop_id;
struct cake_sched_data *q = qdisc_priv(sch);
int len = qdisc_pkt_len(skb);
int ret;
int len = qdisc_pkt_len(skb), ret;
struct sk_buff *ack = NULL;
ktime_t now = ktime_get();
struct cake_tin_data *b;
struct cake_flow *flow;
u32 idx;
bool same_flow = false;
/* choose flow to insert into */
idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@ -1757,6 +1756,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
__qdisc_drop(skb, to_free);
return ret;
}
tin = (u32)(b - q->tins);
idx--;
flow = &b->flows[idx];
@ -1819,6 +1819,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
consume_skb(skb);
} else {
/* not splitting */
int ack_pkt_len = 0;
cobalt_set_enqueue_time(skb, now);
get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
flow_queue_add(flow, skb);
@ -1829,13 +1831,13 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (ack) {
b->ack_drops++;
sch->qstats.drops++;
b->bytes += qdisc_pkt_len(ack);
len -= qdisc_pkt_len(ack);
ack_pkt_len = qdisc_pkt_len(ack);
b->bytes += ack_pkt_len;
q->buffer_used += skb->truesize - ack->truesize;
if (q->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, ack, now, true);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
consume_skb(ack);
} else {
sch->q.qlen++;
@ -1844,11 +1846,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* stats */
b->packets++;
b->bytes += len;
b->backlogs[idx] += len;
b->tin_backlog += len;
sch->qstats.backlog += len;
q->avg_window_bytes += len;
b->bytes += len - ack_pkt_len;
b->backlogs[idx] += len - ack_pkt_len;
b->tin_backlog += len - ack_pkt_len;
sch->qstats.backlog += len - ack_pkt_len;
q->avg_window_bytes += len - ack_pkt_len;
}
if (q->overflow_timeout)
@ -1923,15 +1925,29 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (q->buffer_used > q->buffer_max_used)
q->buffer_max_used = q->buffer_used;
if (q->buffer_used > q->buffer_limit) {
u32 dropped = 0;
if (q->buffer_used <= q->buffer_limit)
return NET_XMIT_SUCCESS;
while (q->buffer_used > q->buffer_limit) {
dropped++;
cake_drop(sch, to_free);
}
b->drop_overlimit += dropped;
prev_qlen = sch->q.qlen;
prev_backlog = sch->qstats.backlog;
while (q->buffer_used > q->buffer_limit) {
drop_id = cake_drop(sch, to_free);
if ((drop_id >> 16) == tin &&
(drop_id & 0xFFFF) == idx)
same_flow = true;
}
prev_qlen -= sch->q.qlen;
prev_backlog -= sch->qstats.backlog;
b->drop_overlimit += prev_qlen;
if (same_flow) {
qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
prev_backlog - len);
return NET_XMIT_CN;
}
qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
return NET_XMIT_SUCCESS;
}

View File

@ -1,3 +1,22 @@
* Tue Apr 07 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.49.1.el9_7]
- rtnetlink: Allocate vfinfo size for VF GUIDs when supported (Kamal Heib) [RHEL-149469] {CVE-2025-22075}
Resolves: RHEL-149469
* Fri Apr 03 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.48.1.el9_7]
- scsi: qla2xxx: Fix improper freeing of purex item (CKI Backport Bot) [RHEL-159222] {CVE-2025-68741}
- NFSv4: Check for delegation validity in nfs_start_delegation_return_locked() (Scott Mayhew) [RHEL-151414]
- Bluetooth: MGMT: Fix memory leak in set_ssp_complete (David Marlin) [RHEL-151728]
- Bluetooth: hci_uart: fix null-ptr-deref in hci_uart_write_work (David Marlin) [RHEL-151728]
- Bluetooth: btusb: revert use of devm_kzalloc in btusb (David Marlin) [RHEL-151728]
- Bluetooth: btusb: reorder cleanup in btusb_disconnect to avoid UAF (David Marlin) [RHEL-151728]
- net: hv_netvsc: reject RSS hash key programming without RX indirection table (Medha Mummigatti) [RHEL-150571]
- net/sched: sch_cake: Fix incorrect qlen reduction in cake_drop (CKI Backport Bot) [RHEL-150455] {CVE-2025-39766}
- net/sched: Make cake_enqueue return NET_XMIT_CN when past buffer_limit (CKI Backport Bot) [RHEL-150455] {CVE-2025-39766}
- iommu/vt-d: Deduplicate cache_tag_flush_all by reusing flush_range (Jerry Snitselaar) [RHEL-144218]
- iommu/vt-d: Fix missing PASID in dev TLB flush with cache_tag_flush_all (Jerry Snitselaar) [RHEL-144218]
- i40e: validate ring_len parameter against hardware-specific values (CKI Backport Bot) [RHEL-141722]
Resolves: RHEL-141722, RHEL-144218, RHEL-150455, RHEL-150571, RHEL-151414, RHEL-151728, RHEL-159222
* Tue Mar 31 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.47.1.el9_7]
- net/mlx5: Fix ECVF vports unload on shutdown flow (CKI Backport Bot) [RHEL-154537] {CVE-2025-38109}
- netfilter: nf_tables: fix use-after-free in nf_tables_addchain() (CKI Backport Bot) [RHEL-153269] {CVE-2026-23231}