Import of kernel-5.14.0-570.30.1.el9_6

This commit is contained in:
eabdullin 2025-09-05 14:29:08 +00:00
parent d63051341b
commit 038d8766a2
25 changed files with 185 additions and 92 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 6
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 570.28.1
RHEL_RELEASE = 570.30.1
#
# ZSTREAM

View File

@ -1357,9 +1357,11 @@ static void ib_device_notify_register(struct ib_device *device)
u32 port;
int ret;
down_read(&devices_rwsem);
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
return;
goto out;
rdma_for_each_port(device, port) {
netdev = ib_device_get_netdev(device, port);
@ -1370,8 +1372,11 @@ static void ib_device_notify_register(struct ib_device *device)
RDMA_NETDEV_ATTACH_EVENT);
dev_put(netdev);
if (ret)
return;
goto out;
}
out:
up_read(&devices_rwsem);
}
/**

View File

@ -56,7 +56,7 @@ static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
unsigned int page_size, bool populate,
unsigned long page_size, bool populate,
int access_mode);
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
@ -1115,7 +1115,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct mlx5r_cache_rb_key rb_key = {};
struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
unsigned int page_size;
unsigned long page_size;
if (umem->is_dmabuf)
page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
@ -1219,7 +1219,7 @@ err_1:
*/
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
unsigned int page_size, bool populate,
unsigned long page_size, bool populate,
int access_mode)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@ -1425,7 +1425,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
MLX5_MKC_ACCESS_MODE_MTT);
} else {
unsigned int page_size =
unsigned long page_size =
mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
mutex_lock(&dev->slow_path_mutex);

View File

@ -269,6 +269,7 @@ int uvc_status_init(struct uvc_device *dev)
dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->int_urb) {
kfree(dev->status);
dev->status = NULL;
return -ENOMEM;
}

View File

@ -1223,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
octep_stop(oct->netdev);
dev_close(oct->netdev);
rtnl_unlock();
}

View File

@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
schedule_work(&oct->tx_timeout_task);
if (!schedule_work(&oct->tx_timeout_task))
netdev_put(netdev, NULL);
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)

View File

@ -2886,6 +2886,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
/*
 * Clear the ZRXDC non-compliant bit in the GEN3-related register at PCI
 * extended config offset 0x0890.  Prefer native PCI config space access;
 * if the extended config space is not reachable (cfg_size too small, or a
 * config read/write fails), fall back to the chip's CSI interface.
 * NOTE(review): the exact hardware meaning of RTL_GEN3_ZRXDC_NONCOMPL is
 * not visible here — presumably it disables a ZRX-DC timeout workaround
 * on RTL8126; confirm against vendor documentation.
 */
static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
u32 csi;
int rc;
u8 val;
#define RTL_GEN3_RELATED_OFF 0x0890
#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
/* Fast path: register lies inside the visible config space. */
if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
if (rc == PCIBIOS_SUCCESSFUL) {
val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
val);
if (rc == PCIBIOS_SUCCESSFUL)
return;
}
}
/* Native access failed: do the same read-modify-write via CSI. */
netdev_notice_once(tp->dev,
"No native access to PCI extended config space, falling back to CSI\n");
csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
}
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
@ -3931,6 +3957,7 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
rtl_disable_zrxdc_timeout(tp);
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@ -5481,7 +5508,7 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_60 &&
if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;

View File

@ -979,6 +979,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
if (addr >= PHY_MAX_ADDR)
return -ENXIO;
if (bus->read_c45)
retval = bus->read_c45(bus, addr, devad, regnum);
else
@ -1010,6 +1013,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
lockdep_assert_held_once(&bus->mdio_lock);
if (addr >= PHY_MAX_ADDR)
return -ENXIO;
if (bus->write_c45)
err = bus->write_c45(bus, addr, devad, regnum, val);
else

View File

@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
int ret;
netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
__func__, phy_id, loc);
@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (phy_id != 0)
return -ENODEV;
control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
CONTROL_TIMEOUT_MS);
ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
CONTROL_TIMEOUT_MS);
if (ret < 0)
return ret;
return (buff[0] | buff[1] << 8);
}

View File

@ -1197,7 +1197,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
fseq_ver->version);
}
break;

View File

@ -264,11 +264,17 @@ static struct airq_info *new_airq_info(int index)
static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
int i, j;
int i, j, queue_idx, highest_queue_idx = -1;
struct airq_info *info;
unsigned long *indicator_addr = NULL;
unsigned long bit, flags;
/* Array entries without an actual queue pointer must be ignored. */
for (i = 0; i < nvqs; i++) {
if (vqs[i])
highest_queue_idx++;
}
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
mutex_lock(&airq_areas_lock);
if (!airq_areas[i])
@ -278,7 +284,7 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
if (!info)
return NULL;
write_lock_irqsave(&info->lock, flags);
bit = airq_iv_alloc(info->aiv, nvqs);
bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1);
if (bit == -1UL) {
/* Not enough vacancies. */
write_unlock_irqrestore(&info->lock, flags);
@ -287,8 +293,10 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
*first = bit;
*airq_info = info;
indicator_addr = info->aiv->vector;
for (j = 0; j < nvqs; j++) {
airq_iv_set_ptr(info->aiv, bit + j,
for (j = 0, queue_idx = 0; j < nvqs; j++) {
if (!vqs[j])
continue;
airq_iv_set_ptr(info->aiv, bit + queue_idx++,
(unsigned long)vqs[j]);
}
write_unlock_irqrestore(&info->lock, flags);

View File

@ -1834,7 +1834,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
enum {
EXT4_MF_MNTDIR_SAMPLED,
EXT4_MF_FS_ABORTED, /* Fatal error detected */
EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
EXT4_MF_JOURNAL_DESTROY /* Journal is in process of destroying */
};
static inline void ext4_set_mount_flag(struct super_block *sb, int bit)

View File

@ -513,4 +513,33 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
return 1;
}
/*
 * Tear down the journal and clear sbi->s_journal.
 *
 * Pass the journal explicitly, as it may not be cached in sbi->s_journal
 * in some cases (e.g. an external journal device looked up before it is
 * attached to the superblock).
 *
 * Returns the error from jbd2_journal_destroy() (0 on success).
 */
static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal)
{
int err = 0;
/*
 * At this point only two things can be operating on the journal.
 * JBD2 thread performing transaction commit and s_sb_upd_work
 * issuing sb update through the journal. Once we set
 * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not
 * queue s_sb_upd_work and ext4_force_commit() makes sure any
 * ext4_handle_error() calls from the running transaction commit are
 * finished. Hence no new s_sb_upd_work can be queued after we
 * flush it here.
 */
ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);
ext4_force_commit(sbi->s_sb);
flush_work(&sbi->s_sb_upd_work);
err = jbd2_journal_destroy(journal);
/* Journal is gone; make sure nobody dereferences the stale pointer. */
sbi->s_journal = NULL;
return err;
}
#endif /* _EXT4_JBD2_H */

View File

@ -4807,6 +4807,11 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
int err;
err = xattr_check_inode(inode, IHDR(inode, raw_inode),
ITAIL(inode, raw_inode));
if (err)
return err;
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
err = ext4_find_inline_data_nolock(inode);
if (!err && ext4_has_inline_data(inode))

View File

@ -720,9 +720,13 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
* In case the fs should keep running, we need to writeout
* superblock through the journal. Due to lock ordering
* constraints, it may not be safe to do it right here so we
* defer superblock flushing to a workqueue.
* defer superblock flushing to a workqueue. We just need to be
* careful when the journal is already shutting down. If we get
* here in that case, just update the sb directly as the last
* transaction won't commit anyway.
*/
if (continue_fs && journal)
if (continue_fs && journal &&
!ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
else
ext4_commit_super(sb);
@ -1252,18 +1256,17 @@ static void ext4_put_super(struct super_block *sb)
ext4_unregister_li_request(sb);
ext4_quota_off_umount(sb);
flush_work(&sbi->s_sb_upd_work);
destroy_workqueue(sbi->rsv_conversion_wq);
ext4_release_orphan_info(sb);
if (sbi->s_journal) {
aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
err = ext4_journal_destroy(sbi, sbi->s_journal);
if ((err < 0) && !aborted) {
ext4_abort(sb, -err, "Couldn't clean up the journal");
}
}
} else
flush_work(&sbi->s_sb_upd_work);
ext4_es_unregister_shrinker(sbi);
timer_shutdown_sync(&sbi->s_err_report);
@ -4921,10 +4924,7 @@ static int ext4_load_and_init_journal(struct super_block *sb,
return 0;
out:
/* flush s_sb_upd_work before destroying the journal. */
flush_work(&sbi->s_sb_upd_work);
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
ext4_journal_destroy(sbi, sbi->s_journal);
return -EINVAL;
}
@ -5686,10 +5686,7 @@ failed_mount_wq:
sbi->s_ea_block_cache = NULL;
if (sbi->s_journal) {
/* flush s_sb_upd_work before journal destroy. */
flush_work(&sbi->s_sb_upd_work);
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
ext4_journal_destroy(sbi, sbi->s_journal);
}
failed_mount3a:
ext4_es_unregister_shrinker(sbi);
@ -5976,7 +5973,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
return journal;
out_journal:
jbd2_journal_destroy(journal);
ext4_journal_destroy(EXT4_SB(sb), journal);
out_bdev:
bdev_fput(bdev_file);
return NULL;
@ -6093,8 +6090,7 @@ static int ext4_load_journal(struct super_block *sb,
EXT4_SB(sb)->s_journal = journal;
err = ext4_clear_journal_err(sb, es);
if (err) {
EXT4_SB(sb)->s_journal = NULL;
jbd2_journal_destroy(journal);
ext4_journal_destroy(EXT4_SB(sb), journal);
return err;
}
@ -6112,7 +6108,7 @@ static int ext4_load_journal(struct super_block *sb,
return 0;
err_out:
jbd2_journal_destroy(journal);
ext4_journal_destroy(EXT4_SB(sb), journal);
return err;
}

View File

@ -308,7 +308,7 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
__ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
static inline int
int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
void *end, const char *function, unsigned int line)
{
@ -316,9 +316,6 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
function, line);
}
#define xattr_check_inode(inode, header, end) \
__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
static int
xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
void *end, int name_index, const char *name, int sorted)
@ -649,10 +646,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
return error;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
end = ITAIL(inode, raw_inode);
entry = IFIRST(header);
error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
if (error)
@ -784,7 +778,6 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
struct ext4_xattr_ibody_header *header;
struct ext4_inode *raw_inode;
struct ext4_iloc iloc;
void *end;
int error;
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
@ -794,14 +787,9 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
return error;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
error = ext4_xattr_list_entries(dentry, IFIRST(header),
buffer, buffer_size);
cleanup:
brelse(iloc.bh);
return error;
}
@ -869,7 +857,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_entry *entry;
qsize_t ea_inode_refs = 0;
void *end;
int ret;
lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);
@ -880,10 +867,6 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
goto out;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
ret = xattr_check_inode(inode, header, end);
if (ret)
goto out;
for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
entry = EXT4_XATTR_NEXT(entry))
@ -2220,11 +2203,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
header = IHDR(inode, raw_inode);
is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
is->s.end = ITAIL(inode, raw_inode);
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
error = xattr_check_inode(inode, header, is->s.end);
if (error)
return error;
/* Find the named attribute. */
error = xattr_find_entry(inode, &is->s.here, is->s.end,
i->name_index, i->name, 0);
@ -2770,14 +2750,10 @@ retry:
*/
base = IFIRST(header);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
end = ITAIL(inode, raw_inode);
min_offs = end - base;
total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
if (ifree >= isize_diff)
goto shift;

View File

@ -68,6 +68,9 @@ struct ext4_xattr_entry {
((void *)raw_inode + \
EXT4_GOOD_OLD_INODE_SIZE + \
EXT4_I(inode)->i_extra_isize))
#define ITAIL(inode, raw_inode) \
((void *)(raw_inode) + \
EXT4_SB((inode)->i_sb)->s_inode_size)
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
/*
@ -207,6 +210,13 @@ extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
extern struct mb_cache *ext4_xattr_create_cache(void);
extern void ext4_xattr_destroy_cache(struct mb_cache *);
extern int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
void *end, const char *function, unsigned int line);
#define xattr_check_inode(inode, header, end) \
__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
#ifdef CONFIG_EXT4_FS_SECURITY
extern int ext4_init_security(handle_t *handle, struct inode *inode,
struct inode *dir, const struct qstr *qstr);

View File

@ -875,15 +875,8 @@ globalroot:
abs_path += sizeof("\\DosDevices\\")-1;
else if (strstarts(abs_path, "\\GLOBAL??\\"))
abs_path += sizeof("\\GLOBAL??\\")-1;
else {
/* Unhandled absolute symlink, points outside of DOS/Win32 */
cifs_dbg(VFS,
"absolute symlink '%s' cannot be converted from NT format "
"because points to unknown target\n",
smb_target);
rc = -EIO;
goto out;
}
else
goto out_unhandled_target;
/* Sometimes path separator after \?? is double backslash */
if (abs_path[0] == '\\')
@ -910,13 +903,7 @@ globalroot:
abs_path++;
abs_path[0] = drive_letter;
} else {
/* Unhandled absolute symlink. Report an error. */
cifs_dbg(VFS,
"absolute symlink '%s' cannot be converted from NT format "
"because points to unknown target\n",
smb_target);
rc = -EIO;
goto out;
goto out_unhandled_target;
}
abs_path_len = strlen(abs_path)+1;
@ -966,6 +953,7 @@ globalroot:
* These paths have same format as Linux symlinks, so no
* conversion is needed.
*/
out_unhandled_target:
linux_target = smb_target;
smb_target = NULL;
}

View File

@ -2284,6 +2284,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
{
spinlock_t *ptl;
struct mmu_notifier_range range;
bool pmd_migration;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address & HPAGE_PMD_MASK,
@ -2298,13 +2299,13 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
VM_BUG_ON(freeze && !folio);
VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
is_pmd_migration_entry(*pmd)) {
pmd_migration = is_pmd_migration_entry(*pmd);
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) {
/*
* It's safe to call pmd_page when folio is set because it's
* guaranteed that pmd is present.
* Do not apply pmd_folio() to a migration entry; and folio lock
* guarantees that it must be of the wrong folio anyway.
*/
if (folio && folio != page_folio(pmd_page(*pmd)))
if (folio && (pmd_migration || folio != page_folio(pmd_page(*pmd))))
goto out;
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
}

View File

@ -247,9 +247,15 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
do_div(val, skb->truesize);
tcp_sk(sk)->scaling_ratio = val ? val : 1;
if (old_ratio != tcp_sk(sk)->scaling_ratio)
WRITE_ONCE(tcp_sk(sk)->window_clamp,
tcp_win_from_space(sk, sk->sk_rcvbuf));
if (old_ratio != tcp_sk(sk)->scaling_ratio) {
struct tcp_sock *tp = tcp_sk(sk);
val = tcp_win_from_space(sk, sk->sk_rcvbuf);
tcp_set_window_clamp(sk, val);
if (tp->window_clamp < tp->rcvq_space.space)
tp->rcvq_space.space = tp->window_clamp;
}
}
icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
tcp_sk(sk)->advmss);

View File

@ -958,6 +958,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl != NULL) {
int old_flags;
int len = 0;
if (parentid) {
if (cl->cl_parent &&
@ -988,9 +989,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (usc != NULL)
hfsc_change_usc(cl, usc, cur_time);
if (cl->qdisc->q.qlen != 0)
len = qdisc_peek_len(cl->qdisc);
/* Check queue length again since some qdisc implementations
* (e.g., netem/codel) might empty the queue during the peek
* operation.
*/
if (cl->qdisc->q.qlen != 0) {
int len = qdisc_peek_len(cl->qdisc);
if (cl->cl_flags & HFSC_RSC) {
if (old_flags & HFSC_RSC)
update_ed(cl, len);

View File

@ -1 +0,0 @@
# CONFIG_BT_INTEL_PCIE is not set

View File

@ -1 +0,0 @@
# CONFIG_BT_HCIUART_AML is not set

View File

@ -1,3 +1,29 @@
* Thu Jul 24 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.30.1.el9_6]
- net_sched: hfsc: Fix a UAF vulnerability in class handling (Davide Caratti) [RHEL-95853] {CVE-2025-37797}
Resolves: RHEL-95853
* Sat Jul 19 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.29.1.el9_6]
- tcp: adjust rcvq_space after updating scaling ratio (Guillaume Nault) [RHEL-99145]
- ext4: fix out-of-bound read in ext4_xattr_inode_dec_ref_all() (CKI Backport Bot) [RHEL-93555] {CVE-2025-22121}
- ext4: introduce ITAIL helper (CKI Backport Bot) [RHEL-93555] {CVE-2025-22121}
- ext4: avoid journaling sb update on error if journal is destroying (Brian Foster) [RHEL-93591] {CVE-2025-22113}
- ext4: define ext4_journal_destroy wrapper (Brian Foster) [RHEL-93591]
- net/mdiobus: Fix potential out-of-bounds clause 45 read/write access (CKI Backport Bot) [RHEL-102093] {CVE-2025-38110}
- smb: client: fix regression with native SMB symlinks (Paulo Alcantara) [RHEL-101953]
- redhat/configs: remove automotive directory (Eric Chanudet) [RHEL-96365]
- r8169: enable RTL8168H/RTL8168EP/RTL8168FP ASPM support (CKI Backport Bot) [RHEL-96715]
- r8169: disable RTL8126 ZRX-DC timeout (CKI Backport Bot) [RHEL-96715]
- net: ch9200: fix uninitialised access during mii_nway_restart (CKI Backport Bot) [RHEL-101212] {CVE-2025-38086}
- media: uvcvideo: Fix double free in error path (CKI Backport Bot) [RHEL-98795] {CVE-2024-57980}
- RDMA/mlx5: Fix page_size variable overflow (CKI Backport Bot) [RHEL-99320] {CVE-2025-22091}
- wifi: iwlwifi: limit printed string from FW file (CKI Backport Bot) [RHEL-99384] {CVE-2025-21905}
- RDMA/core: Fix use-after-free when rename device name (CKI Backport Bot) [RHEL-99048] {CVE-2025-22085}
- octeon_ep: Fix host hang issue during device reboot (CKI Backport Bot) [RHEL-93251]
- mm/huge_memory: fix dereferencing invalid pmd migration entry (Rafael Aquini) [RHEL-96368] {CVE-2025-37958}
- octeon_ep_vf: Resolve netdevice usage count issue (CKI Backport Bot) [RHEL-93252]
- s390/virtio_ccw: Don't allocate/assign airqs for non-existing queues (CKI Backport Bot) [RHEL-87555]
Resolves: RHEL-101212, RHEL-101953, RHEL-102093, RHEL-87555, RHEL-93251, RHEL-93252, RHEL-93555, RHEL-93591, RHEL-96365, RHEL-96368, RHEL-96715, RHEL-98795, RHEL-99048, RHEL-99145, RHEL-99320, RHEL-99384
* Tue Jul 15 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.28.1.el9_6]
- sunrpc: handle SVC_GARBAGE during svc auth processing as auth error (CKI Backport Bot) [RHEL-101327] {CVE-2025-38089}
Resolves: RHEL-101327