Import of kernel-5.14.0-570.46.1.el9_6

This commit is contained in:
eabdullin 2025-09-30 07:28:10 +00:00
parent 49ffffa68d
commit db9979bc72
15 changed files with 187 additions and 73 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 6
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 570.44.1
RHEL_RELEASE = 570.46.1
#
# ZSTREAM

View File

@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright Red Hat
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@ -14,32 +13,45 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
#define INDEX_FIELD_DEV GENMASK(31, 16)
#define INDEX_FIELD_BUS GENMASK(12, 5)
#define INDEX_FIELD_SLOT GENMASK(4, 0)
#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
static unsigned long ice_adapter_index(const struct pci_dev *pdev)
#define ICE_ADAPTER_INDEX_E825C \
(ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
static u64 ice_adapter_index(struct pci_dev *pdev)
{
unsigned int domain = pci_domain_nr(pdev->bus);
WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
switch (pdev->device) {
case ICE_DEV_ID_E825C_BACKPLANE:
case ICE_DEV_ID_E825C_QSFP:
case ICE_DEV_ID_E825C_SFP:
case ICE_DEV_ID_E825C_SGMII:
return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
/* E825C devices have multiple NACs which are connected to the
* same clock source, and which must share the same
* ice_adapter structure. We can't use the serial number since
* each NAC has its own NVM generated with its own unique
* Device Serial Number. Instead, rely on the embedded nature
* of the E825C devices, and use a fixed index. This relies on
* the fact that all E825C physical functions in a given
* system are part of the same overall device.
*/
return ICE_ADAPTER_INDEX_E825C;
default:
return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
}
}
static struct ice_adapter *ice_adapter_new(void)
static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
{
u64 index = ice_adapter_index(pdev);
#if BITS_PER_LONG == 64
return index;
#else
return (u32)index ^ (u32)(index >> 32);
#endif
}
static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
{
struct ice_adapter *adapter;
@ -47,6 +59,7 @@ static struct ice_adapter *ice_adapter_new(void)
if (!adapter)
return NULL;
adapter->index = ice_adapter_index(pdev);
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
refcount_set(&adapter->refcount, 1);
@ -77,23 +90,25 @@ static void ice_adapter_free(struct ice_adapter *adapter)
* Return: Pointer to ice_adapter on success.
* ERR_PTR() on error. -ENOMEM is the only possible error.
*/
struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
unsigned long index;
int err;
index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
if (err == -EBUSY) {
adapter = xa_load(&ice_adapters, index);
refcount_inc(&adapter->refcount);
WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
return adapter;
}
if (err)
return ERR_PTR(err);
adapter = ice_adapter_new();
adapter = ice_adapter_new(pdev);
if (!adapter)
return ERR_PTR(-ENOMEM);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@ -110,11 +125,12 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
*
* Context: Process, may sleep.
*/
void ice_adapter_put(const struct pci_dev *pdev)
void ice_adapter_put(struct pci_dev *pdev)
{
unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
unsigned long index;
index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))

View File

@ -32,6 +32,7 @@ struct ice_port_list {
* @refcount: Reference count. struct ice_pf objects hold the references.
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
* @index: 64-bit index cached for collision detection on 32bit systems
*/
struct ice_adapter {
refcount_t refcount;
@ -40,9 +41,10 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
u64 index;
};
struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
void ice_adapter_put(const struct pci_dev *pdev);
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
void ice_adapter_put(struct pci_dev *pdev);
#endif /* _ICE_ADAPTER_H */

View File

@ -319,6 +319,7 @@ struct igc_adapter {
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
struct system_time_snapshot snapshot;
struct mutex ptm_lock; /* Only allow one PTM transaction at a time */
char fw_version[32];

View File

@ -574,7 +574,10 @@
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2)
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */
/* A short cycle time of 1us theoretically should work, but appears to be too
* short in practice.
*/
#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */
#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */
#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */
@ -593,6 +596,7 @@
#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */
#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */
#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */
#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */
/* PCIe PTM Cycle Control */
#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */

View File

@ -7169,6 +7169,7 @@ static int igc_probe(struct pci_dev *pdev,
err_register:
igc_release_hw_control(adapter);
igc_ptp_stop(adapter);
err_eeprom:
if (!igc_check_reset_block(hw))
igc_reset_phy(hw);

View File

@ -974,45 +974,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat)
}
}
/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */
/* Manually start one PCIe PTM cycle by setting the TRIG bit in the PTM
 * control register via read-modify-write, then flushing the posted write.
 */
static void igc_ptm_trigger(struct igc_hw *hw)
{
u32 ctrl;
/* To "manually" start the PTM cycle we need to set the
* trigger (TRIG) bit
*/
ctrl = rd32(IGC_PTM_CTRL);
ctrl |= IGC_PTM_CTRL_TRIG;
wr32(IGC_PTM_CTRL, ctrl);
/* Perform flush after write to CTRL register otherwise
* transaction may not start
*/
wrfl();
}
/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */
/* Stop any in-progress PTM cycle by clearing the TRIG bit, then clear all
 * latched PTM status bits (the STAT register is write-1-to-clear) so the
 * next cycle starts from a clean state.
 */
static void igc_ptm_reset(struct igc_hw *hw)
{
u32 ctrl;
ctrl = rd32(IGC_PTM_CTRL);
ctrl &= ~IGC_PTM_CTRL_TRIG;
wr32(IGC_PTM_CTRL, ctrl);
/* Write to clear all status */
wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL);
}
static int igc_phc_get_syncdevicetime(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
{
u32 stat, t2_curr_h, t2_curr_l, ctrl;
struct igc_adapter *adapter = ctx;
struct igc_hw *hw = &adapter->hw;
u32 stat, t2_curr_h, t2_curr_l;
int err, count = 100;
ktime_t t1, t2_curr;
/* Get a snapshot of system clocks to use as historic value. */
ktime_get_snapshot(&adapter->snapshot);
do {
/* Doing this in a loop because in the event of a
* badly timed (ha!) system clock adjustment, we may
* get PTM errors from the PCI root, but these errors
* are transitory. Repeating the process returns valid
* data eventually.
*/
do {
/* Get a snapshot of system clocks to use as historic value. */
ktime_get_snapshot(&adapter->snapshot);
/* To "manually" start the PTM cycle we need to clear and
* then set again the TRIG bit.
*/
ctrl = rd32(IGC_PTM_CTRL);
ctrl &= ~IGC_PTM_CTRL_TRIG;
wr32(IGC_PTM_CTRL, ctrl);
ctrl |= IGC_PTM_CTRL_TRIG;
wr32(IGC_PTM_CTRL, ctrl);
/* The cycle only starts "for real" when software notifies
* that it has read the registers, this is done by setting
* VALID bit.
*/
wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
igc_ptm_trigger(hw);
err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat,
stat, IGC_PTM_STAT_SLEEP,
IGC_PTM_STAT_TIMEOUT);
igc_ptm_reset(hw);
if (err < 0) {
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
return err;
@ -1021,15 +1038,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device,
if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID)
break;
if (stat & ~IGC_PTM_STAT_VALID) {
/* An error occurred, log it. */
igc_ptm_log_error(adapter, stat);
/* The STAT register is write-1-to-clear (W1C),
* so write the previous error status to clear it.
*/
wr32(IGC_PTM_STAT, stat);
continue;
}
} while (--count);
if (!count) {
@ -1061,9 +1070,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
{
struct igc_adapter *adapter = container_of(ptp, struct igc_adapter,
ptp_caps);
int ret;
return get_device_system_crosststamp(igc_phc_get_syncdevicetime,
/* This blocks until any in progress PTM transactions complete */
mutex_lock(&adapter->ptm_lock);
ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime,
adapter, &adapter->snapshot, cts);
mutex_unlock(&adapter->ptm_lock);
return ret;
}
static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
@ -1162,6 +1178,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
spin_lock_init(&adapter->ptp_tx_lock);
spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
mutex_init(&adapter->ptm_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
@ -1174,6 +1191,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
netdev_err(netdev, "ptp_clock_register failed\n");
mutex_destroy(&adapter->ptm_lock);
} else if (adapter->ptp_clock) {
netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
@ -1203,10 +1221,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u32 ctrl;
mutex_lock(&adapter->ptm_lock);
ctrl = rd32(IGC_PTM_CTRL);
ctrl &= ~IGC_PTM_CTRL_EN;
wr32(IGC_PTM_CTRL, ctrl);
mutex_unlock(&adapter->ptm_lock);
}
/**
@ -1237,13 +1257,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
**/
void igc_ptp_stop(struct igc_adapter *adapter)
{
if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
return;
igc_ptp_suspend(adapter);
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
mutex_destroy(&adapter->ptm_lock);
}
/**
@ -1255,13 +1280,18 @@ void igc_ptp_stop(struct igc_adapter *adapter)
void igc_ptp_reset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 cycle_ctrl, ctrl;
u32 cycle_ctrl, ctrl, stat;
unsigned long flags;
u32 timadj;
if (!(adapter->ptp_flags & IGC_PTP_ENABLED))
return;
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
mutex_lock(&adapter->ptm_lock);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@ -1290,14 +1320,19 @@ void igc_ptp_reset(struct igc_adapter *adapter)
ctrl = IGC_PTM_CTRL_EN |
IGC_PTM_CTRL_START_NOW |
IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) |
IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) |
IGC_PTM_CTRL_TRIG;
IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT);
wr32(IGC_PTM_CTRL, ctrl);
/* Force the first cycle to run. */
wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID);
igc_ptm_trigger(hw);
if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat,
stat, IGC_PTM_STAT_SLEEP,
IGC_PTM_STAT_TIMEOUT))
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
igc_ptm_reset(hw);
break;
default:
/* No work to do. */
@ -1314,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
mutex_unlock(&adapter->ptm_lock);
wrfl();
}

View File

@ -2201,6 +2201,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
if (unlikely(size > skb->len)) {
netif_dbg(dev, rx_err, dev->net,
"size err rx_cmd_a=0x%08x\n",
rx_cmd_a);
return 0;
}
if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x\n", rx_cmd_a);

View File

@ -76,6 +76,13 @@ unsigned long sclp_console_full;
/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
/*
 * Translate the SCCB address carried in an interrupt parameter into a
 * kernel virtual pointer. A zero value indicates that no SCCB is present,
 * in which case NULL is returned instead of translating address zero.
 */
static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int)
{
	return sccb_int ? (struct sccb_header *)__va(sccb_int) : NULL;
}
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
struct sclp_trace_entry e;
@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb)
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
struct sccb_header *sccb = sclpint_to_sccb(sccb_int);
struct evbuf_header *evbuf;
u16 response;
@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
/* INT: Interrupt received (a=intparm, b=cmd) */
sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
(struct sccb_header *)__va(finished_sccb),
sclpint_to_sccb(finished_sccb),
!ok_response(finished_sccb, active_cmd));
if (finished_sccb) {

View File

@ -4489,6 +4489,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
if (!count)
return IRQ_NONE;
if (count > evt->length) {
dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n",
count, evt->length);
return IRQ_NONE;
}
evt->count = count;
evt->flags |= DWC3_EVENT_PENDING;

View File

@ -2281,6 +2281,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
return attach_recursive_mnt(mnt, p, mp, false);
}
/*
 * Check whether the caller may change mount propagation settings on @m.
 *
 * Return: 0 when allowed; -EINVAL when the mount is not attached to any
 * namespace (the same condition is_mounted() checks); -EPERM when the
 * caller lacks CAP_SYS_ADMIN in the user namespace owning that mount
 * namespace.
 */
static int may_change_propagation(const struct mount *m)
{
	struct mnt_namespace *mnt_ns = m->mnt_ns;

	if (IS_ERR_OR_NULL(mnt_ns))
		return -EINVAL;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
/*
* Sanity check the flags to change_mnt_propagation.
*/
@ -2317,6 +2330,10 @@ static int do_change_type(struct path *path, int ms_flags)
return -EINVAL;
namespace_lock();
err = may_change_propagation(mnt);
if (err)
goto out_unlock;
if (type == MS_SHARED) {
err = invent_group_ids(mnt, recurse);
if (err)

View File

@ -416,7 +416,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (!reply || skb->pkt_type == PACKET_HOST)
if (!reply)
return 0;
if (skb->protocol == htons(ETH_P_IP))

View File

@ -945,12 +945,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
pskb_trim(skb, ovs_mac_header_len(key));
}
/* Need to set the pkt_type to involve the routing layer. The
* packet movement through the OVS datapath doesn't generally
* use routing, but this is needed for tunnel cases.
*/
skb->pkt_type = PACKET_OUTGOING;
if (likely(!mru ||
(skb->len <= mru + vport->dev->hard_header_len))) {
ovs_vport_send(vport, skb, ovs_key_mac_proto(key));

View File

@ -1,3 +1,25 @@
* Tue Sep 16 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.46.1.el9_6]
- net: usb: smsc75xx: Move packet length check to prevent kernel panic in skb_pull (CKI Backport Bot) [RHEL-112246] {CVE-2023-53125}
- net: usb: smsc75xx: Limit packet length to skb->len (CKI Backport Bot) [RHEL-112246] {CVE-2023-53125}
- s390/sclp: Fix SCCB present check (CKI Backport Bot) [RHEL-113558] {CVE-2025-39694}
- use uniform permission checks for all mount propagation changes (Ian Kent) [RHEL-107301] {CVE-2025-38498}
- do_change_type(): refuse to operate on unmounted/not ours mounts (Ian Kent) [RHEL-107301] {CVE-2025-38498}
- usb: dwc3: gadget: check that event count does not exceed event buffer length (CKI Backport Bot) [RHEL-107649] {CVE-2025-37810}
Resolves: RHEL-107301, RHEL-107649, RHEL-112246, RHEL-113558
* Sat Sep 13 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.45.1.el9_6]
- tunnels: Accept PACKET_HOST in skb_tunnel_check_pmtu(). (Adrian Moreno) [RHEL-113279]
- igc: fix lock order in igc_ptp_reset (CKI Backport Bot) [RHEL-108118]
- igc: add lock preventing multiple simultaneous PTM transactions (CKI Backport Bot) [RHEL-108118]
- igc: cleanup PTP module if probe fails (CKI Backport Bot) [RHEL-108118]
- igc: handle the IGC_PTP_ENABLED flag correctly (CKI Backport Bot) [RHEL-108118]
- igc: move ktime snapshot into PTM retry loop (CKI Backport Bot) [RHEL-108118]
- igc: increase wait time before retrying PTM (CKI Backport Bot) [RHEL-108118]
- igc: fix PTM cycle trigger logic (CKI Backport Bot) [RHEL-108118]
- ice: use fixed adapter index for E825C embedded devices (CKI Backport Bot) [RHEL-111766]
- ice: use DSN instead of PCI BDF for ice_adapter index (CKI Backport Bot) [RHEL-111766]
Resolves: RHEL-108118, RHEL-111766, RHEL-113279
* Tue Sep 09 2025 Chao YE <cye@redhat.com> [5.14.0-570.44.1.el9_6]
- ipv6: mcast: Delay put pmc->idev in mld_del_delrec() (CKI Backport Bot) [RHEL-111149] {CVE-2025-38550}
- posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del() (CKI Backport Bot) [RHEL-112780] {CVE-2025-38352}