Import of kernel-6.12.0-124.16.1.el10_1

parent 13d687fe64
commit 1dc94ec495
@@ -12,7 +12,7 @@ RHEL_MINOR = 1
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 124.13.1
+RHEL_RELEASE = 124.16.1

 #
 # RHEL_REBASE_NUM

@@ -500,6 +500,9 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y
 CONFIG_PPC_UV=y
 # CONFIG_LD_HEAD_STUB_CATCH is not set
 CONFIG_MPROFILE_KERNEL=y
+CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY=y
+CONFIG_PPC_FTRACE_OUT_OF_LINE=y
+CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE=32768
 CONFIG_HOTPLUG_CPU=y
 CONFIG_INTERRUPT_SANITIZE_REGISTERS=y
 CONFIG_PPC_QUEUED_SPINLOCKS=y

@@ -722,6 +725,7 @@ CONFIG_FUNCTION_ALIGNMENT_4B=y
 CONFIG_FUNCTION_ALIGNMENT=4
 CONFIG_CC_HAS_MIN_FUNCTION_ALIGNMENT=y
 CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT=y
+CONFIG_ARCH_WANTS_PRE_LINK_VMLINUX=y
 # end of General architecture-dependent options

 CONFIG_RT_MUTEXES=y

@@ -5018,6 +5022,7 @@ CONFIG_HID_KUNIT_TEST=m
 #
 # HID-BPF support
 #
+CONFIG_HID_BPF=y
 # end of HID-BPF support

 CONFIG_I2C_HID=y

@@ -7120,6 +7125,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y
 CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
 CONFIG_HAVE_DYNAMIC_FTRACE=y
 CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y
 CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
 CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
 CONFIG_HAVE_SYSCALL_TRACEPOINTS=y

@@ -7140,6 +7147,8 @@ CONFIG_FUNCTION_TRACER=y
 CONFIG_FUNCTION_GRAPH_TRACER=y
 CONFIG_DYNAMIC_FTRACE=y
 CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
+CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y
 CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y
 CONFIG_FPROBE=y
 CONFIG_FUNCTION_PROFILER=y

@@ -7164,7 +7173,7 @@ CONFIG_BPF_EVENTS=y
 CONFIG_DYNAMIC_EVENTS=y
 CONFIG_PROBE_EVENTS=y
 CONFIG_FTRACE_MCOUNT_RECORD=y
-CONFIG_FTRACE_MCOUNT_USE_CC=y
+CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y
 CONFIG_TRACING_MAP=y
 CONFIG_SYNTH_EVENTS=y
 # CONFIG_USER_EVENTS is not set

@@ -7190,6 +7199,8 @@ CONFIG_RV_REACTORS=y
 CONFIG_RV_REACT_PRINTK=y
 CONFIG_RV_REACT_PANIC=y
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y
 CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
 CONFIG_STRICT_DEVMEM=y
 # CONFIG_IO_STRICT_DEVMEM is not set

@@ -1441,18 +1441,28 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
     }
 }

-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+/* Must be called when queue is frozen */
+static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
 {
-    struct gendisk *disk;
+    bool canceled;

     spin_lock(&ubq->cancel_lock);
-    if (ubq->canceling) {
-        spin_unlock(&ubq->cancel_lock);
-        return false;
-    }
-    ubq->canceling = true;
+    canceled = ubq->canceling;
+    if (!canceled)
+        ubq->canceling = true;
     spin_unlock(&ubq->cancel_lock);

+    return canceled;
+}
+
+static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+    bool was_canceled = ubq->canceling;
+    struct gendisk *disk;
+
+    if (was_canceled)
+        return false;
+
     spin_lock(&ub->lock);
     disk = ub->ub_disk;
     if (disk)

@@ -1463,14 +1473,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
     if (!disk)
         return false;

-    /* Now we are serialized with ublk_queue_rq() */
+    /*
+     * Now we are serialized with ublk_queue_rq()
+     *
+     * Make sure that ubq->canceling is set when queue is frozen,
+     * because ublk_queue_rq() has to rely on this flag for avoiding to
+     * touch completed uring_cmd
+     */
     blk_mq_quiesce_queue(disk->queue);
-    /* abort queue is for making forward progress */
-    ublk_abort_queue(ub, ubq);
+    was_canceled = ublk_mark_queue_canceling(ubq);
+    if (!was_canceled) {
+        /* abort queue is for making forward progress */
+        ublk_abort_queue(ub, ubq);
+    }
     blk_mq_unquiesce_queue(disk->queue);
     put_device(disk_to_dev(disk));

-    return true;
+    return !was_canceled;
 }

 static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,

@@ -549,12 +549,12 @@ static int e1000_set_eeprom(struct net_device *netdev,
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
+    size_t total_len, max_len;
     u16 *eeprom_buff;
-    void *ptr;
-    int max_len;
+    int ret_val = 0;
     int first_word;
     int last_word;
-    int ret_val = 0;
+    void *ptr;
     u16 i;

     if (eeprom->len == 0)

@@ -569,6 +569,10 @@ static int e1000_set_eeprom(struct net_device *netdev,

     max_len = hw->nvm.word_size * 2;

+    if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) ||
+        total_len > max_len)
+        return -EFBIG;
+
     first_word = eeprom->offset >> 1;
     last_word = (eeprom->offset + eeprom->len - 1) >> 1;
     eeprom_buff = kmalloc(max_len, GFP_KERNEL);

@@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
               (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
               (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
               BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
-              (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+              FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
         wr32(hw, reg_idx, reg);
     }

@@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,

     /* only set the required fields */
     tx_ctx.base = info->dma_ring_addr / 128;
+
+    /* ring_len has to be multiple of 8 */
+    if (!IS_ALIGNED(info->ring_len, 8) ||
+        info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+        ret = -EINVAL;
+        goto error_context;
+    }
     tx_ctx.qlen = info->ring_len;
     tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
     tx_ctx.rdylist_act = 0;

@@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,

     /* only set the required fields */
     rx_ctx.base = info->dma_ring_addr / 128;
+
+    /* ring_len has to be multiple of 32 */
+    if (!IS_ALIGNED(info->ring_len, 32) ||
+        info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+        ret = -EINVAL;
+        goto error_param;
+    }
     rx_ctx.qlen = info->ring_len;

     if (info->splithdr_enabled) {

@@ -1453,6 +1467,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
      * functions that may still be running at this point.
      */
     clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+    clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);

     /* In the case of a VFLR, the HW has already reset the VF and we
      * just need to clean up, so don't hit the VFRTRIG register.

@@ -2119,7 +2134,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
     size_t len = 0;
     int ret;

-    if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+    i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
+
+    if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
+        test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
         aq_ret = -EINVAL;
         goto err;
     }

@@ -2222,6 +2240,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
                vf->default_lan_addr.addr);
     }
     set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+    set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);

 err:
     /* send the response back to the VF */

@@ -2384,7 +2403,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
         }

         if (vf->adq_enabled) {
-            if (idx >= ARRAY_SIZE(vf->ch)) {
+            if (idx >= vf->num_tc) {
                 aq_ret = -ENODEV;
                 goto error_param;
             }

@@ -2405,7 +2424,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
          * to its appropriate VSIs based on TC mapping
          */
         if (vf->adq_enabled) {
-            if (idx >= ARRAY_SIZE(vf->ch)) {
+            if (idx >= vf->num_tc) {
                 aq_ret = -ENODEV;
                 goto error_param;
             }

@@ -2455,8 +2474,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
     u16 vsi_queue_id, queue_id;

     for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
-        if (vf->adq_enabled) {
-            vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+        u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
+
+        if (vf->adq_enabled && idx < vf->num_tc) {
+            vsi_id = vf->ch[idx].vsi_id;
             queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
         } else {
             queue_id = vsi_queue_id;

@@ -3589,7 +3610,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,

     /* action_meta is TC number here to which the filter is applied */
     if (!tc_filter->action_meta ||
-        tc_filter->action_meta > vf->num_tc) {
+        tc_filter->action_meta >= vf->num_tc) {
         dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
                  vf->vf_id, tc_filter->action_meta);
         goto err;

@@ -3887,6 +3908,8 @@ err:
                            aq_ret);
 }

+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
 /**
  * i40e_vc_add_cloud_filter
  * @vf: pointer to the VF info

@@ -3926,6 +3949,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
         goto err_out;
     }

+    if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+        dev_warn(&pf->pdev->dev,
+                 "VF %d: Max number of filters reached, can't apply cloud filter\n",
+                 vf->vf_id);
+        aq_ret = -ENOSPC;
+        goto err_out;
+    }
+
     cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
     if (!cfilter) {
         aq_ret = -ENOMEM;

@@ -41,7 +41,8 @@ enum i40e_vf_states {
     I40E_VF_STATE_MC_PROMISC,
     I40E_VF_STATE_UC_PROMISC,
     I40E_VF_STATE_PRE_ENABLE,
-    I40E_VF_STATE_RESETTING
+    I40E_VF_STATE_RESETTING,
+    I40E_VF_STATE_RESOURCES_LOADED,
 };

 /* VF capabilities */

@@ -1677,6 +1677,10 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
     skb_queue_splice_tail_init(&wcid->tx_pending, &list);
     spin_unlock(&wcid->tx_pending.lock);

+    spin_lock(&wcid->tx_offchannel.lock);
+    skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
+    spin_unlock(&wcid->tx_offchannel.lock);
+
     spin_unlock_bh(&phy->tx_lock);

     while ((skb = __skb_dequeue(&list)) != NULL) {

@@ -1688,7 +1692,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);

 void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
 {
-    if (test_bit(MT76_MCU_RESET, &dev->phy.state))
+    if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
         return;

     spin_lock_bh(&dev->sta_poll_lock);

@@ -645,6 +645,7 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
 static void mt76_txq_schedule_pending(struct mt76_phy *phy)
 {
     LIST_HEAD(tx_list);
+    int ret = 0;

     if (list_empty(&phy->tx_list))
         return;

@@ -656,13 +657,13 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
     list_splice_init(&phy->tx_list, &tx_list);
     while (!list_empty(&tx_list)) {
         struct mt76_wcid *wcid;
-        int ret;

         wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
         list_del_init(&wcid->tx_list);

         spin_unlock(&phy->tx_lock);
-        ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+        if (ret >= 0)
+            ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
         if (ret >= 0 && !phy->offchannel)
             ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
         spin_lock(&phy->tx_lock);

@@ -671,9 +672,6 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
             !skb_queue_empty(&wcid->tx_offchannel) &&
             list_empty(&wcid->tx_list))
             list_add_tail(&wcid->tx_list, &phy->tx_list);
-
-        if (ret < 0)
-            break;
     }
     spin_unlock(&phy->tx_lock);

@@ -126,12 +126,14 @@ void nvme_mpath_start_request(struct request *rq)
     struct nvme_ns *ns = rq->q->queuedata;
     struct gendisk *disk = ns->head->disk;

-    if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+    if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
+        !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
         atomic_inc(&ns->ctrl->nr_active);
         nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
     }

-    if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
+    if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
+        (nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
         return;

     nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;

@@ -4697,10 +4697,16 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
     }
     status = nfs_ok;
     if (conf) {
-        old = unconf;
-        unhash_client_locked(old);
-        nfsd4_change_callback(conf, &unconf->cl_cb_conn);
-    } else {
+        if (get_client_locked(conf) == nfs_ok) {
+            old = unconf;
+            unhash_client_locked(old);
+            nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+        } else {
+            conf = NULL;
+        }
+    }
+
+    if (!conf) {
         old = find_confirmed_client_by_name(&unconf->cl_name, nn);
         if (old) {
             status = nfserr_clid_inuse;

@@ -4717,10 +4723,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
             }
             trace_nfsd_clid_replaced(&old->cl_clientid);
         }
+        status = get_client_locked(unconf);
+        if (status != nfs_ok) {
+            old = NULL;
+            goto out;
+        }
         move_to_confirmed(unconf);
         conf = unconf;
     }
-    get_client_locked(conf);
     spin_unlock(&nn->client_lock);
     if (conf == unconf)
         fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);

@@ -2555,10 +2555,9 @@ int unpoison_memory(unsigned long pfn)
     static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
                                   DEFAULT_RATELIMIT_BURST);

-    if (!pfn_valid(pfn))
-        return -ENXIO;
-
-    p = pfn_to_page(pfn);
+    p = pfn_to_online_page(pfn);
+    if (!p)
+        return -EIO;
     folio = page_folio(p);

     mutex_lock(&mf_mutex);

@@ -2280,6 +2280,7 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
         if (IS_ERR(dst))
             goto out_drop;

+        skb_dst_drop(skb);
         skb_dst_set(skb, dst);
     } else if (nh->nh_family != AF_INET6) {
         goto out_drop;

@@ -2388,6 +2389,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
             goto out_drop;
         }

+        skb_dst_drop(skb);
         skb_dst_set(skb, &rt->dst);
     }

@@ -1,3 +1,28 @@
+* Sat Nov 22 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.16.1.el10_1]
+- bpf: Fix metadata_dst leak __bpf_redirect_neigh_v{4,6} (Xin Long) [RHEL-125759]
+- mm/memory-failure: fix VM_BUG_ON_PAGE(PagePoisoned(page)) when unpoison memory (CKI Backport Bot) [RHEL-119161] {CVE-2025-39883}
+Resolves: RHEL-119161, RHEL-125759
+
+* Thu Nov 20 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.15.1.el10_1]
+- nfsd: handle get_client_locked() failure in nfsd4_setclientid_confirm() (CKI Backport Bot) [RHEL-125623] {CVE-2025-38724}
+- wifi: mt76: free pending offchannel tx frames on wcid cleanup (Jose Ignacio Tornos Martinez) [RHEL-123070]
+- wifi: mt76: do not add non-sta wcid entries to the poll list (Jose Ignacio Tornos Martinez) [RHEL-123070]
+- wifi: mt76: fix linked list corruption (Jose Ignacio Tornos Martinez) [RHEL-123070] {CVE-2025-39918}
+Resolves: RHEL-123070, RHEL-125623
+
+* Wed Nov 19 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.14.1.el10_1]
+- ublk: make sure ubq->canceling is set when queue is frozen (Ming Lei) [RHEL-99436] {CVE-2025-22068}
+- e1000e: fix heap overflow in e1000_set_eeprom (Corinna Vinschen) [RHEL-123127] {CVE-2025-39898}
+- i40e: add mask to apply valid bits for itr_idx (Michal Schmidt) [RHEL-123811]
+- i40e: add max boundary check for VF filters (Michal Schmidt) [RHEL-123811] {CVE-2025-39968}
+- i40e: fix validation of VF state in get resources (Michal Schmidt) [RHEL-123811] {CVE-2025-39969}
+- i40e: fix input validation logic for action_meta (Michal Schmidt) [RHEL-123811] {CVE-2025-39970}
+- i40e: fix idx validation in config queues msg (Michal Schmidt) [RHEL-123811] {CVE-2025-39971}
+- i40e: fix idx validation in i40e_validate_queue_map (Michal Schmidt) [RHEL-123811] {CVE-2025-39972}
+- i40e: add validation for ring_len param (Michal Schmidt) [RHEL-123811] {CVE-2025-39973}
+- nvme-multipath: Skip nr_active increments in RETRY disposition (Ewan D. Milne) [RHEL-123689]
+Resolves: RHEL-123127, RHEL-123689, RHEL-123811, RHEL-99436
+
 * Thu Nov 13 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.13.1.el10_1]
 - NFSv4: handle ERR_GRACE on delegation recalls (Olga Kornievskaia) [RHEL-127623]
 - nfsd: nfserr_jukebox in nlm_fopen should lead to a retry (Olga Kornievskaia) [RHEL-127623]

@@ -1,3 +1,3 @@
 sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
-kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.13.1.el10.x86_64,mailto:secalert@redhat.com
-kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.13.1.el10.x86_64,mailto:security@almalinux.org
+kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.16.1.el10.x86_64,mailto:secalert@redhat.com
+kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.16.1.el10.x86_64,mailto:security@almalinux.org

uki.sbat
@@ -1,3 +1,3 @@
 sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
-kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.13.1.el10.x86_64,mailto:secalert@redhat.com
-kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.13.1.el10.x86_64,mailto:security@almalinux.org
+kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.16.1.el10.x86_64,mailto:secalert@redhat.com
+kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.16.1.el10.x86_64,mailto:security@almalinux.org