Import of kernel-6.12.0-124.31.1.el10_1

parent 2e8a266009
commit 4945f4fd52
@@ -143,3 +143,7 @@ own name.
   * - ``clock_id``
     - u64
     - Clock ID used by the device for registering DPLL devices and pins.
   * - ``max_mac_per_vf``
     - u32
     - Controls the maximum number of MAC address filters that can be assigned
       to a Virtual Function (VF).

@@ -7,6 +7,40 @@ i40e devlink support
This document describes the devlink features implemented by the ``i40e``
device driver.

Parameters
==========

.. list-table:: Generic parameters implemented
   :widths: 5 5 90

   * - Name
     - Mode
     - Notes
   * - ``max_mac_per_vf``
     - runtime
     - Controls the maximum number of MAC addresses a VF can use
       on i40e devices.

       By default (``0``), the driver enforces its internally calculated per-VF
       MAC filter limit, which is based on the number of allocated VFs.

       If set to a non-zero value, this parameter acts as a strict cap:
       the driver will use the user-provided value instead of its internal
       calculation.

       **Important notes:**

       - This value **must be set before enabling SR-IOV**.
         Attempting to change it while SR-IOV is enabled will return an error.
       - MAC filters are a **shared hardware resource** across all VFs.
         Setting a high value may cause other VFs to be starved of filters.
       - This value is an **administrative policy**. The hardware may return
         errors when its absolute limit is reached, regardless of the value
         set here.

       The default value is ``0`` (internal calculation is used).
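
As an illustration only (the PCI device address below is a placeholder), the parameter can be changed at runtime with the ``devlink`` userspace tool before SR-IOV is enabled, e.g. ``devlink dev param set pci/0000:3b:00.0 name max_mac_per_vf value 8 cmode runtime``, and read back with ``devlink dev param show pci/0000:3b:00.0 name max_mac_per_vf``.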

Info versions
=============

@@ -12,7 +12,7 @@ RHEL_MINOR = 1
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 124.29.1
RHEL_RELEASE = 124.31.1

#
# RHEL_REBASE_NUM

@ -1639,6 +1639,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
|
||||
break;
|
||||
case SYS_ID_AA64MMFR2_EL1:
|
||||
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
|
||||
val &= ~ID_AA64MMFR2_EL1_NV;
|
||||
break;
|
||||
case SYS_ID_AA64MMFR3_EL1:
|
||||
val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE |
|
||||
@ -2005,6 +2006,22 @@ static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
|
||||
return set_id_reg(vcpu, rd, user_val);
|
||||
}
|
||||
|
||||
static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd, u64 user_val)
|
||||
{
|
||||
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
|
||||
u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
|
||||
|
||||
/*
|
||||
* We made the mistake to expose the now deprecated NV field,
|
||||
* so allow userspace to write it, but silently ignore it.
|
||||
*/
|
||||
if ((hw_val & nv_mask) == (user_val & nv_mask))
|
||||
user_val &= ~nv_mask;
|
||||
|
||||
return set_id_reg(vcpu, rd, user_val);
|
||||
}
|
||||
|
||||
static int set_ctr_el0(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd, u64 user_val)
|
||||
{
|
||||
@ -2890,7 +2907,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
ID_AA64MMFR1_EL1_XNX |
|
||||
ID_AA64MMFR1_EL1_VH |
|
||||
ID_AA64MMFR1_EL1_VMIDBits)),
|
||||
ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
|
||||
ID_FILTERED(ID_AA64MMFR2_EL1,
|
||||
id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
|
||||
ID_AA64MMFR2_EL1_EVT |
|
||||
ID_AA64MMFR2_EL1_FWB |
|
||||
ID_AA64MMFR2_EL1_IDS |
|
||||
|
||||
@ -142,7 +142,6 @@ config S390
|
||||
select ARCH_WANT_IPC_PARSE_VERSION
|
||||
select ARCH_WANT_KERNEL_PMD_MKWRITE
|
||||
select ARCH_WANT_LD_ORPHAN_WARN
|
||||
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
|
||||
select BUILDTIME_TABLE_SORT
|
||||
select CLONE_BACKWARDS2
|
||||
select DCACHE_WORD_ACCESS if !KMSAN
|
||||
|
||||
@@ -15,6 +15,17 @@
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <linux/jump_label.h>

/*
 * RHEL-only: Since the 'hugetlb_optimize_vmemmap_key' static key is part
 * of the kABI, we need stub definitions to avoid breaking the build
 * when CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=n.
 */
#ifndef CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
#endif

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move

@ -25,10 +25,6 @@
|
||||
#include "efi.h"
|
||||
|
||||
#include <generated/compile.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/uts.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <generated/utsversion.h>
|
||||
#include <generated/utsrelease.h>
|
||||
|
||||
|
||||
@ -2741,3 +2741,6 @@ void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *
|
||||
text_poke_loc_init(&tp, addr, opcode, len, emulate);
|
||||
text_poke_bp_batch(&tp, 1);
|
||||
}
|
||||
|
||||
struct alt_instr __kabi__alt_instr[0];
|
||||
EXPORT_SYMBOL_GPL(__kabi__alt_instr);
|
||||
|
||||
@ -967,9 +967,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
|
||||
ret = __add_pages(nid, start_pfn, nr_pages, params);
|
||||
WARN_ON_ONCE(ret);
|
||||
|
||||
/* update max_pfn, max_low_pfn and high_memory */
|
||||
update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
|
||||
nr_pages << PAGE_SHIFT);
|
||||
/*
|
||||
* Special case: add_pages() is called by memremap_pages() for adding device
|
||||
* private pages. Do not bump up max_pfn in the device private path,
|
||||
* because max_pfn changes affect dma_addressing_limited().
|
||||
*
|
||||
* dma_addressing_limited() returning true when max_pfn is the device's
|
||||
* addressable memory can force device drivers to use bounce buffers
|
||||
* and impact their performance negatively:
|
||||
*/
|
||||
if (!params->pgmap)
|
||||
/* update max_pfn, max_low_pfn and high_memory */
|
||||
update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -113,8 +113,14 @@ void __init kernel_randomize_memory(void)
|
||||
memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
|
||||
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
|
||||
|
||||
/* Adapt physical memory region size based on available memory */
|
||||
if (memory_tb < kaslr_regions[0].size_tb)
|
||||
/*
|
||||
* Adapt physical memory region size based on available memory,
|
||||
* except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
|
||||
* device BAR space assuming the direct map space is large enough
|
||||
* for creating a ZONE_DEVICE mapping in the direct map corresponding
|
||||
* to the physical BAR address.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
|
||||
kaslr_regions[0].size_tb = memory_tb;
|
||||
|
||||
/*
|
||||
|
||||
@ -498,9 +498,6 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y
|
||||
CONFIG_PPC_UV=y
|
||||
# CONFIG_LD_HEAD_STUB_CATCH is not set
|
||||
CONFIG_MPROFILE_KERNEL=y
|
||||
CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY=y
|
||||
CONFIG_PPC_FTRACE_OUT_OF_LINE=y
|
||||
CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE=32768
|
||||
CONFIG_HOTPLUG_CPU=y
|
||||
CONFIG_INTERRUPT_SANITIZE_REGISTERS=y
|
||||
CONFIG_PPC_QUEUED_SPINLOCKS=y
|
||||
@ -724,7 +721,6 @@ CONFIG_FUNCTION_ALIGNMENT_4B=y
|
||||
CONFIG_FUNCTION_ALIGNMENT=4
|
||||
CONFIG_CC_HAS_MIN_FUNCTION_ALIGNMENT=y
|
||||
CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT=y
|
||||
CONFIG_ARCH_WANTS_PRE_LINK_VMLINUX=y
|
||||
# end of General architecture-dependent options
|
||||
|
||||
CONFIG_RT_MUTEXES=y
|
||||
@ -5028,7 +5024,6 @@ CONFIG_HID_KUNIT_TEST=m
|
||||
#
|
||||
# HID-BPF support
|
||||
#
|
||||
CONFIG_HID_BPF=y
|
||||
# end of HID-BPF support
|
||||
|
||||
CONFIG_I2C_HID=y
|
||||
@ -7098,8 +7093,6 @@ CONFIG_HAVE_FUNCTION_TRACER=y
|
||||
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
|
||||
CONFIG_HAVE_DYNAMIC_FTRACE=y
|
||||
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
|
||||
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
|
||||
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y
|
||||
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
|
||||
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
|
||||
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
|
||||
@ -7119,8 +7112,6 @@ CONFIG_FUNCTION_TRACER=y
|
||||
CONFIG_FUNCTION_GRAPH_TRACER=y
|
||||
CONFIG_DYNAMIC_FTRACE=y
|
||||
CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
|
||||
CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
|
||||
CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y
|
||||
CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y
|
||||
CONFIG_FPROBE=y
|
||||
CONFIG_FUNCTION_PROFILER=y
|
||||
@ -7145,7 +7136,7 @@ CONFIG_BPF_EVENTS=y
|
||||
CONFIG_DYNAMIC_EVENTS=y
|
||||
CONFIG_PROBE_EVENTS=y
|
||||
CONFIG_FTRACE_MCOUNT_RECORD=y
|
||||
CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y
|
||||
CONFIG_FTRACE_MCOUNT_USE_CC=y
|
||||
CONFIG_TRACING_MAP=y
|
||||
CONFIG_SYNTH_EVENTS=y
|
||||
# CONFIG_USER_EVENTS is not set
|
||||
@ -7171,8 +7162,6 @@ CONFIG_RV_REACTORS=y
|
||||
CONFIG_RV_REACT_PRINTK=y
|
||||
CONFIG_RV_REACT_PANIC=y
|
||||
# CONFIG_SAMPLES is not set
|
||||
CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y
|
||||
CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y
|
||||
CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
|
||||
CONFIG_STRICT_DEVMEM=y
|
||||
# CONFIG_IO_STRICT_DEVMEM is not set
|
||||
|
||||
@ -717,7 +717,6 @@ CONFIG_SPARSEMEM=y
|
||||
CONFIG_SPARSEMEM_EXTREME=y
|
||||
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
|
||||
CONFIG_SPARSEMEM_VMEMMAP=y
|
||||
CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
|
||||
CONFIG_HAVE_MEMBLOCK_PHYS_MAP=y
|
||||
CONFIG_HAVE_GUP_FAST=y
|
||||
CONFIG_NUMA_KEEP_MEMINFO=y
|
||||
@ -3286,9 +3285,7 @@ CONFIG_TMPFS_INODE64=y
|
||||
CONFIG_TMPFS_QUOTA=y
|
||||
CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
|
||||
CONFIG_HUGETLBFS=y
|
||||
# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
|
||||
CONFIG_HUGETLB_PAGE=y
|
||||
CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
|
||||
CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
|
||||
CONFIG_CONFIGFS_FS=y
|
||||
# end of Pseudo filesystems
|
||||
|
||||
@ -600,7 +600,6 @@ CONFIG_SPARSEMEM=y
|
||||
CONFIG_SPARSEMEM_EXTREME=y
|
||||
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
|
||||
CONFIG_SPARSEMEM_VMEMMAP=y
|
||||
CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
|
||||
CONFIG_HAVE_MEMBLOCK_PHYS_MAP=y
|
||||
CONFIG_HAVE_GUP_FAST=y
|
||||
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
|
||||
|
||||
@ -740,7 +740,6 @@ CONFIG_SPARSEMEM=y
|
||||
CONFIG_SPARSEMEM_EXTREME=y
|
||||
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
|
||||
CONFIG_SPARSEMEM_VMEMMAP=y
|
||||
CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
|
||||
CONFIG_HAVE_MEMBLOCK_PHYS_MAP=y
|
||||
CONFIG_HAVE_GUP_FAST=y
|
||||
CONFIG_NUMA_KEEP_MEMINFO=y
|
||||
@ -3311,9 +3310,7 @@ CONFIG_TMPFS_INODE64=y
|
||||
CONFIG_TMPFS_QUOTA=y
|
||||
CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
|
||||
CONFIG_HUGETLBFS=y
|
||||
# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
|
||||
CONFIG_HUGETLB_PAGE=y
|
||||
CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
|
||||
CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
|
||||
CONFIG_CONFIGFS_FS=y
|
||||
# end of Pseudo filesystems
|
||||
|
||||
@ -3184,6 +3184,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
|
||||
free_bind_ops:
|
||||
if (args->num_binds > 1)
|
||||
kvfree(*bind_ops);
|
||||
*bind_ops = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -3289,7 +3290,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
struct xe_exec_queue *q = NULL;
|
||||
u32 num_syncs, num_ufence = 0;
|
||||
struct xe_sync_entry *syncs = NULL;
|
||||
struct drm_xe_vm_bind_op *bind_ops;
|
||||
struct drm_xe_vm_bind_op *bind_ops = NULL;
|
||||
struct xe_vma_ops vops;
|
||||
struct dma_fence *fence;
|
||||
int err;
|
||||
|
||||
@ -1352,6 +1352,9 @@ static void ib_device_notify_register(struct ib_device *device)
|
||||
|
||||
down_read(&devices_rwsem);
|
||||
|
||||
/* Mark for userspace that device is ready */
|
||||
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
|
||||
|
||||
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -1468,10 +1471,9 @@ int ib_register_device(struct ib_device *device, const char *name,
|
||||
return ret;
|
||||
}
|
||||
dev_set_uevent_suppress(&device->dev, false);
|
||||
/* Mark for userspace that device is ready */
|
||||
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
|
||||
|
||||
ib_device_notify_register(device);
|
||||
|
||||
ib_device_put(device);
|
||||
|
||||
return 0;
|
||||
|
||||
@ -420,7 +420,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
|
||||
#ifdef CONFIG_ACPI
|
||||
static int acpi_num_msi;
|
||||
|
||||
static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
|
||||
static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
|
||||
{
|
||||
struct v2m_data *data;
|
||||
|
||||
|
||||
@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
|
||||
|
||||
if (!buff->is_eop) {
|
||||
unsigned int frag_cnt = 0U;
|
||||
|
||||
/* There will be an extra fragment */
|
||||
if (buff->len > AQ_CFG_RX_HDR_SIZE)
|
||||
frag_cnt++;
|
||||
|
||||
buff_ = buff;
|
||||
do {
|
||||
bool is_rsc_completed = true;
|
||||
|
||||
@ -573,6 +573,10 @@ struct i40e_pf {
|
||||
struct i40e_vf *vf;
|
||||
int num_alloc_vfs; /* actual number of VFs allocated */
|
||||
u32 vf_aq_requests;
|
||||
/* If set to non-zero, the device uses this value
|
||||
* as maximum number of MAC filters per VF.
|
||||
*/
|
||||
u32 max_mac_per_vf;
|
||||
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
|
||||
struct ratelimit_state mdd_message_rate_limit;
|
||||
/* DCBx/DCBNL capability for PF that indicates
|
||||
@ -1277,7 +1281,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
|
||||
const u8 *macaddr);
|
||||
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
|
||||
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
|
||||
int i40e_count_filters(struct i40e_vsi *vsi);
|
||||
int i40e_count_all_filters(struct i40e_vsi *vsi);
|
||||
int i40e_count_active_filters(struct i40e_vsi *vsi);
|
||||
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
|
||||
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
|
||||
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
|
||||
|
||||
@ -5,6 +5,41 @@
|
||||
#include "i40e.h"
|
||||
#include "i40e_devlink.h"
|
||||
|
||||
static int i40e_max_mac_per_vf_set(struct devlink *devlink,
|
||||
u32 id,
|
||||
struct devlink_param_gset_ctx *ctx,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct i40e_pf *pf = devlink_priv(devlink);
|
||||
|
||||
if (pf->num_alloc_vfs > 0) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Cannot change max_mac_per_vf while SR-IOV is enabled");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
pf->max_mac_per_vf = ctx->val.vu32;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i40e_max_mac_per_vf_get(struct devlink *devlink,
|
||||
u32 id,
|
||||
struct devlink_param_gset_ctx *ctx)
|
||||
{
|
||||
struct i40e_pf *pf = devlink_priv(devlink);
|
||||
|
||||
ctx->val.vu32 = pf->max_mac_per_vf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct devlink_param i40e_dl_params[] = {
|
||||
DEVLINK_PARAM_GENERIC(MAX_MAC_PER_VF,
|
||||
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
|
||||
i40e_max_mac_per_vf_get,
|
||||
i40e_max_mac_per_vf_set,
|
||||
NULL),
|
||||
};
|
||||
|
||||
static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len)
|
||||
{
|
||||
u8 dsn[8];
|
||||
@ -165,7 +200,18 @@ void i40e_free_pf(struct i40e_pf *pf)
|
||||
**/
|
||||
void i40e_devlink_register(struct i40e_pf *pf)
|
||||
{
|
||||
devlink_register(priv_to_devlink(pf));
|
||||
struct devlink *dl = priv_to_devlink(pf);
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
int err;
|
||||
|
||||
err = devlink_params_register(dl, i40e_dl_params,
|
||||
ARRAY_SIZE(i40e_dl_params));
|
||||
if (err)
|
||||
dev_err(dev,
|
||||
"devlink params register failed with error %d", err);
|
||||
|
||||
devlink_register(dl);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
@ -176,7 +222,11 @@ void i40e_devlink_register(struct i40e_pf *pf)
|
||||
**/
|
||||
void i40e_devlink_unregister(struct i40e_pf *pf)
|
||||
{
|
||||
devlink_unregister(priv_to_devlink(pf));
|
||||
struct devlink *dl = priv_to_devlink(pf);
|
||||
|
||||
devlink_unregister(dl);
|
||||
devlink_params_unregister(dl, i40e_dl_params,
|
||||
ARRAY_SIZE(i40e_dl_params));
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -1241,12 +1241,30 @@ void i40e_update_stats(struct i40e_vsi *vsi)
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_count_filters - counts VSI mac filters
|
||||
* i40e_count_all_filters - counts VSI MAC filters
|
||||
* @vsi: the VSI to be searched
|
||||
*
|
||||
* Returns count of mac filters
|
||||
**/
|
||||
int i40e_count_filters(struct i40e_vsi *vsi)
|
||||
* Return: count of MAC filters in any state.
|
||||
*/
|
||||
int i40e_count_all_filters(struct i40e_vsi *vsi)
|
||||
{
|
||||
struct i40e_mac_filter *f;
|
||||
struct hlist_node *h;
|
||||
int bkt, cnt = 0;
|
||||
|
||||
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
|
||||
cnt++;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_count_active_filters - counts VSI MAC filters
|
||||
* @vsi: the VSI to be searched
|
||||
*
|
||||
* Return: count of active MAC filters.
|
||||
*/
|
||||
int i40e_count_active_filters(struct i40e_vsi *vsi)
|
||||
{
|
||||
struct i40e_mac_filter *f;
|
||||
struct hlist_node *h;
|
||||
|
||||
@ -2865,24 +2865,6 @@ error_param:
|
||||
(u8 *)&stats, sizeof(stats));
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_can_vf_change_mac
|
||||
* @vf: pointer to the VF info
|
||||
*
|
||||
* Return true if the VF is allowed to change its MAC filters, false otherwise
|
||||
*/
|
||||
static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
|
||||
{
|
||||
/* If the VF MAC address has been set administratively (via the
|
||||
* ndo_set_vf_mac command), then deny permission to the VF to
|
||||
* add/delete unicast MAC addresses, unless the VF is trusted
|
||||
*/
|
||||
if (vf->pf_set_mac && !vf->trusted)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#define I40E_MAX_MACVLAN_PER_HW 3072
|
||||
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
|
||||
(num_ports))
|
||||
@ -2921,8 +2903,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
struct i40e_pf *pf = vf->pf;
|
||||
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
|
||||
struct i40e_hw *hw = &pf->hw;
|
||||
int mac2add_cnt = 0;
|
||||
int i;
|
||||
int i, mac_add_max, mac_add_cnt = 0;
|
||||
bool vf_trusted;
|
||||
|
||||
vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
|
||||
|
||||
for (i = 0; i < al->num_elements; i++) {
|
||||
struct i40e_mac_filter *f;
|
||||
@ -2942,9 +2926,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
* The VF may request to set the MAC address filter already
|
||||
* assigned to it so do not return an error in that case.
|
||||
*/
|
||||
if (!i40e_can_vf_change_mac(vf) &&
|
||||
!is_multicast_ether_addr(addr) &&
|
||||
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
|
||||
if (!vf_trusted && !is_multicast_ether_addr(addr) &&
|
||||
vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
|
||||
return -EPERM;
|
||||
@ -2953,31 +2936,50 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
/*count filters that really will be added*/
|
||||
f = i40e_find_mac(vsi, addr);
|
||||
if (!f)
|
||||
++mac2add_cnt;
|
||||
++mac_add_cnt;
|
||||
}
|
||||
|
||||
/* If this VF is not privileged, then we can't add more than a limited
|
||||
* number of addresses. Check to make sure that the additions do not
|
||||
* push us over the limit.
|
||||
/* Determine the maximum number of MAC addresses this VF may use.
 *
 * - For untrusted VFs: use a fixed small limit.
 *
 * - For trusted VFs: limit is calculated by dividing total MAC
 *   filter pool across all VFs/ports.
 *
 * - User can override this by devlink param "max_mac_per_vf".
 *   If set, its value is used as a strict cap for both trusted and
 *   untrusted VFs.
 *   Note: even when overridden, this is a theoretical maximum; hardware
 *   may reject additional MACs if the absolute HW limit is reached.
 */
|
||||
if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
|
||||
if ((i40e_count_filters(vsi) + mac2add_cnt) >
|
||||
I40E_VC_MAX_MAC_ADDR_PER_VF) {
|
||||
if (!vf_trusted)
|
||||
mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
|
||||
else
|
||||
mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
|
||||
|
||||
if (pf->max_mac_per_vf > 0)
|
||||
mac_add_max = pf->max_mac_per_vf;
|
||||
|
||||
/* VF can replace all its filters in one step, in this case mac_add_max
|
||||
* will be added as active and another mac_add_max will be in
|
||||
* a to-be-removed state. Account for that.
|
||||
*/
|
||||
if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
|
||||
(i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
|
||||
if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n",
|
||||
mac_add_max);
|
||||
return -EPERM;
|
||||
}
|
||||
if (!vf_trusted) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
|
||||
return -EPERM;
|
||||
}
|
||||
/* If this VF is trusted, it can use more resources than untrusted.
|
||||
* However to ensure that every trusted VF has appropriate number of
|
||||
* resources, divide whole pool of resources per port and then across
|
||||
* all VFs.
|
||||
*/
|
||||
} else {
|
||||
if ((i40e_count_filters(vsi) + mac2add_cnt) >
|
||||
I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
|
||||
hw->num_ports)) {
|
||||
} else {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
|
||||
"Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n",
|
||||
mac_add_max);
|
||||
return -EPERM;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -203,6 +203,12 @@ config PCI_P2PDMA
	  P2P DMA transactions must be between devices behind the same root
	  port.

	  Enabling this option will reduce the entropy of x86 KASLR memory
	  regions. For example, on a 46-bit system the entropy goes down
	  from 16 bits to 15 bits. The actual reduction in entropy depends
	  on the physical address bits, processor features, kernel config
	  (5-level page tables) and the physical memory present on the system.

	  If unsure, say N.

config PCI_LABEL

@ -1002,7 +1002,10 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
|
||||
scode = cmdstatp->sense_hdr.sense_key;
|
||||
|
||||
if (scode == UNIT_ATTENTION) { /* New media? */
|
||||
new_session = 1;
|
||||
if (cmdstatp->sense_hdr.asc == 0x28) { /* New media */
|
||||
new_session = 1;
|
||||
DEBC_printk(STp, "New tape session.");
|
||||
}
|
||||
if (attentions < MAX_ATTENTIONS) {
|
||||
attentions++;
|
||||
continue;
|
||||
@ -1045,6 +1048,11 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
|
||||
retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
|
||||
break;
|
||||
}
|
||||
if (STp->first_tur) {
|
||||
/* Don't set pos_unknown right after device recognition */
|
||||
STp->pos_unknown = 0;
|
||||
STp->first_tur = 0;
|
||||
}
|
||||
|
||||
if (SRpnt != NULL)
|
||||
st_release_request(SRpnt);
|
||||
@ -3518,8 +3526,64 @@ static int partition_tape(struct scsi_tape *STp, int size)
|
||||
out:
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Handles any extra state needed for ioctls which are not st-specific.
|
||||
* Called with the scsi_tape lock held, released before return
|
||||
*/
|
||||
static long st_common_ioctl(struct scsi_tape *STp, struct st_modedef *STm,
|
||||
struct file *file, unsigned int cmd_in,
|
||||
unsigned long arg)
|
||||
{
|
||||
int i, retval = 0;
|
||||
|
||||
if (!STm->defined) {
|
||||
retval = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (cmd_in) {
|
||||
case SCSI_IOCTL_GET_IDLUN:
|
||||
case SCSI_IOCTL_GET_BUS_NUMBER:
|
||||
case SCSI_IOCTL_GET_PCI:
|
||||
break;
|
||||
case SG_IO:
|
||||
case SCSI_IOCTL_SEND_COMMAND:
|
||||
case CDROM_SEND_PACKET:
|
||||
if (!capable(CAP_SYS_RAWIO)) {
|
||||
retval = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
fallthrough;
|
||||
default:
|
||||
if ((i = flush_buffer(STp, 0)) < 0) {
|
||||
retval = i;
|
||||
goto out;
|
||||
} else { /* flush_buffer succeeds */
|
||||
if (STp->can_partitions) {
|
||||
i = switch_partition(STp);
|
||||
if (i < 0) {
|
||||
retval = i;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex_unlock(&STp->lock);
|
||||
|
||||
retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE,
|
||||
cmd_in, (void __user *)arg);
|
||||
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
|
||||
/* unload */
|
||||
STp->rew_at_close = 0;
|
||||
STp->ready = ST_NO_TAPE;
|
||||
}
|
||||
|
||||
return retval;
|
||||
out:
|
||||
mutex_unlock(&STp->lock);
|
||||
return retval;
|
||||
}
|
||||
|
||||
/* The ioctl command */
|
||||
static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
|
||||
@ -3528,6 +3592,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
|
||||
int i, cmd_nr, cmd_type, bt;
|
||||
int retval = 0;
|
||||
unsigned int blk;
|
||||
bool cmd_mtiocget;
|
||||
struct scsi_tape *STp = file->private_data;
|
||||
struct st_modedef *STm;
|
||||
struct st_partstat *STps;
|
||||
@ -3556,6 +3621,15 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
|
||||
if (retval)
|
||||
goto out;
|
||||
|
||||
switch (cmd_in) {
|
||||
case MTIOCPOS:
|
||||
case MTIOCGET:
|
||||
case MTIOCTOP:
|
||||
break;
|
||||
default:
|
||||
return st_common_ioctl(STp, STm, file, cmd_in, arg);
|
||||
}
|
||||
|
||||
cmd_type = _IOC_TYPE(cmd_in);
|
||||
cmd_nr = _IOC_NR(cmd_in);
|
||||
|
||||
@ -3641,6 +3715,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
|
||||
*/
|
||||
if (mtc.mt_op != MTREW &&
|
||||
mtc.mt_op != MTOFFL &&
|
||||
mtc.mt_op != MTLOAD &&
|
||||
mtc.mt_op != MTRETEN &&
|
||||
mtc.mt_op != MTERASE &&
|
||||
mtc.mt_op != MTSEEK &&
|
||||
@ -3768,17 +3843,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
|
||||
goto out;
|
||||
}
|
||||
|
||||
cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET);
|
||||
|
||||
if ((i = flush_buffer(STp, 0)) < 0) {
|
||||
retval = i;
|
||||
goto out;
|
||||
}
|
||||
if (STp->can_partitions &&
|
||||
(i = switch_partition(STp)) < 0) {
|
||||
retval = i;
|
||||
goto out;
|
||||
if (cmd_mtiocget && STp->pos_unknown) {
|
||||
/* flush fails -> modify status accordingly */
|
||||
reset_state(STp);
|
||||
STp->pos_unknown = 1;
|
||||
} else { /* return error */
|
||||
retval = i;
|
||||
goto out;
|
||||
}
|
||||
} else { /* flush_buffer succeeds */
|
||||
if (STp->can_partitions) {
|
||||
i = switch_partition(STp);
|
||||
if (i < 0) {
|
||||
retval = i;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
|
||||
if (cmd_mtiocget) {
|
||||
struct mtget mt_status;
|
||||
|
||||
if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
|
||||
@ -3792,7 +3878,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
|
||||
((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
|
||||
mt_status.mt_blkno = STps->drv_block;
|
||||
mt_status.mt_fileno = STps->drv_file;
|
||||
if (STp->block_size != 0) {
|
||||
if (STp->block_size != 0 && mt_status.mt_blkno >= 0) {
|
||||
if (STps->rw == ST_WRITING)
|
||||
mt_status.mt_blkno +=
|
||||
(STp->buffer)->buffer_bytes / STp->block_size;
|
||||
@ -3855,29 +3941,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
|
||||
}
|
||||
mt_pos.mt_blkno = blk;
|
||||
retval = put_user_mtpos(p, &mt_pos);
|
||||
goto out;
|
||||
}
|
||||
mutex_unlock(&STp->lock);
|
||||
|
||||
switch (cmd_in) {
|
||||
case SG_IO:
|
||||
case SCSI_IOCTL_SEND_COMMAND:
|
||||
case CDROM_SEND_PACKET:
|
||||
if (!capable(CAP_SYS_RAWIO))
|
||||
return -EPERM;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p);
|
||||
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
|
||||
/* unload */
|
||||
STp->rew_at_close = 0;
|
||||
STp->ready = ST_NO_TAPE;
|
||||
}
|
||||
return retval;
|
||||
|
||||
out:
|
||||
mutex_unlock(&STp->lock);
|
||||
return retval;
|
||||
@ -4348,6 +4412,7 @@ static int st_probe(struct device *dev)
|
||||
blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
|
||||
tpnt->long_timeout = ST_LONG_TIMEOUT;
|
||||
tpnt->try_dio = try_direct_io;
|
||||
tpnt->first_tur = 1;
|
||||
|
||||
for (i = 0; i < ST_NBR_MODES; i++) {
|
||||
STm = &(tpnt->modes[i]);
|
||||
|
||||
@ -171,6 +171,7 @@ struct scsi_tape {
|
||||
unsigned char rew_at_close; /* rewind necessary at close */
|
||||
unsigned char inited;
|
||||
unsigned char cleaning_req; /* cleaning requested? */
|
||||
unsigned char first_tur; /* first TEST UNIT READY */
|
||||
int block_size;
|
||||
int changed_blksize;
|
||||
int min_block;
|
||||
|
||||
@ -854,7 +854,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
|
||||
kfree_rcu(epi, rcu);
|
||||
|
||||
percpu_counter_dec(&ep->user->epoll_watches);
|
||||
return ep_refcount_dec_and_test(ep);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -862,14 +862,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
|
||||
*/
|
||||
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
|
||||
{
|
||||
WARN_ON_ONCE(__ep_remove(ep, epi, false));
|
||||
if (__ep_remove(ep, epi, false))
|
||||
WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
|
||||
}
|
||||
|
||||
static void ep_clear_and_put(struct eventpoll *ep)
|
||||
{
|
||||
struct rb_node *rbp, *next;
|
||||
struct epitem *epi;
|
||||
bool dispose;
|
||||
|
||||
/* We need to release all tasks waiting for these file */
|
||||
if (waitqueue_active(&ep->poll_wait))
|
||||
@ -902,10 +902,8 @@ static void ep_clear_and_put(struct eventpoll *ep)
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
dispose = ep_refcount_dec_and_test(ep);
|
||||
mutex_unlock(&ep->mtx);
|
||||
|
||||
if (dispose)
|
||||
if (ep_refcount_dec_and_test(ep))
|
||||
ep_free(ep);
|
||||
}
|
||||
|
||||
@ -1108,7 +1106,7 @@ again:
|
||||
dispose = __ep_remove(ep, epi, true);
|
||||
mutex_unlock(&ep->mtx);
|
||||
|
||||
if (dispose)
|
||||
if (dispose && ep_refcount_dec_and_test(ep))
|
||||
ep_free(ep);
|
||||
goto again;
|
||||
}
|
||||
|
||||
@ -687,6 +687,12 @@ void pde_put(struct proc_dir_entry *pde)
|
||||
}
|
||||
}
|
||||
|
||||
static void pde_erase(struct proc_dir_entry *pde, struct proc_dir_entry *parent)
|
||||
{
|
||||
rb_erase(&pde->subdir_node, &parent->subdir);
|
||||
RB_CLEAR_NODE(&pde->subdir_node);
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove a /proc entry and free it if it's not currently in use.
|
||||
*/
|
||||
@ -709,7 +715,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
|
||||
WARN(1, "removing permanent /proc entry '%s'", de->name);
|
||||
de = NULL;
|
||||
} else {
|
||||
rb_erase(&de->subdir_node, &parent->subdir);
|
||||
pde_erase(de, parent);
|
||||
if (S_ISDIR(de->mode))
|
||||
parent->nlink--;
|
||||
}
|
||||
@ -753,7 +759,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
|
||||
root->parent->name, root->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
rb_erase(&root->subdir_node, &parent->subdir);
|
||||
pde_erase(root, parent);
|
||||
|
||||
de = root;
|
||||
while (1) {
|
||||
@ -765,7 +771,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
|
||||
next->parent->name, next->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
rb_erase(&next->subdir_node, &de->subdir);
|
||||
pde_erase(next, de);
|
||||
de = next;
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -73,52 +73,47 @@ xfs_dir_fsync(
|
||||
return xfs_log_force_inode(ip);
|
||||
}
|
||||
|
||||
static xfs_csn_t
|
||||
xfs_fsync_seq(
|
||||
struct xfs_inode *ip,
|
||||
bool datasync)
|
||||
{
|
||||
if (!xfs_ipincount(ip))
|
||||
return 0;
|
||||
if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
|
||||
return 0;
|
||||
return ip->i_itemp->ili_commit_seq;
|
||||
}
|
||||
|
||||
/*
|
||||
* All metadata updates are logged, which means that we just have to flush the
|
||||
* log up to the latest LSN that touched the inode.
|
||||
* All metadata updates are logged, which means that we just have to push the
|
||||
* journal to the required sequence number that holds the updates. We track
|
||||
* datasync commits separately to full sync commits, and hence only need to
|
||||
* select the correct sequence number for the log force here.
|
||||
*
|
||||
* If we have concurrent fsync/fdatasync() calls, we need them to all block on
|
||||
* the log force before we clear the ili_fsync_fields field. This ensures that
|
||||
* we don't get a racing sync operation that does not wait for the metadata to
|
||||
* hit the journal before returning. If we race with clearing ili_fsync_fields,
|
||||
* then all that will happen is the log force will do nothing as the lsn will
|
||||
* already be on disk. We can't race with setting ili_fsync_fields because that
|
||||
* is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
|
||||
* shared until after the ili_fsync_fields is cleared.
|
||||
* We don't have to serialise against concurrent modifications, as we do not
|
||||
* have to wait for modifications that have not yet completed. We define a
|
||||
* transaction commit as completing when the commit sequence number is updated,
|
||||
* hence if the sequence number has not updated, the sync operation has been
|
||||
* run before the commit completed and we don't have to wait for it.
|
||||
*
|
||||
* If we have concurrent fsync/fdatasync() calls, the sequence numbers remain
|
||||
* set on the log item until - at least - the journal flush completes. In
|
||||
* reality, they are only cleared when the inode is fully unpinned (i.e.
|
||||
* persistent in the journal and not dirty in the CIL), and so we rely on
|
||||
* xfs_log_force_seq() either skipping sequences that have been persisted or
|
||||
* waiting on sequences that are still in flight to correctly order concurrent
|
||||
* sync operations.
|
||||
*/
|
||||
static int
|
||||
static int
|
||||
xfs_fsync_flush_log(
|
||||
struct xfs_inode *ip,
|
||||
bool datasync,
|
||||
int *log_flushed)
|
||||
{
|
||||
int error = 0;
|
||||
xfs_csn_t seq;
|
||||
struct xfs_inode_log_item *iip = ip->i_itemp;
|
||||
xfs_csn_t seq = 0;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
seq = xfs_fsync_seq(ip, datasync);
|
||||
if (seq) {
|
||||
error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
|
||||
spin_lock(&iip->ili_lock);
|
||||
if (datasync)
|
||||
seq = iip->ili_datasync_seq;
|
||||
else
|
||||
seq = iip->ili_commit_seq;
|
||||
spin_unlock(&iip->ili_lock);
|
||||
|
||||
if (!seq)
|
||||
return 0;
|
||||
|
||||
return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
|
||||
log_flushed);
|
||||
|
||||
spin_lock(&ip->i_itemp->ili_lock);
|
||||
ip->i_itemp->ili_fsync_fields = 0;
|
||||
spin_unlock(&ip->i_itemp->ili_lock);
|
||||
}
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
return error;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
@ -156,12 +151,10 @@ xfs_file_fsync(
|
||||
error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
|
||||
|
||||
/*
|
||||
* Any inode that has dirty modifications in the log is pinned. The
|
||||
* racy check here for a pinned inode will not catch modifications
|
||||
* that happen concurrently to the fsync call, but fsync semantics
|
||||
* only require to sync previously completed I/O.
|
||||
* If the inode has a inode log item attached, it may need the journal
|
||||
* flushed to persist any changes the log item might be tracking.
|
||||
*/
|
||||
if (xfs_ipincount(ip)) {
|
||||
if (ip->i_itemp) {
|
||||
err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
|
||||
if (err2 && !error)
|
||||
error = err2;
|
||||
|
||||
@ -1642,7 +1642,6 @@ retry:
|
||||
spin_lock(&iip->ili_lock);
|
||||
iip->ili_last_fields = iip->ili_fields;
|
||||
iip->ili_fields = 0;
|
||||
iip->ili_fsync_fields = 0;
|
||||
spin_unlock(&iip->ili_lock);
|
||||
ASSERT(iip->ili_last_fields);
|
||||
|
||||
@ -1808,12 +1807,20 @@ static void
|
||||
xfs_iunpin(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
|
||||
struct xfs_inode_log_item *iip = ip->i_itemp;
|
||||
xfs_csn_t seq = 0;
|
||||
|
||||
trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
|
||||
xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
|
||||
|
||||
spin_lock(&iip->ili_lock);
|
||||
seq = iip->ili_commit_seq;
|
||||
spin_unlock(&iip->ili_lock);
|
||||
if (!seq)
|
||||
return;
|
||||
|
||||
/* Give the log a push to start the unpinning I/O */
|
||||
xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
|
||||
xfs_log_force_seq(ip->i_mount, seq, 0, NULL);
|
||||
|
||||
}
|
||||
|
||||
@ -2472,7 +2479,6 @@ flush_out:
|
||||
spin_lock(&iip->ili_lock);
|
||||
iip->ili_last_fields = iip->ili_fields;
|
||||
iip->ili_fields = 0;
|
||||
iip->ili_fsync_fields = 0;
|
||||
set_bit(XFS_LI_FLUSHING, &iip->ili_item.li_flags);
|
||||
spin_unlock(&iip->ili_lock);
|
||||
|
||||
@ -2631,12 +2637,15 @@ int
|
||||
xfs_log_force_inode(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_inode_log_item *iip = ip->i_itemp;
|
||||
xfs_csn_t seq = 0;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
if (xfs_ipincount(ip))
|
||||
seq = ip->i_itemp->ili_commit_seq;
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
if (!iip)
|
||||
return 0;
|
||||
|
||||
spin_lock(&iip->ili_lock);
|
||||
seq = iip->ili_commit_seq;
|
||||
spin_unlock(&iip->ili_lock);
|
||||
|
||||
if (!seq)
|
||||
return 0;
|
||||
|
||||
@ -131,31 +131,28 @@ xfs_inode_item_precommit(
|
||||
}
|
||||
|
||||
/*
|
||||
* Inode verifiers do not check that the extent size hint is an integer
|
||||
* multiple of the rt extent size on a directory with both rtinherit
|
||||
* and extszinherit flags set. If we're logging a directory that is
|
||||
* misconfigured in this way, clear the hint.
|
||||
* Inode verifiers do not check that the extent size hints are an
|
||||
* integer multiple of the rt extent size on a directory with
|
||||
* rtinherit flags set. If we're logging a directory that is
|
||||
* misconfigured in this way, clear the bad hints.
|
||||
*/
|
||||
if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
|
||||
(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
|
||||
xfs_extlen_to_rtxmod(ip->i_mount, ip->i_extsize) > 0) {
|
||||
ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
|
||||
XFS_DIFLAG_EXTSZINHERIT);
|
||||
ip->i_extsize = 0;
|
||||
flags |= XFS_ILOG_CORE;
|
||||
if (ip->i_diflags & XFS_DIFLAG_RTINHERIT) {
|
||||
if ((ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
|
||||
xfs_extlen_to_rtxmod(ip->i_mount, ip->i_extsize) > 0) {
|
||||
ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
|
||||
XFS_DIFLAG_EXTSZINHERIT);
|
||||
ip->i_extsize = 0;
|
||||
flags |= XFS_ILOG_CORE;
|
||||
}
|
||||
if ((ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
|
||||
xfs_extlen_to_rtxmod(ip->i_mount, ip->i_cowextsize) > 0) {
|
||||
ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
|
||||
ip->i_cowextsize = 0;
|
||||
flags |= XFS_ILOG_CORE;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Record the specific change for fdatasync optimisation. This allows
|
||||
* fdatasync to skip log forces for inodes that are only timestamp
|
||||
* dirty. Once we've processed the XFS_ILOG_IVERSION flag, convert it
|
||||
* to XFS_ILOG_CORE so that the actual on-disk dirty tracking
|
||||
* (ili_fields) correctly tracks that the version has changed.
|
||||
*/
|
||||
spin_lock(&iip->ili_lock);
|
||||
iip->ili_fsync_fields |= (flags & ~XFS_ILOG_IVERSION);
|
||||
if (flags & XFS_ILOG_IVERSION)
|
||||
flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE);
|
||||
|
||||
if (!iip->ili_item.li_buf) {
|
||||
struct xfs_buf *bp;
|
||||
@ -190,6 +187,20 @@ xfs_inode_item_precommit(
|
||||
xfs_trans_brelse(tp, bp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Store the dirty flags back into the inode item as this state is used
|
||||
* later on in xfs_inode_item_committing() to determine whether the
|
||||
* transaction is relevant to fsync state or not.
|
||||
*/
|
||||
iip->ili_dirty_flags = flags;
|
||||
|
||||
/*
|
||||
* Convert the flags on-disk fields that have been modified in the
|
||||
* transaction so that ili_fields tracks the changes correctly.
|
||||
*/
|
||||
if (flags & XFS_ILOG_IVERSION)
|
||||
flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE);
|
||||
|
||||
/*
|
||||
* Always OR in the bits from the ili_last_fields field. This is to
|
||||
* coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines
|
||||
@ -200,12 +211,6 @@ xfs_inode_item_precommit(
|
||||
spin_unlock(&iip->ili_lock);
|
||||
|
||||
xfs_inode_item_precommit_check(ip);
|
||||
|
||||
/*
|
||||
* We are done with the log item transaction dirty state, so clear it so
|
||||
* that it doesn't pollute future transactions.
|
||||
*/
|
||||
iip->ili_dirty_flags = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -707,13 +712,24 @@ xfs_inode_item_unpin(
|
||||
struct xfs_log_item *lip,
|
||||
int remove)
|
||||
{
|
||||
struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
|
||||
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
|
||||
struct xfs_inode *ip = iip->ili_inode;
|
||||
|
||||
trace_xfs_inode_unpin(ip, _RET_IP_);
|
||||
ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE));
|
||||
ASSERT(atomic_read(&ip->i_pincount) > 0);
|
||||
if (atomic_dec_and_test(&ip->i_pincount))
|
||||
|
||||
/*
|
||||
* If this is the last unpin, then the inode no longer needs a journal
|
||||
* flush to persist it. Hence we can clear the commit sequence numbers
|
||||
* as a fsync/fdatasync operation on the inode at this point is a no-op.
|
||||
*/
|
||||
if (atomic_dec_and_lock(&ip->i_pincount, &iip->ili_lock)) {
|
||||
iip->ili_commit_seq = 0;
|
||||
iip->ili_datasync_seq = 0;
|
||||
spin_unlock(&iip->ili_lock);
|
||||
wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
|
||||
}
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
@ -833,12 +849,45 @@ xfs_inode_item_committed(
|
||||
return lsn;
|
||||
}
|
||||
|
||||
/*
|
||||
* The modification is now complete, so before we unlock the inode we need to
|
||||
* update the commit sequence numbers for data integrity journal flushes. We
|
||||
* always record the commit sequence number (ili_commit_seq) so that anything
|
||||
* that needs a full journal sync will capture all of this modification.
|
||||
*
|
||||
* We then
|
||||
* check if the changes will impact a datasync (O_DSYNC) journal flush. If the
|
||||
* changes will require a datasync flush, then we also record the sequence in
|
||||
* ili_datasync_seq.
|
||||
*
|
||||
* These commit sequence numbers will get cleared atomically with the inode being
|
||||
* unpinned (i.e. pin count goes to zero), and so it will only be set when the
|
||||
* inode is dirty in the journal. This removes the need for checking if the
|
||||
* inode is pinned to determine if a journal flush is necessary, and hence
|
||||
* removes the need for holding the ILOCK_SHARED in xfs_file_fsync() to
|
||||
* serialise pin counts against commit sequence number updates.
|
||||
*
|
||||
*/
|
||||
STATIC void
|
||||
xfs_inode_item_committing(
|
||||
struct xfs_log_item *lip,
|
||||
xfs_csn_t seq)
|
||||
{
|
||||
INODE_ITEM(lip)->ili_commit_seq = seq;
|
||||
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
|
||||
|
||||
spin_lock(&iip->ili_lock);
|
||||
iip->ili_commit_seq = seq;
|
||||
if (iip->ili_dirty_flags & ~(XFS_ILOG_IVERSION | XFS_ILOG_TIMESTAMP))
|
||||
iip->ili_datasync_seq = seq;
|
||||
spin_unlock(&iip->ili_lock);
|
||||
|
||||
/*
|
||||
* Clear the per-transaction dirty flags now that we have finished
|
||||
* recording the transaction's inode modifications in the CIL and are
|
||||
* about to release and (maybe) unlock the inode.
|
||||
*/
|
||||
iip->ili_dirty_flags = 0;
|
||||
|
||||
return xfs_inode_item_release(lip);
|
||||
}
|
||||
|
||||
@ -1042,7 +1091,6 @@ xfs_iflush_abort_clean(
|
||||
{
|
||||
iip->ili_last_fields = 0;
|
||||
iip->ili_fields = 0;
|
||||
iip->ili_fsync_fields = 0;
|
||||
iip->ili_flush_lsn = 0;
|
||||
iip->ili_item.li_buf = NULL;
|
||||
list_del_init(&iip->ili_item.li_bio_list);
|
||||
|
||||
@ -32,9 +32,17 @@ struct xfs_inode_log_item {
|
||||
spinlock_t ili_lock; /* flush state lock */
|
||||
unsigned int ili_last_fields; /* fields when flushed */
|
||||
unsigned int ili_fields; /* fields to be logged */
|
||||
unsigned int ili_fsync_fields; /* logged since last fsync */
|
||||
xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
|
||||
|
||||
/*
|
||||
* We record the sequence number for every inode modification, as
|
||||
* well as those that only require fdatasync operations for data
|
||||
* integrity. This allows optimisation of the O_DSYNC/fdatasync path
|
||||
* without needing to track what modifications the journal is currently
|
||||
* carrying for the inode. These are protected by the above ili_lock.
|
||||
*/
|
||||
xfs_csn_t ili_commit_seq; /* last transaction commit */
|
||||
xfs_csn_t ili_datasync_seq; /* for datasync optimisation */
|
||||
};
|
||||
|
||||
static inline int xfs_inode_clean(struct xfs_inode *ip)
|
||||
|
||||
@ -133,9 +133,18 @@ xfs_bmbt_to_iomap(
|
||||
iomap->bdev = target->bt_bdev;
|
||||
iomap->flags = iomap_flags;
|
||||
|
||||
if (xfs_ipincount(ip) &&
|
||||
(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
|
||||
iomap->flags |= IOMAP_F_DIRTY;
|
||||
/*
|
||||
* If the inode is dirty for datasync purposes, let iomap know so it
|
||||
* doesn't elide the IO completion journal flushes on O_DSYNC IO.
|
||||
*/
|
||||
if (ip->i_itemp) {
|
||||
struct xfs_inode_log_item *iip = ip->i_itemp;
|
||||
|
||||
spin_lock(&iip->ili_lock);
|
||||
if (iip->ili_datasync_seq)
|
||||
iomap->flags |= IOMAP_F_DIRTY;
|
||||
spin_unlock(&iip->ili_lock);
|
||||
}
|
||||
|
||||
iomap->validity_cookie = sequence_cookie;
|
||||
iomap->folio_ops = &xfs_iomap_folio_ops;
|
||||
|
||||
@ -663,6 +663,8 @@ struct io_kiocb {
|
||||
u64 extra1;
|
||||
u64 extra2;
|
||||
} big_cqe;
|
||||
/* for private io_kiocb freeing */
|
||||
RH_KABI_EXTEND(struct rcu_head rcu_head)
|
||||
};
|
||||
|
||||
struct io_overflow_cqe {
|
||||
|
||||
@ -775,7 +775,7 @@ struct mgmt_adv_pattern {
|
||||
__u8 ad_type;
|
||||
__u8 offset;
|
||||
__u8 length;
|
||||
__u8 value[31];
|
||||
__u8 value[HCI_MAX_AD_LENGTH];
|
||||
} __packed;
|
||||
|
||||
#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
|
||||
|
||||
@ -556,6 +556,7 @@ enum devlink_param_generic_id {
|
||||
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
|
||||
DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
|
||||
DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
|
||||
DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF,
|
||||
|
||||
/* add new param generic ids above here*/
|
||||
__DEVLINK_PARAM_GENERIC_ID_MAX,
|
||||
@ -620,6 +621,9 @@ enum devlink_param_generic_id {
|
||||
#define DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME "clock_id"
|
||||
#define DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE DEVLINK_PARAM_TYPE_U64
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME "max_mac_per_vf"
|
||||
#define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE DEVLINK_PARAM_TYPE_U32
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
|
||||
{ \
|
||||
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
|
||||
|
||||
@ -24,7 +24,10 @@
|
||||
struct sk_buff;
|
||||
|
||||
struct dst_entry {
|
||||
struct net_device *dev;
|
||||
RH_KABI_REPLACE(struct net_device *dev, union {
|
||||
struct net_device *dev;
|
||||
struct net_device __rcu *dev_rcu;
|
||||
})
|
||||
struct dst_ops *ops;
|
||||
unsigned long _metrics;
|
||||
unsigned long expires;
|
||||
@ -561,6 +564,41 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
|
||||
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
|
||||
}
|
||||
|
||||
static inline struct net_device *dst_dev(const struct dst_entry *dst)
|
||||
{
|
||||
return READ_ONCE(dst->dev);
|
||||
}
|
||||
|
||||
static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
|
||||
{
|
||||
return rcu_dereference(dst->dev_rcu);
|
||||
}
|
||||
|
||||
static inline struct net *dst_dev_net_rcu(const struct dst_entry *dst)
|
||||
{
|
||||
return dev_net_rcu(dst_dev_rcu(dst));
|
||||
}
|
||||
|
||||
static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
|
||||
{
|
||||
return dst_dev(skb_dst(skb));
|
||||
}
|
||||
|
||||
static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb)
|
||||
{
|
||||
return dst_dev_rcu(skb_dst(skb));
|
||||
}
|
||||
|
||||
static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
|
||||
{
|
||||
return dev_net(skb_dst_dev(skb));
|
||||
}
|
||||
|
||||
static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb)
|
||||
{
|
||||
return dev_net_rcu(skb_dst_dev_rcu(skb));
|
||||
}
|
||||
|
||||
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
|
||||
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
|
||||
struct sk_buff *skb, u32 mtu, bool confirm_neigh);
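
A minimal caller sketch (illustrative only, not taken from this commit) of the pattern the later networking hunks convert to, using the RCU accessors declared above:

/* Read the dst's device MTU safely under RCU (sketch, assumes <net/dst.h>). */
static unsigned int example_dst_mtu(const struct dst_entry *dst)
{
	const struct net_device *dev;
	unsigned int mtu;

	rcu_read_lock();
	dev = dst_dev_rcu(dst);		/* RCU-protected device pointer */
	mtu = READ_ONCE(dev->mtu);
	rcu_read_unlock();

	return mtu;
}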
|
||||
|
||||
@ -466,12 +466,14 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
|
||||
bool forwarding)
|
||||
{
|
||||
const struct rtable *rt = dst_rtable(dst);
|
||||
const struct net_device *dev;
|
||||
unsigned int mtu, res;
|
||||
struct net *net;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
net = dev_net_rcu(dst->dev);
|
||||
dev = dst_dev_rcu(dst);
|
||||
net = dev_net_rcu(dev);
|
||||
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
|
||||
ip_mtu_locked(dst) ||
|
||||
!forwarding) {
|
||||
@ -485,7 +487,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
|
||||
if (mtu)
|
||||
goto out;
|
||||
|
||||
mtu = READ_ONCE(dst->dev->mtu);
|
||||
mtu = READ_ONCE(dev->mtu);
|
||||
|
||||
if (unlikely(ip_mtu_locked(dst))) {
|
||||
if (rt->rt_uses_gateway && mtu > 576)
|
||||
|
||||
@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
|
||||
|
||||
mtu = IPV6_MIN_MTU;
|
||||
rcu_read_lock();
|
||||
idev = __in6_dev_get(dst->dev);
|
||||
idev = __in6_dev_get(dst_dev_rcu(dst));
|
||||
if (idev)
|
||||
mtu = READ_ONCE(idev->cnf.mtu6);
|
||||
rcu_read_unlock();
|
||||
|
||||
@ -373,7 +373,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
|
||||
const struct net *net;
|
||||
|
||||
rcu_read_lock();
|
||||
net = dev_net_rcu(dst->dev);
|
||||
net = dst_dev_net_rcu(dst);
|
||||
hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
@ -82,7 +82,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
|
||||
spin_unlock(&ctx->msg_lock);
|
||||
}
|
||||
if (req)
|
||||
kmem_cache_free(req_cachep, req);
|
||||
kfree_rcu(req, rcu_head);
|
||||
percpu_ref_put(&ctx->refs);
|
||||
}
|
||||
|
||||
@ -91,7 +91,7 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
{
|
||||
req->task = READ_ONCE(ctx->submitter_task);
|
||||
if (!req->task) {
|
||||
kmem_cache_free(req_cachep, req);
|
||||
kfree_rcu(req, rcu_head);
|
||||
return -EOWNERDEAD;
|
||||
}
|
||||
req->cqe.user_data = user_data;
|
||||
|
||||
@ -1772,6 +1772,7 @@ void uprobe_free_utask(struct task_struct *t)
|
||||
if (!utask)
|
||||
return;
|
||||
|
||||
t->utask = NULL;
|
||||
if (utask->active_uprobe)
|
||||
put_uprobe(utask->active_uprobe);
|
||||
|
||||
@ -1781,7 +1782,6 @@ void uprobe_free_utask(struct task_struct *t)
|
||||
|
||||
xol_free_insn_slot(t);
|
||||
kfree(utask);
|
||||
t->utask = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@ -4242,6 +4242,13 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
|
||||
}
|
||||
|
||||
if (i == ARRAY_SIZE(hci_cc_table)) {
|
||||
if (!skb->len) {
|
||||
bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status",
|
||||
*opcode);
|
||||
*status = HCI_ERROR_UNSPECIFIED;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Unknown opcode, assume byte 0 contains the status, so
|
||||
* that e.g. __hci_cmd_sync() properly returns errors
|
||||
* for vendor specific commands send by HCI drivers.
|
||||
|
||||
@ -1311,7 +1311,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
|
||||
goto done;
|
||||
}
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
mgmt_index_removed(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
err = hci_dev_open(hdev->id);
|
||||
if (err) {
|
||||
|
||||
@ -863,11 +863,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
|
||||
{
|
||||
struct hci_cmd_sync_work_entry *entry;
|
||||
|
||||
entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
|
||||
if (!entry)
|
||||
return false;
|
||||
mutex_lock(&hdev->cmd_sync_work_lock);
|
||||
|
||||
hci_cmd_sync_cancel_entry(hdev, entry);
|
||||
entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
|
||||
if (!entry) {
|
||||
mutex_unlock(&hdev->cmd_sync_work_lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
|
||||
|
||||
mutex_unlock(&hdev->cmd_sync_work_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -5395,9 +5395,9 @@ static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
|
||||
for (i = 0; i < pattern_count; i++) {
|
||||
offset = patterns[i].offset;
|
||||
length = patterns[i].length;
|
||||
if (offset >= HCI_MAX_EXT_AD_LENGTH ||
|
||||
length > HCI_MAX_EXT_AD_LENGTH ||
|
||||
(offset + length) > HCI_MAX_EXT_AD_LENGTH)
|
||||
if (offset >= HCI_MAX_AD_LENGTH ||
|
||||
length > HCI_MAX_AD_LENGTH ||
|
||||
(offset + length) > HCI_MAX_AD_LENGTH)
|
||||
return MGMT_STATUS_INVALID_PARAMS;
|
||||
|
||||
p = kmalloc(sizeof(*p), GFP_KERNEL);
|
||||
|
||||
@ -150,7 +150,7 @@ void dst_dev_put(struct dst_entry *dst)
|
||||
dst->ops->ifdown(dst, dev);
|
||||
dst->input = dst_discard;
|
||||
dst->output = dst_discard_out;
|
||||
dst->dev = blackhole_netdev;
|
||||
rcu_assign_pointer(dst->dev_rcu, blackhole_netdev);
|
||||
netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
|
||||
GFP_ATOMIC);
|
||||
}
|
||||
@ -263,7 +263,7 @@ unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
|
||||
{
|
||||
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
|
||||
|
||||
return mtu ? : dst->dev->mtu;
|
||||
return mtu ? : dst_dev(dst)->mtu;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
|
||||
|
||||
|
||||
@ -2522,7 +2522,7 @@ void sk_free_unlock_clone(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);

static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev)
{
	bool is_ipv6 = false;
	u32 max_size;
@ -2532,8 +2532,8 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
		   !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
#endif
	/* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
	max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) :
			READ_ONCE(dst->dev->gso_ipv4_max_size);
	max_size = is_ipv6 ? READ_ONCE(dev->gso_max_size) :
			READ_ONCE(dev->gso_ipv4_max_size);
	if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
		max_size = GSO_LEGACY_MAX_SIZE;

@ -2542,9 +2542,12 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	const struct net_device *dev;
	u32 max_segs = 1;

	sk->sk_route_caps = dst->dev->features;
	rcu_read_lock();
	dev = dst_dev_rcu(dst);
	sk->sk_route_caps = dev->features;
	if (sk_is_tcp(sk))
		sk->sk_route_caps |= NETIF_F_GSO;
	if (sk->sk_route_caps & NETIF_F_GSO)
@ -2556,13 +2559,14 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
			sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dev);
			/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
			max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
			max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1);
		}
	}
	sk->sk_gso_max_segs = max_segs;
	sk_dst_set(sk, dst);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

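The sk_setup_caps() hunk above, like the ip_output() and ip6_output() hunks further down, moves every dst->dev dereference under rcu_read_lock() and reads the device through the dst_dev_rcu() accessor added by the dst->dev_rcu series. A minimal sketch of that access pattern follows; the helper name example_dst_mtu() is purely illustrative and not part of this commit:

/* Illustrative sketch only, not code from this commit: read a device
 * field from a dst while the RCU read lock pins the device.
 */
static unsigned int example_dst_mtu(const struct dst_entry *dst)
{
	const struct net_device *dev;
	unsigned int mtu;

	rcu_read_lock();
	dev = dst_dev_rcu(dst);		/* RCU-safe replacement for dst->dev */
	mtu = READ_ONCE(dev->mtu);	/* dev cannot be freed while locked */
	rcu_read_unlock();

	return mtu;
}
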
@ -102,6 +102,11 @@ static const struct devlink_param devlink_param_generic[] = {
		.name = DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME,
		.type = DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE,
	},
	{
		.id = DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF,
		.name = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME,
		.type = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE,
	},
};

static int devlink_param_generic_verify(const struct devlink_param *param)

@ -701,13 +701,15 @@ void devl_rate_nodes_destroy(struct devlink *devlink)
		if (!devlink_rate->parent)
			continue;

		refcount_dec(&devlink_rate->parent->refcnt);
		if (devlink_rate_is_leaf(devlink_rate))
			ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
						  NULL, NULL);
		else if (devlink_rate_is_node(devlink_rate))
			ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
						  NULL, NULL);

		refcount_dec(&devlink_rate->parent->refcnt);
		devlink_rate->parent = NULL;
	}
	list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
		if (devlink_rate_is_node(devlink_rate)) {

@ -426,15 +426,20 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
	struct net_device *dev, *indev = skb->dev;
	int ret_val;

	rcu_read_lock();
	dev = skb_dst_dev_rcu(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
	ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			       net, sk, skb, indev, dev,
			       ip_finish_output,
			       !(IPCB(skb)->flags & IPSKB_REROUTED));
	rcu_read_unlock();
	return ret_val;
}
EXPORT_SYMBOL(ip_output);

@ -1021,7 +1021,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
		return;

	rcu_read_lock();
	net = dev_net_rcu(dst->dev);
	net = dst_dev_net_rcu(dst);
	if (mtu < net->ipv4.ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
@ -1307,7 +1307,7 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
	struct net *net;

	rcu_read_lock();
	net = dev_net_rcu(dst->dev);
	net = dst_dev_net_rcu(dst);
	advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
		       net->ipv4.ip_rt_min_advmss);
	rcu_read_unlock();

@ -60,7 +60,7 @@
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct net_device *dev = dst_dev_rcu(dst);
	struct inet6_dev *idev = ip6_dst_idev(dst);
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	const struct in6_addr *daddr, *nexthop;
@ -70,15 +70,12 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *

	/* Be paranoid, rather than too clever. */
	if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
		/* Make sure idev stays alive */
		rcu_read_lock();
		/* idev stays alive because we hold rcu_read_lock(). */
		skb = skb_expand_head(skb, hh_len);
		if (!skb) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
			rcu_read_unlock();
			return -ENOMEM;
		}
		rcu_read_unlock();
	}

	hdr = ipv6_hdr(skb);
@ -123,7 +120,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *

	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);

	rcu_read_lock();
	nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);

@ -131,7 +127,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
	if (IS_ERR(neigh)) {
		rcu_read_unlock();
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
		kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
		return -EINVAL;
@ -139,7 +134,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
	}
	sock_confirm_neigh(skb, neigh);
	ret = neigh_output(neigh, skb, false);
	rcu_read_unlock();
	return ret;
}

@ -232,22 +226,30 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s

int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev, *indev = skb->dev;
	struct inet6_dev *idev;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	rcu_read_lock();
	dev = dst_dev_rcu(dst);
	idev = ip6_dst_idev(dst);
	skb->dev = dev;

	if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
	ret = NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			   net, sk, skb, indev, dev,
			   ip6_finish_output,
			   !(IP6CB(skb)->flags & IP6SKB_REROUTED));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ip6_output);

@ -267,35 +269,36 @@ bool ip6_autoflowlabel(struct net *net, const struct sock *sk)
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct inet6_dev *idev = ip6_dst_idev(dst);
	struct hop_jumbo_hdr *hop_jumbo;
	int hoplen = sizeof(*hop_jumbo);
	struct net *net = sock_net(sk);
	unsigned int head_room;
	struct net_device *dev;
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	int ret, hlimit = -1;
	u32 mtu;

	rcu_read_lock();

	dev = dst_dev_rcu(dst);
	head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
	if (opt)
		head_room += opt->opt_nflen + opt->opt_flen;

	if (unlikely(head_room > skb_headroom(skb))) {
		/* Make sure idev stays alive */
		rcu_read_lock();
		/* idev stays alive while we hold rcu_read_lock(). */
		skb = skb_expand_head(skb, head_room);
		if (!skb) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
			rcu_read_unlock();
			return -ENOBUFS;
			ret = -ENOBUFS;
			goto unlock;
		}
		rcu_read_unlock();
	}

	if (opt) {
@ -357,17 +360,21 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_out((struct sock *)sk, skb);
		if (unlikely(!skb))
			return 0;
		if (unlikely(!skb)) {
			ret = 0;
			goto unlock;
		}

		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dev,
			       dst_output);
		ret = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			      net, (struct sock *)sk, skb, NULL, dev,
			      dst_output);
		goto unlock;
	}

	ret = -EMSGSIZE;
	skb->dev = dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
@ -376,7 +383,9 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,

	IP6_INC_STATS(net, idev, IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ip6_xmit);

@ -851,14 +851,19 @@ static void mptcp_reset_rtx_timer(struct sock *sk)

bool mptcp_schedule_work(struct sock *sk)
{
	if (inet_sk_state_load(sk) != TCP_CLOSE &&
	    schedule_work(&mptcp_sk(sk)->work)) {
		/* each subflow already holds a reference to the sk, and the
		 * workqueue is invoked by a subflow, so sk can't go away here.
		 */
		sock_hold(sk);
	if (inet_sk_state_load(sk) == TCP_CLOSE)
		return false;

	/* Get a reference on this socket, mptcp_worker() will release it.
	 * As mptcp_worker() might complete before us, we can not avoid
	 * a sock_hold()/sock_put() if schedule_work() returns false.
	 */
	sock_hold(sk);

	if (schedule_work(&mptcp_sk(sk)->work))
		return true;
	}

	sock_put(sk);
	return false;
}

@ -152,7 +152,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
static const struct
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_MQPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
							 TC_QOPT_MAX_QUEUE),
							 TC_QOPT_MAX_QUEUE - 1),
	[TCA_MQPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),

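The one-line mqprio policy change above is the CVE-2025-38568 fix: the tc entry index is later used to index per-tc arrays sized TC_QOPT_MAX_QUEUE, so the inclusive netlink maximum has to be TC_QOPT_MAX_QUEUE - 1. A minimal sketch of the off-by-one that the tighter bound prevents; the struct and helper names here are hypothetical, not from the driver:

/* Illustrative only: an index validated against an inclusive maximum of
 * TC_QOPT_MAX_QUEUE can equal the array size and write one element past
 * the end; the policy now caps it at TC_QOPT_MAX_QUEUE - 1 instead.
 */
struct example_tc_entries {
	u32 fp[TC_QOPT_MAX_QUEUE];	/* valid indexes: 0 .. TC_QOPT_MAX_QUEUE - 1 */
};

static int example_set_fp(struct example_tc_entries *e, u32 index, u32 value)
{
	if (index >= TC_QOPT_MAX_QUEUE)	/* mirrors NLA_POLICY_MAX(..., TC_QOPT_MAX_QUEUE - 1) */
		return -EINVAL;

	e->fp[index] = value;
	return 0;
}
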
@ -1557,18 +1557,40 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			vsock_remove_connected(vsk);
			goto out_wait;
		} else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
			err = -ETIMEDOUT;
		/* Connection established. Whatever happens to socket once we
		 * release it, that's not connect()'s concern. No need to go
		 * into signal and timeout handling. Call it a day.
		 *
		 * Note that allowing to "reset" an already established socket
		 * here is racy and insecure.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			break;

		/* If connection was _not_ established and a signal/timeout came
		 * to be, we want the socket's state reset. User space may want
		 * to retry.
		 *
		 * sk_state != TCP_ESTABLISHED implies that socket is not on
		 * vsock_connected_table. We keep the binding and the transport
		 * assigned.
		 */
		if (signal_pending(current) || timeout == 0) {
			err = timeout == 0 ? -ETIMEDOUT : sock_intr_errno(timeout);

			/* Listener might have already responded with
			 * VIRTIO_VSOCK_OP_RESPONSE. Its handling expects our
			 * sk_state == TCP_SYN_SENT, which hereby we break.
			 * In such case VIRTIO_VSOCK_OP_RST will follow.
			 */
			sk->sk_state = TCP_CLOSE;
			sock->state = SS_UNCONNECTED;

			/* Try to cancel VIRTIO_VSOCK_OP_REQUEST skb sent out by
			 * transport->connect().
			 */
			vsock_transport_cancel_pkt(vsk);

			goto out_wait;
		}

redhat/kabi/kabi-module/kabi_x86_64/__kabi__alt_instr
Normal file
@ -0,0 +1,2 @@
#1-
0xe08433f8 __kabi__alt_instr vmlinux EXPORT_SYMBOL_GPL

@ -1,3 +1,49 @@
* Thu Jan 22 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.31.1.el10_1]
- i40e: support generic devlink param "max_mac_per_vf" (Mohammad Heib) [RHEL-121647]
- devlink: Add new "max_mac_per_vf" generic device param (Mohammad Heib) [RHEL-121647]
- i40e: improve VF MAC filters accounting (Mohammad Heib) [RHEL-121647]
- KVM: arm64: Hide ID_AA64MMFR2_EL1.NV from guest and userspace (Donald Dutile) [RHEL-134763]
- scsi: st: Skip buffer flush for information ioctls (Ewan D. Milne) [RHEL-136289]
- scsi: st: Separate st-unique ioctl handling from SCSI common ioctl handling (Ewan D. Milne) [RHEL-136289]
- scsi: st: Don't set pos_unknown just after device recognition (Ewan D. Milne) [RHEL-136289]
- scsi: st: New session only when Unit Attention for new tape (Ewan D. Milne) [RHEL-136289]
- scsi: st: Add MTIOCGET and MTLOAD to ioctls allowed after device reset (Ewan D. Milne) [RHEL-136289]
- scsi: st: Don't modify unknown block number in MTIOCGET (Ewan D. Milne) [RHEL-136289]
- xfs: rework datasync tracking and execution (CKI Backport Bot) [RHEL-126599]
- xfs: rearrange code in xfs_inode_item_precommit (CKI Backport Bot) [RHEL-126599]
- s390: Disable ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP (Luiz Capitulino) [RHEL-133336]
- s390: mm: add stub for hugetlb_optimize_vmemmap_key (Luiz Capitulino) [RHEL-133336]
- x86/mm/init: Handle the special case of device private pages in add_pages(), to not increase max_pfn and trigger dma_addressing_limited() bounce buffers (Ricardo Robaina) [RHEL-129452]
- x86/kaslr: Reduce KASLR entropy on most x86 systems (Ricardo Robaina) [RHEL-129452]
- x86/boot/compressed: Remove unused header includes from kaslr.c (Ricardo Robaina) [RHEL-129452]
- RDMA/core: Fix "KASAN: slab-use-after-free Read in ib_register_device" problem (CKI Backport Bot) [RHEL-134363] {CVE-2025-38022}
- uprobes: Fix race in uprobe_free_utask (Jay Shin) [RHEL-133456]
- ASoC: Intel: bytcr_rt5640: Fix invalid quirk input mapping (CKI Backport Bot) [RHEL-129115] {CVE-2025-40154}
Resolves: RHEL-121647, RHEL-122759, RHEL-126599, RHEL-129115, RHEL-129452, RHEL-133336, RHEL-133456, RHEL-134363, RHEL-134763, RHEL-136289

* Wed Jan 21 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.30.1.el10_1]
- io_uring/msg_ring: ensure io_kiocb freeing is deferred for RCU (Jeff Moyer) [RHEL-129623] {CVE-2025-38453}
- net: atlantic: fix fragment overflow handling in RX path (CKI Backport Bot) [RHEL-139490] {CVE-2025-68301}
- Bluetooth: hci_sock: Prevent race in socket write iter and sock bind (CKI Backport Bot) [RHEL-139465] {CVE-2025-68305}
- vsock: Ignore signal/timeout on connect() if already established (CKI Backport Bot) [RHEL-139287] {CVE-2025-40248}
- net: use dst_dev_rcu() in sk_setup_caps() (Hangbin Liu) [RHEL-129087] {CVE-2025-40170}
- ipv6: use RCU in ip6_xmit() (Hangbin Liu) [RHEL-129026] {CVE-2025-40135}
- ipv6: use RCU in ip6_output() (Hangbin Liu) [RHEL-128991] {CVE-2025-40158}
- net: dst: introduce dst->dev_rcu (Hangbin Liu) [RHEL-129026]
- net: Add locking to protect skb->dev access in ip_output (Hangbin Liu) [RHEL-129026]
- net: dst: add four helpers to annotate data-races around dst->dev (Hangbin Liu) [RHEL-129026]
- eventpoll: don't decrement ep refcount while still holding the ep mutex (CKI Backport Bot) [RHEL-138041] {CVE-2025-38349}
- fs/proc: fix uaf in proc_readdir_de() (CKI Backport Bot) [RHEL-137101] {CVE-2025-40271}
- Bluetooth: MGMT: Fix OOB access in parse_adv_monitor_pattern() (CKI Backport Bot) [RHEL-136972] {CVE-2025-40294}
- Bluetooth: hci_event: validate skb length for unknown CC opcode (CKI Backport Bot) [RHEL-136951] {CVE-2025-40301}
- net/sched: mqprio: fix stack out-of-bounds write in tc entry parsing (CKI Backport Bot) [RHEL-136836] {CVE-2025-38568}
- Bluetooth: hci_sync: fix race in hci_cmd_sync_dequeue_once (CKI Backport Bot) [RHEL-136259] {CVE-2025-40318}
- devlink: rate: Unset parent pointer in devl_rate_nodes_destroy (CKI Backport Bot) [RHEL-134926] {CVE-2025-40251}
- mptcp: fix race condition in mptcp_schedule_work() (CKI Backport Bot) [RHEL-134451] {CVE-2025-40258}
- irqchip/gic-v2m: Prevent use after free of gicv2m_get_fwnode() (CKI Backport Bot) [RHEL-131989] {CVE-2025-37819}
- drm/xe: Fix vm_bind_ioctl double free bug (Anusha Srivatsa) [RHEL-122312] {CVE-2025-38731}
Resolves: RHEL-122312, RHEL-128991, RHEL-129026, RHEL-129087, RHEL-129623, RHEL-131989, RHEL-134451, RHEL-134926, RHEL-136259, RHEL-136836, RHEL-136951, RHEL-136972, RHEL-137101, RHEL-138041, RHEL-139287, RHEL-139465, RHEL-139490

* Sat Jan 10 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.29.1.el10_1]
- gitlab-ci: use rhel10.1 builder image (Michael Hofmann)
- mm/vmalloc: fix data race in show_numa_info() (Waiman Long) [RHEL-137997] {CVE-2025-38383}

@ -68,7 +68,8 @@ enum {
	BYT_RT5640_OVCD_SF_1P5		= (RT5640_OVCD_SF_1P5 << 13),
};

#define BYT_RT5640_MAP(quirk)		((quirk) & GENMASK(3, 0))
#define BYT_RT5640_MAP_MASK		GENMASK(3, 0)
#define BYT_RT5640_MAP(quirk)		((quirk) & BYT_RT5640_MAP_MASK)
#define BYT_RT5640_JDSRC(quirk)		(((quirk) & GENMASK(7, 4)) >> 4)
#define BYT_RT5640_OVCD_TH(quirk)	(((quirk) & GENMASK(12, 8)) >> 8)
#define BYT_RT5640_OVCD_SF(quirk)	(((quirk) & GENMASK(14, 13)) >> 13)
@ -140,7 +141,9 @@ static void log_quirks(struct device *dev)
		dev_info(dev, "quirk NO_INTERNAL_MIC_MAP enabled\n");
		break;
	default:
		dev_err(dev, "quirk map 0x%x is not supported, microphone input will not work\n", map);
		dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC1_MAP\n", map);
		byt_rt5640_quirk &= ~BYT_RT5640_MAP_MASK;
		byt_rt5640_quirk |= BYT_RT5640_DMIC1_MAP;
		break;
	}
	if (byt_rt5640_quirk & BYT_RT5640_HSMIC2_ON_IN1)

@ -1,3 +1,3 @@
sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.29.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.29.1.el10.x86_64,mailto:security@almalinux.org
kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.31.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.31.1.el10.x86_64,mailto:security@almalinux.org

uki.sbat
@ -1,3 +1,3 @@
sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.29.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.29.1.el10.x86_64,mailto:security@almalinux.org
kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.31.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.31.1.el10.x86_64,mailto:security@almalinux.org