Import of kernel-6.12.0-55.42.1.el10_0

almalinux-bot-kernel 2025-11-11 04:07:36 +00:00
parent ecf94b4a2b
commit 7b5eeb45cb
32 changed files with 817 additions and 239 deletions


@ -12,7 +12,7 @@ RHEL_MINOR = 0
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 55.40.1
RHEL_RELEASE = 55.42.1
#
# RHEL_REBASE_NUM


@ -1509,6 +1509,9 @@ static u8 pmuver_to_perfmon(u8 pmuver)
}
}
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
@ -1522,6 +1525,12 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val = read_sanitised_ftr_reg(id);
switch (id) {
case SYS_ID_AA64DFR0_EL1:
val = sanitise_id_aa64dfr0_el1(vcpu, val);
break;
case SYS_ID_AA64PFR0_EL1:
val = sanitise_id_aa64pfr0_el1(vcpu, val);
break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
@ -1535,6 +1544,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
break;
case SYS_ID_AA64PFR2_EL1:
/* We only expose FPMR */
@ -1692,11 +1702,8 @@ static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
if (!vcpu_has_sve(vcpu))
val &= ~ID_AA64PFR0_EL1_SVE_MASK;
@ -1724,6 +1731,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
val &= ~ID_AA64PFR0_EL1_AMU_MASK;
/*
* MPAM is disabled by default as KVM also needs a set of PARTID to
* program the MPAMVPMx_EL2 PARTID remapping registers with. But some
* older kernels let the guest see the ID bit.
*/
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
return val;
}
@ -1737,11 +1751,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
(val); \
})
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
/*
@ -1834,6 +1845,42 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
return set_id_reg(vcpu, rd, val);
}
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
/*
* Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
* in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
* guests, but didn't add trap handling. KVM doesn't support MPAM and
* always returns an UNDEF for these registers. The guest must see 0
* for this field.
*
* But KVM must also accept values from user-space that were provided
* by KVM. On CPUs that support MPAM, permit user-space to write
* the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
*/
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
return set_id_reg(vcpu, rd, user_val);
}
static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
/* See set_id_aa64pfr0_el1 for comment about MPAM */
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
return set_id_reg(vcpu, rd, user_val);
}
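An illustrative sketch of the save/restore pattern the two setters above tolerate: a VMM reads an ID register and later writes the same value back. The helper name and error handling below are assumptions for illustration, not part of this patch; ARM64_SYS_REG is the UAPI encoding macro from <asm/kvm.h>.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64PFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=4, op2=0 */
#define PFR0_ID ARM64_SYS_REG(3, 0, 0, 4, 0)

static int replay_id_aa64pfr0(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id = PFR0_ID,
		.addr = (uint64_t)(uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;
	/* A snapshot taken on an older kernel may still carry the host's
	 * MPAM bits; with set_id_aa64pfr0_el1() above, writing the same
	 * value back succeeds and KVM silently ignores the MPAM field
	 * instead of failing the restore. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}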
/*
* cpufeature ID register user accessors
*
@ -2150,6 +2197,15 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
.val = mask, \
}
/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
#define ID_FILTERED(sysreg, name, mask) { \
ID_DESC(sysreg), \
.set_user = set_##name, \
.visibility = id_visibility, \
.reset = kvm_read_sanitised_id_reg, \
.val = (mask), \
}
/*
* sys_reg_desc initialiser for architecturally unallocated cpufeature ID
* register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
@ -2374,18 +2430,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 ID registers */
/* CRm=4 */
{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_reg,
.reset = read_sanitised_id_aa64pfr0_el1,
.val = ~(ID_AA64PFR0_EL1_AMU |
ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
~(ID_AA64PFR0_EL1_AMU |
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SVE |
ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP), },
ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
ID_AA64PFR0_EL1_FP)),
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
~(ID_AA64PFR1_EL1_PFAR |
ID_AA64PFR1_EL1_DF2 |
ID_AA64PFR1_EL1_MTEX |
ID_AA64PFR1_EL1_THE |
@ -2406,11 +2459,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
/* CRm=5 */
{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_aa64dfr0_el1,
.reset = read_sanitised_id_aa64dfr0_el1,
/*
* Prior to FEAT_Debugv8.9, the architecture defines context-aware
* breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
@ -2423,10 +2471,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
* See DDI0487K.a, section D2.8.3 Breakpoint types and linking
* of breakpoints for more details.
*/
.val = ID_AA64DFR0_EL1_DoubleLock_MASK |
ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
ID_AA64DFR0_EL1_DoubleLock_MASK |
ID_AA64DFR0_EL1_WRPs_MASK |
ID_AA64DFR0_EL1_PMUVer_MASK |
ID_AA64DFR0_EL1_DebugVer_MASK, },
ID_AA64DFR0_EL1_DebugVer_MASK),
ID_SANITISED(ID_AA64DFR1_EL1),
ID_UNALLOCATED(5,2),
ID_UNALLOCATED(5,3),


@ -494,7 +494,6 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_PPC_UV=y
# CONFIG_LD_HEAD_STUB_CATCH is not set
CONFIG_MPROFILE_KERNEL=y
CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY=y
CONFIG_HOTPLUG_CPU=y
CONFIG_INTERRUPT_SANITIZE_REGISTERS=y
CONFIG_PPC_QUEUED_SPINLOCKS=y
@ -7020,7 +7019,7 @@ CONFIG_BPF_EVENTS=y
CONFIG_DYNAMIC_EVENTS=y
CONFIG_PROBE_EVENTS=y
CONFIG_FTRACE_MCOUNT_RECORD=y
CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y
CONFIG_FTRACE_MCOUNT_USE_CC=y
CONFIG_TRACING_MAP=y
CONFIG_SYNTH_EVENTS=y
# CONFIG_USER_EVENTS is not set


@ -326,7 +326,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
sq->clock = &mdev->clock;
sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->priv = c->priv;
@ -696,7 +696,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->pdev = c->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
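The run of one-character hunks in this and the next several mlx5 files all stem from one change: mdev->clock goes from an embedded struct to a pointer (see the driver.h hunk near the end of this commit), so every &mdev->clock becomes mdev->clock. A minimal sketch of the shape change, with simplified stand-in types rather than the real mlx5 definitions:

struct clk { int id; };

struct dev_old { struct clk clock; };	/* RHEL 10.0 GA: embedded clock    */
struct dev_new { struct clk *clock; };	/* this import: possibly shared    */

static struct clk *clk_of_old(struct dev_old *d) { return &d->clock; }
static struct clk *clk_of_new(struct dev_new *d) { return d->clock; }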


@ -46,7 +46,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
rq->pdev = t->pdev;
rq->netdev = priv->netdev;
rq->priv = priv;
rq->clock = &mdev->clock;
rq->clock = mdev->clock;
rq->tstamp = &priv->tstamp;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);


@ -289,9 +289,9 @@ static u64 mlx5e_xsk_fill_timestamp(void *_priv)
ts = get_cqe_ts(priv->cqe);
if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts);
return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts);
}
static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)


@ -72,7 +72,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
rq->clock = &mdev->clock;
rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;


@ -742,7 +742,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
rq->clock = &mdev->clock;
rq->clock = mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
@ -1621,7 +1621,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
sq->clock = &mdev->clock;
sq->clock = mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;


@ -43,6 +43,8 @@
#include <linux/cpufeature.h>
#endif /* CONFIG_X86 */
#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)
enum {
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
@ -77,6 +79,56 @@ enum {
MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
struct mlx5_clock_dev_state {
struct mlx5_core_dev *mdev;
struct mlx5_devcom_comp_dev *compdev;
struct mlx5_nb pps_nb;
struct work_struct out_work;
};
struct mlx5_clock_priv {
struct mlx5_clock clock;
struct mlx5_core_dev *mdev;
struct mutex lock; /* protect mdev and used in PTP callbacks */
struct mlx5_core_dev *event_mdev;
};
static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
{
return container_of(clock, struct mlx5_clock_priv, clock);
}
static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
{
if (!clock->shared)
return;
lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
}
static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
{
mlx5_clock_lockdep_assert(clock);
return clock_priv(clock)->mdev;
}
static void mlx5_clock_lock(struct mlx5_clock *clock)
{
if (!clock->shared)
return;
mutex_lock(&clock_priv(clock)->lock);
}
static void mlx5_clock_unlock(struct mlx5_clock *clock)
{
if (!clock->shared)
return;
mutex_unlock(&clock_priv(clock)->lock);
}
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@ -94,6 +146,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
{
u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
int err;
err = mlx5_core_access_reg(mdev, in, sizeof(in),
out, sizeof(out), MLX5_REG_MRTCQ, 0, 0);
if (!err)
memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
MLX5_RT_CLOCK_IDENTITY_SIZE);
return err;
}
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
/* Optimal shift constant leads to corrections above just 1 scaled ppm.
@ -119,21 +187,30 @@ static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
mdev = container_of(clock, struct mlx5_core_dev, clock);
return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
}
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
s32 ret;
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
ret = mlx5_clock_getmaxphase(mdev);
mlx5_clock_unlock(clock);
return ret;
}
static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
{
s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
s64 max = mlx5_clock_getmaxphase(mdev);
if (delta < -max || delta > max)
return false;
@ -209,7 +286,7 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
if (real_time_mode)
*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
else
*device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
*device_time = mlx5_timecounter_cyc2time(mdev->clock, device);
return 0;
}
@ -220,16 +297,23 @@ static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct system_time_snapshot history_begin = {0};
struct mlx5_core_dev *mdev;
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
if (!mlx5_is_ptm_source_time_available(mdev))
return -EBUSY;
if (!mlx5_is_ptm_source_time_available(mdev)) {
err = -EBUSY;
goto unlock;
}
ktime_get_snapshot(&history_begin);
return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
&history_begin, cts);
unlock:
mlx5_clock_unlock(clock);
return err;
}
#endif /* CONFIG_X86 */
@ -263,8 +347,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);
return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
@ -272,7 +355,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer;
u32 sign;
@ -295,12 +378,10 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
static void mlx5_pps_out(struct work_struct *work)
{
struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
out_work);
struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
pps_info);
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
struct mlx5_core_dev *mdev = clock_state->mdev;
struct mlx5_clock *clock = mdev->clock;
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
unsigned long flags;
int i;
@ -322,17 +403,17 @@ static void mlx5_pps_out(struct work_struct *work)
}
}
static void mlx5_timestamp_overflow(struct work_struct *work)
static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
timer = container_of(dwork, struct mlx5_timer, overflow_work);
clock = container_of(timer, struct mlx5_clock, timer);
mdev = container_of(clock, struct mlx5_core_dev, clock);
clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
@ -343,7 +424,8 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
mlx5_clock_unlock(clock);
return timer->overflow_period;
}
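mlx5_timestamp_overflow() is now a PTP auxiliary worker instead of a self-rescheduling delayed work. Under the do_aux_work contract in ptp_clock_kernel.h, a non-negative return value is the delay in jiffies until the PTP core invokes the callback again, and ptp_schedule_worker() arms the first run (done at the end of this file's changes). A minimal sketch of the contract, not mlx5 code:

static long phc_aux_work(struct ptp_clock_info *info)
{
	/* refresh the timecounter here, before the cycle counter wraps */
	return msecs_to_jiffies(500);	/* run again in 500 ms */
}

/* after ptp_clock_register() succeeds:
 *	ptp_schedule_worker(ptp, 0);	// first run immediately
 */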
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
@ -362,15 +444,12 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
const struct timespec64 *ts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
mdev = container_of(clock, struct mlx5_core_dev, clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_settime_real_time(mdev, ts);
@ -386,6 +465,20 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
return 0;
}
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
int err;
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
err = mlx5_clock_settime(mdev, clock, ts);
mlx5_clock_unlock(clock);
return err;
}
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
struct ptp_system_timestamp *sts)
@ -402,23 +495,21 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
if (mlx5_real_time_mode(mdev)) {
*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
goto out;
}
write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_time(mdev, sts, false);
ns = timecounter_cyc2time(&timer->tc, cycles);
write_sequnlock_irqrestore(&clock->lock, flags);
ns = mlx5_timecounter_cyc2time(clock, cycles);
*ts = ns_to_timespec64(ns);
out:
mlx5_clock_unlock(clock);
return 0;
}
@ -449,14 +540,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
int err = 0;
mdev = container_of(clock, struct mlx5_core_dev, clock);
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_adjtime_real_time(mdev, delta);
err = mlx5_ptp_adjtime_real_time(mdev, delta);
if (err)
return err;
goto unlock;
}
write_seqlock_irqsave(&clock->lock, flags);
@ -464,17 +557,23 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
unlock:
mlx5_clock_unlock(clock);
return err;
}
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
err = mlx5_ptp_adjtime_real_time(mdev, delta);
mlx5_clock_unlock(clock);
return mlx5_ptp_adjtime_real_time(mdev, delta);
return err;
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
@ -503,15 +602,17 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
int err = 0;
u32 mult;
mdev = container_of(clock, struct mlx5_core_dev, clock);
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
if (mlx5_modify_mtutc_allowed(mdev)) {
int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
return err;
goto unlock;
}
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@ -521,8 +622,11 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
ptp_schedule_worker(clock->ptp, timer->overflow_period);
return 0;
unlock:
mlx5_clock_unlock(clock);
return err;
}
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
@ -531,18 +635,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
struct mlx5_core_dev *mdev;
u32 field_select = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
@ -573,6 +673,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
field_select = MLX5_MTPPS_FS_ENABLE;
}
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
if (!MLX5_PPS_CAP(mdev)) {
err = -EOPNOTSUPP;
goto unlock;
}
MLX5_SET(mtpps_reg, in, pin, pin);
MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
MLX5_SET(mtpps_reg, in, pattern, pattern);
@ -581,15 +689,23 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
return err;
goto unlock;
return mlx5_set_mtppse(mdev, pin, 0,
MLX5_EVENT_MODE_REPETETIVE & on);
err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
if (err)
goto unlock;
clock->pps_info.pin_armed[pin] = on;
clock_priv(clock)->event_mdev = mdev;
unlock:
mlx5_clock_unlock(clock);
return err;
}
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_clock *clock = mdev->clock;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta;
struct mlx5_timer *timer;
@ -648,7 +764,7 @@ static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
struct ptp_clock_request *rq,
u32 *out_pulse_duration_ns)
{
struct mlx5_pps *pps_info = &mdev->clock.pps_info;
struct mlx5_pps *pps_info = &mdev->clock->pps_info;
u32 out_pulse_duration;
struct timespec64 ts;
@ -681,7 +797,7 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
u32 *field_select, u32 *out_pulse_duration_ns,
u64 *period, u64 *time_stamp)
{
struct mlx5_pps *pps_info = &mdev->clock.pps_info;
struct mlx5_pps *pps_info = &mdev->clock->pps_info;
struct ptp_clock_time *time = &rq->perout.start;
struct timespec64 ts;
@ -716,26 +832,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
{
struct mlx5_clock *clock =
container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u32 out_pulse_duration_ns = 0;
struct mlx5_core_dev *mdev;
u32 field_select = 0;
u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
bool rt_mode;
int pin = -1;
int err = 0;
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
return -EOPNOTSUPP;
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
@ -744,14 +852,29 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (pin < 0)
return -EBUSY;
if (on) {
bool rt_mode = mlx5_real_time_mode(mdev);
mlx5_clock_lock(clock);
mdev = mlx5_clock_mdev_get(clock);
rt_mode = mlx5_real_time_mode(mdev);
if (!MLX5_PPS_CAP(mdev)) {
err = -EOPNOTSUPP;
goto unlock;
}
/* Reject requests with unsupported flags */
if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) {
err = -EOPNOTSUPP;
goto unlock;
}
if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
if (rt_mode && rq->perout.start.sec > U32_MAX)
return -EINVAL;
if (rt_mode && rq->perout.start.sec > U32_MAX) {
err = -EINVAL;
goto unlock;
}
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@ -764,7 +887,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
else
err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
if (err)
return err;
goto unlock;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@ -777,13 +900,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
return err;
goto unlock;
if (rt_mode)
return 0;
goto unlock;
return mlx5_set_mtppse(mdev, pin, 0,
MLX5_EVENT_MODE_REPETETIVE & on);
err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
unlock:
mlx5_clock_unlock(clock);
return err;
}
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
@ -856,6 +982,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
.do_aux_work = mlx5_timestamp_overflow,
};
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
@ -869,10 +996,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@ -891,8 +1016,9 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
static void mlx5_init_pin_config(struct mlx5_clock *clock)
static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = mdev->clock;
int i;
if (!clock->ptp_info.n_pins)
@ -913,15 +1039,15 @@ static void mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
clock->ptp_info.pin_config[i].chan = 0;
}
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
struct mlx5_clock *clock = mdev->clock;
mlx5_query_mtpps(mdev, out, sizeof(out));
@ -971,16 +1097,16 @@ static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
pps_nb);
struct mlx5_core_dev *mdev = clock_state->mdev;
struct mlx5_clock *clock = mdev->clock;
struct ptp_clock_event ptp_event;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
struct mlx5_core_dev *mdev;
unsigned long flags;
u64 ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
@ -1000,11 +1126,15 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
if (clock->shared) {
mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
break;
}
ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_work(&clock->pps_info.out_work);
schedule_work(&clock_state->out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@ -1016,7 +1146,7 @@ static int mlx5_pps_event(struct notifier_block *nb,
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
@ -1032,10 +1162,10 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
}
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
struct mlx5_clock *clock = mdev->clock;
struct mlx5_timer *timer = &clock->timer;
u64 overflow_cycles;
u64 frac = 0;
@ -1056,12 +1186,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
do_div(ns, NSEC_PER_SEC / HZ);
timer->overflow_period = ns;
INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
if (timer->overflow_period)
schedule_delayed_work(&timer->overflow_work, 0);
else
if (!timer->overflow_period) {
timer->overflow_period = HZ;
mlx5_core_warn(mdev,
"invalid overflow period, overflow_work is not scheduled\n");
"invalid overflow period, overflow_work is scheduled once per second\n");
}
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
@ -1069,7 +1198,7 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_clock *clock = mdev->clock;
struct mlx5_ib_clock_info *info;
struct mlx5_timer *timer;
@ -1092,7 +1221,7 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_clock *clock = mdev->clock;
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
u8 log_max_freq_adjustment = 0;
@ -1111,7 +1240,7 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_clock *clock = mdev->clock;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@ -1127,38 +1256,30 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
mlx5_init_overflow_period(mdev);
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
mlx5_ptp_settime(&clock->ptp_info, &ts);
mlx5_clock_settime(mdev, clock, &ts);
}
}
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
if (!MLX5_PPS_CAP(mdev))
return;
mlx5_get_pps_caps(mdev);
mlx5_init_pin_config(clock);
mlx5_init_pin_config(mdev);
}
void mlx5_init_clock(struct mlx5_core_dev *mdev)
static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
return;
}
struct mlx5_clock *clock = mdev->clock;
seqlock_init(&clock->lock);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
/* Initialize the device clock */
mlx5_init_timer_clock(mdev);
@ -1167,33 +1288,27 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
&mdev->pdev->dev);
clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n",
clock->shared ? "shared clock " : "",
PTR_ERR(clock->ptp));
clock->ptp = NULL;
}
MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
mlx5_eq_notifier_register(mdev, &clock->pps_nb);
if (clock->ptp)
ptp_schedule_worker(clock->ptp, 0);
}
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_clock *clock = mdev->clock;
if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
return;
mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
}
cancel_work_sync(&clock->pps_info.out_work);
cancel_delayed_work_sync(&clock->timer.overflow_work);
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
mdev->clock_info = NULL;
@ -1201,3 +1316,248 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
kfree(clock->ptp_info.pin_config);
}
static void mlx5_clock_free(struct mlx5_core_dev *mdev)
{
struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);
mlx5_destroy_clock_dev(mdev);
mutex_destroy(&cpriv->lock);
kfree(cpriv);
mdev->clock = NULL;
}
static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
{
struct mlx5_clock_priv *cpriv;
struct mlx5_clock *clock;
cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
if (!cpriv)
return -ENOMEM;
mutex_init(&cpriv->lock);
cpriv->mdev = mdev;
clock = &cpriv->clock;
clock->shared = shared;
mdev->clock = clock;
mlx5_clock_lock(clock);
mlx5_init_clock_dev(mdev);
mlx5_clock_unlock(clock);
if (!clock->shared)
return 0;
if (!clock->ptp) {
mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
mlx5_clock_free(mdev);
return -EINVAL;
}
return 0;
}
static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
{
struct mlx5_core_dev *peer_dev, *next = NULL;
struct mlx5_devcom_comp_dev *pos;
mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc,
MLX5_DEVCOM_SHARED_CLOCK,
key, NULL, mdev);
if (IS_ERR(mdev->clock_state->compdev))
return;
mlx5_devcom_comp_lock(mdev->clock_state->compdev);
mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
if (peer_dev->clock) {
next = peer_dev;
break;
}
}
if (next) {
mdev->clock = next->clock;
/* clock info is shared among all the functions using the same clock */
mdev->clock_info = next->clock_info;
} else {
mlx5_clock_alloc(mdev, true);
}
mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
if (!mdev->clock) {
mlx5_devcom_unregister_component(mdev->clock_state->compdev);
mdev->clock_state->compdev = NULL;
}
}
static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
{
struct mlx5_core_dev *peer_dev, *next = NULL;
struct mlx5_clock *clock = mdev->clock;
struct mlx5_devcom_comp_dev *pos;
mlx5_devcom_comp_lock(mdev->clock_state->compdev);
mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
if (peer_dev->clock && peer_dev != mdev) {
next = peer_dev;
break;
}
}
if (next) {
struct mlx5_clock_priv *cpriv = clock_priv(clock);
mlx5_clock_lock(clock);
if (mdev == cpriv->mdev)
cpriv->mdev = next;
mlx5_clock_unlock(clock);
} else {
mlx5_clock_free(mdev);
}
mdev->clock = NULL;
mdev->clock_info = NULL;
mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
mlx5_devcom_unregister_component(mdev->clock_state->compdev);
}
static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
struct mlx5_core_dev *new_mdev,
struct mlx5_core_dev *old_mdev)
{
struct ptp_clock_info *ptp_info = &clock->ptp_info;
struct mlx5_clock_priv *cpriv = clock_priv(clock);
int i;
for (i = 0; i < ptp_info->n_pins; i++) {
if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
!clock->pps_info.pin_armed[i])
continue;
if (new_mdev) {
mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
cpriv->event_mdev = new_mdev;
} else {
cpriv->event_mdev = NULL;
}
if (old_mdev)
mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
}
}
void mlx5_clock_load(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = mdev->clock;
struct mlx5_clock_priv *cpriv;
if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
return;
INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);
if (!clock->shared) {
mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
return;
}
cpriv = clock_priv(clock);
mlx5_devcom_comp_lock(mdev->clock_state->compdev);
mlx5_clock_lock(clock);
if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
mlx5_clock_unlock(clock);
mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
}
void mlx5_clock_unload(struct mlx5_core_dev *mdev)
{
struct mlx5_core_dev *peer_dev, *next = NULL;
struct mlx5_clock *clock = mdev->clock;
struct mlx5_devcom_comp_dev *pos;
if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
return;
if (!clock->shared) {
mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
goto out;
}
mlx5_devcom_comp_lock(mdev->clock_state->compdev);
mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
if (peer_dev->clock && peer_dev != mdev) {
next = peer_dev;
break;
}
}
mlx5_clock_lock(clock);
if (mdev == clock_priv(clock)->event_mdev)
mlx5_clock_arm_pps_in_event(clock, next, mdev);
mlx5_clock_unlock(clock);
mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
out:
mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
cancel_work_sync(&mdev->clock_state->out_work);
}
static struct mlx5_clock null_clock;
int mlx5_init_clock(struct mlx5_core_dev *mdev)
{
u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
struct mlx5_clock_dev_state *clock_state;
u64 key;
int err;
if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
mdev->clock = &null_clock;
mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
return 0;
}
clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
if (!clock_state)
return -ENOMEM;
clock_state->mdev = mdev;
mdev->clock_state = clock_state;
if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
if (mlx5_clock_identity_get(mdev, identity)) {
mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
} else {
memcpy(&key, &identity, sizeof(key));
mlx5_shared_clock_register(mdev, key);
}
}
if (!mdev->clock) {
err = mlx5_clock_alloc(mdev, false);
if (err) {
kfree(clock_state);
mdev->clock_state = NULL;
return err;
}
}
return 0;
}
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
return;
if (mdev->clock->shared)
mlx5_shared_clock_unregister(mdev);
else
mlx5_clock_free(mdev);
kfree(mdev->clock_state);
mdev->clock_state = NULL;
}


@ -33,6 +33,38 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
#include <linux/ptp_clock_kernel.h>
/* KABI checker sees the old definitions in include/linux/mlx5/driver.h */
#ifndef __GENKSYMS__
#define MAX_PIN_NUM 8
struct mlx5_pps {
u8 pin_caps[MAX_PIN_NUM];
u64 start[MAX_PIN_NUM];
u8 enabled;
u64 min_npps_period;
u64 min_out_pulse_duration_ns;
bool pin_armed[MAX_PIN_NUM];
};
struct mlx5_timer {
struct cyclecounter cycles;
struct timecounter tc;
u32 nominal_c_mult;
unsigned long overflow_period;
};
struct mlx5_clock {
seqlock_t lock;
struct hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
struct mlx5_timer timer;
bool shared;
};
#endif
static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
{
u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
@ -54,12 +86,14 @@ static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
int mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
void mlx5_clock_load(struct mlx5_core_dev *mdev);
void mlx5_clock_unload(struct mlx5_core_dev *mdev);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
return mdev->clock->ptp ? ptp_clock_index(mdev->clock->ptp) : -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
@ -87,8 +121,10 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(time);
}
#else
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline int mlx5_init_clock(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_clock_load(struct mlx5_core_dev *mdev) {}
static inline void mlx5_clock_unload(struct mlx5_core_dev *mdev) {}
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;


@ -11,6 +11,7 @@ enum mlx5_devcom_component {
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_SD_GROUP,
MLX5_DEVCOM_SHARED_CLOCK,
MLX5_DEVCOM_NUM_COMPONENTS,
};


@ -1032,7 +1032,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
err = mlx5_init_clock(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize hardware clock\n");
goto err_tables_cleanup;
}
dev->vxlan = mlx5_vxlan_create(dev);
dev->geneve = mlx5_geneve_create(dev);
@ -1040,7 +1044,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err = mlx5_init_rl_table(dev);
if (err) {
mlx5_core_err(dev, "Failed to init rate limiting\n");
goto err_tables_cleanup;
goto err_clock_cleanup;
}
err = mlx5_mpfs_init(dev);
@ -1117,10 +1121,11 @@ err_mpfs_cleanup:
mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
err_clock_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
@ -1353,6 +1358,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_eq_table;
}
mlx5_clock_load(dev);
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
@ -1436,6 +1443,7 @@ err_fpga_start:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@ -1462,6 +1470,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_clock_unload(dev);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);


@ -166,7 +166,7 @@ static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
info->punit_thread_id = FIELD_GET(LP_ID_MASK, data);
info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data);
info->pkg_id = topology_physical_package_id(cpu);
info->pkg_id = topology_logical_package_id(cpu);
info->linux_cpu = cpu;
return 0;


@ -131,6 +131,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
struct ism_req_hdr *req = cmd;
struct ism_resp_hdr *resp = cmd;
spin_lock(&ism->cmd_lock);
__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
__ism_write_cmd(ism, req, 0, sizeof(*req));
@ -144,6 +145,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
}
__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
spin_unlock(&ism->cmd_lock);
return resp->ret;
}
@ -598,6 +600,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
spin_lock_init(&ism->lock);
spin_lock_init(&ism->cmd_lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
ism->dev.parent = &pdev->dev;


@ -631,7 +631,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
errorfc(fc, "unsupported blksize for fscache mode");
return -EINVAL;
}
if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
if (erofs_is_fileio_mode(sbi)) {
sb->s_blocksize = 1 << sbi->blkszbits;
sb->s_blocksize_bits = sbi->blkszbits;
} else if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
errorfc(fc, "failed to set erofs blksize");
return -EINVAL;
}


@ -70,6 +70,24 @@ static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
!list_empty(&of->list));
}
/* Get active reference to kernfs node for an open file */
static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
{
/* Skip if file was already released */
if (unlikely(of->released))
return NULL;
if (!kernfs_get_active(of->kn))
return NULL;
return of;
}
static void kernfs_put_active_of(struct kernfs_open_file *of)
{
return kernfs_put_active(of->kn);
}
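A compressed sketch of the window these wrappers close, as one reading of the CVE-2025-39881 fix (the timeline is an interpretation, not text from the patch): kernfs_release_file() sets of->released when the file is released or the node is drained, and the new check refuses to hand out an active reference after that point.

/*
 * fd-based op (e.g. poll)              release / drain path
 * -----------------------              --------------------
 * of = kernfs_of(filp);
 *                                      kernfs_release_file(kn, of);
 *                                        -> of->released = true;
 * kernfs_get_active(of->kn)    // old: could still succeed; the op then
 *                              // ran against released state (UAF)
 * kernfs_get_active_of(of)     // new: sees of->released, returns NULL,
 *                              // and the caller bails out with an error
 */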
/**
* kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
*
@ -139,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
if (ops->seq_stop)
ops->seq_stop(sf, v);
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
}
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
@ -152,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active(of->kn))
if (!kernfs_get_active_of(of))
return ERR_PTR(-ENODEV);
ops = kernfs_ops(of->kn);
@ -238,7 +256,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active(of->kn)) {
if (!kernfs_get_active_of(of)) {
len = -ENODEV;
mutex_unlock(&of->mutex);
goto out_free;
@ -252,7 +270,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
else
len = -EINVAL;
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len < 0)
@ -323,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active(of->kn)) {
if (!kernfs_get_active_of(of)) {
mutex_unlock(&of->mutex);
len = -ENODEV;
goto out_free;
@ -335,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
else
len = -EINVAL;
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
if (len > 0)
@ -357,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
if (!of->vm_ops)
return;
if (!kernfs_get_active(of->kn))
if (!kernfs_get_active_of(of))
return;
if (of->vm_ops->open)
of->vm_ops->open(vma);
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
}
static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
@ -375,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
if (!kernfs_get_active(of->kn))
if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
if (of->vm_ops->fault)
ret = of->vm_ops->fault(vmf);
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
return ret;
}
@ -395,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
if (!of->vm_ops)
return VM_FAULT_SIGBUS;
if (!kernfs_get_active(of->kn))
if (!kernfs_get_active_of(of))
return VM_FAULT_SIGBUS;
ret = 0;
@ -404,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
else
file_update_time(file);
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
return ret;
}
@ -418,14 +436,14 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
if (!of->vm_ops)
return -EINVAL;
if (!kernfs_get_active(of->kn))
if (!kernfs_get_active_of(of))
return -EINVAL;
ret = -EINVAL;
if (of->vm_ops->access)
ret = of->vm_ops->access(vma, addr, buf, len, write);
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
return ret;
}
@ -455,7 +473,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&of->mutex);
rc = -ENODEV;
if (!kernfs_get_active(of->kn))
if (!kernfs_get_active_of(of))
goto out_unlock;
ops = kernfs_ops(of->kn);
@ -490,7 +508,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
vma->vm_ops = &kernfs_vm_ops;
out_put:
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
out_unlock:
mutex_unlock(&of->mutex);
@ -851,7 +869,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
__poll_t ret;
if (!kernfs_get_active(kn))
if (!kernfs_get_active_of(of))
return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
if (kn->attr.ops->poll)
@ -859,7 +877,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
else
ret = kernfs_generic_poll(of, wait);
kernfs_put_active(kn);
kernfs_put_active_of(of);
return ret;
}
@ -874,7 +892,7 @@ static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
* the ops aren't called concurrently for the same open file.
*/
mutex_lock(&of->mutex);
if (!kernfs_get_active(of->kn)) {
if (!kernfs_get_active_of(of)) {
mutex_unlock(&of->mutex);
return -ENODEV;
}
@ -885,7 +903,7 @@ static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence)
else
ret = generic_file_llseek(file, offset, whence);
kernfs_put_active(of->kn);
kernfs_put_active_of(of);
mutex_unlock(&of->mutex);
return ret;
}


@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
{
struct nfs_fattr *fattr = NULL;
struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
size_t fh_size = offsetof(struct nfs_fh, data);
const struct nfs_rpc_ops *rpc_ops;
struct dentry *dentry;
struct inode *inode;
int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
int len = EMBED_FH_OFF;
u32 *p = fid->raw;
int ret;
/* Initial check of bounds */
if (fh_len < len + XDR_QUADLEN(fh_size) ||
fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
return NULL;
/* Calculate embedded filehandle size */
fh_size += server_fh->size;
len += XDR_QUADLEN(fh_size);
/* NULL translates to ESTALE */
if (fh_len < len || fh_type != len)
return NULL;


@ -253,13 +253,14 @@ nfs_page_group_unlock(struct nfs_page *req)
nfs_page_clear_headlock(req);
}
/*
* nfs_page_group_sync_on_bit_locked
/**
* nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
* @req: request in page group
* @bit: PG_* bit that is used to sync page group
*
* must be called with page group lock held
*/
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
struct nfs_page *head = req->wb_head;
struct nfs_page *tmp;


@ -153,20 +153,10 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
}
}
static int
nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
int ret;
if (!test_bit(PG_REMOVE, &req->wb_flags))
return 0;
ret = nfs_page_group_lock(req);
if (ret)
return ret;
if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
nfs_page_set_inode_ref(req, inode);
nfs_page_group_unlock(req);
return 0;
}
/**
@ -583,19 +573,18 @@ retry:
return ERR_PTR(ret);
}
ret = nfs_page_group_lock(head);
if (ret < 0)
goto out_unlock;
/* Ensure that nobody removed the request before we locked it */
if (head != folio->private) {
nfs_page_group_unlock(head);
nfs_unlock_and_release_request(head);
goto retry;
}
ret = nfs_cancel_remove_inode(head, inode);
if (ret < 0)
goto out_unlock;
ret = nfs_page_group_lock(head);
if (ret < 0)
goto out_unlock;
nfs_cancel_remove_inode(head, inode);
/* lock each request in the page group */
for (subreq = head->wb_this_page;
@ -800,7 +789,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
nfs_page_group_lock(req);
if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
struct folio *folio = nfs_page_to_folio(req->wb_head);
struct address_space *mapping = folio->mapping;
@ -812,6 +802,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
spin_unlock(&mapping->i_private_lock);
}
nfs_page_group_unlock(req);
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
atomic_long_dec(&nfsi->nrequests);


@ -28,6 +28,7 @@ struct ism_dmb {
struct ism_dev {
spinlock_t lock; /* protects the ism device */
spinlock_t cmd_lock; /* serializes cmds */
struct list_head list;
struct pci_dev *pdev;


@ -161,6 +161,7 @@ enum {
MLX5_REG_MIRC = 0x9162,
MLX5_REG_MTPTM = 0x9180,
MLX5_REG_MTCTR = 0x9181,
MLX5_REG_MRTCQ = 0x9182,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
MLX5_REG_DTOR = 0xC00E,
@ -706,17 +707,38 @@ struct mlx5_rsvd_gids {
struct ida ida;
};
#define MAX_PIN_NUM 8
struct mlx5_pps {
u8 pin_caps[MAX_PIN_NUM];
/* RHEL 10.0 GA had an embedded struct mlx5_clock in struct mlx5_core_dev.
* For PTP clock support with BF-3 (RHEL-87775), it needs to change to
* a pointer. However, changes to struct mlx5_core_dev break KABI symbols:
* mlx5_blocking_notifier_register
* mlx5_blocking_notifier_unregister
* mlx5_core_access_reg
* mlx5_core_uplink_netdev_event_replay
*
* We assume that:
* - The functions are exported to serve the in-tree drivers (mlx5_ib,
* mlx5_dpll).
* - Mixing the in-tree mlx5_core with an external mlx5_* aux binary
* driver is unlikely.
* - Even if the mixing occurs, the external driver is unlikely to touch
* the "clock" member anyway.
*
* To preserve as much of the KABI as possible, we keep a dummy struct mlx5_clock where
* it was originally. To any external module accessing it, it will appear as if
* mlx5_init_clock() had aborted the HW clock init and left the struct cleared.
* To make this work, we keep the original struct layout definitions here.
*/
#define RHEL96_KABI_MAX_PIN_NUM 8
struct RH_KABI_RENAME(mlx5_pps, rhel96_kabi_mlx5_pps) {
u8 pin_caps[RHEL96_KABI_MAX_PIN_NUM];
struct work_struct out_work;
u64 start[MAX_PIN_NUM];
u64 start[RHEL96_KABI_MAX_PIN_NUM];
u8 enabled;
u64 min_npps_period;
u64 min_out_pulse_duration_ns;
};
struct mlx5_timer {
struct RH_KABI_RENAME(mlx5_timer, rhel96_kabi_mlx5_timer) {
struct cyclecounter cycles;
struct timecounter tc;
u32 nominal_c_mult;
@ -724,16 +746,18 @@ struct mlx5_timer {
struct delayed_work overflow_work;
};
struct mlx5_clock {
struct RH_KABI_RENAME(mlx5_clock, rhel96_kabi_mlx5_clock) {
struct mlx5_nb pps_nb;
seqlock_t lock;
struct hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
struct mlx5_timer timer;
struct RH_KABI_RENAME(mlx5_pps, rhel96_kabi_mlx5_pps) pps_info;
struct RH_KABI_RENAME(mlx5_timer, rhel96_kabi_mlx5_timer) timer;
};
struct mlx5_clock;
struct mlx5_clock_dev_state;
struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
@ -817,7 +841,8 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
struct mlx5_clock clock;
struct RH_KABI_RENAME(mlx5_clock, rhel96_kabi_mlx5_clock)
RH_KABI_RENAME(clock, rhel96_kabi_dummy_clock);
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
struct mlx5_rsc_dump *rsc_dump;
@ -836,6 +861,8 @@ struct mlx5_core_dev {
enum mlx5_wc_state wc_state;
/* sync write combining state */
struct mutex wc_state_lock;
RH_KABI_EXTEND(struct mlx5_clock *clock)
RH_KABI_EXTEND(struct mlx5_clock_dev_state *clock_state)
};
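For readers unfamiliar with the RHEL KABI machinery, a simplified paraphrase of the two helpers used above (the real definitions live in include/linux/rh_kabi.h and carry extra checking): the symbol-checksum pass compiles with __GENKSYMS__ defined and must see the old layout, while regular builds see the new one.

#ifdef __GENKSYMS__
# define RH_KABI_RENAME(_orig, _new)	_orig	/* checker keeps the old name */
# define RH_KABI_EXTEND(_new)			/* checker sees no new member */
#else
# define RH_KABI_RENAME(_orig, _new)	_new
# define RH_KABI_EXTEND(_new)		_new;
#endif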
struct mlx5_db {


@ -10627,7 +10627,8 @@ struct mlx5_ifc_mcam_access_reg_bits3 {
u8 regs_63_to_32[0x20];
u8 regs_31_to_2[0x1e];
u8 regs_31_to_3[0x1d];
u8 mrtcq[0x1];
u8 mtctr[0x1];
u8 mtptm[0x1];
};
@ -13118,4 +13119,12 @@ struct mlx5_ifc_msees_reg_bits {
u8 reserved_at_80[0x180];
};
struct mlx5_ifc_mrtcq_reg_bits {
u8 reserved_at_0[0x40];
u8 rt_clock_identity[0x40];
u8 reserved_at_80[0x180];
};
#endif /* MLX5_IFC_H */


@ -160,6 +160,7 @@ extern void nfs_join_page_group(struct nfs_page *head,
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);


@ -4113,6 +4113,7 @@ static void cgroup_file_release(struct kernfs_open_file *of)
cft->release(of);
put_cgroup_ns(ctx->ns);
kfree(ctx);
of->priv = NULL;
}
static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,


@ -3322,9 +3322,13 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
* and the EOF part of the last cluster.
*/
inc_cluster_info_page(si, cluster_info, 0);
for (i = 0; i < swap_header->info.nr_badpages; i++)
inc_cluster_info_page(si, cluster_info,
swap_header->info.badpages[i]);
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr >= maxpages)
continue;
inc_cluster_info_page(si, cluster_info, page_nr);
}
for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
inc_cluster_info_page(si, cluster_info, i);


@ -35,6 +35,7 @@
#include <net/xfrm.h>
#include <crypto/hash.h>
#include <crypto/utils.h>
#include <net/seg6.h>
#include <net/genetlink.h>
#include <net/seg6_hmac.h>
@ -271,7 +272,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output))
return false;
if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0)
if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN))
return false;
return true;
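Why crypto_memneq() rather than memcmp(): memcmp() may return as soon as one byte differs, so response timing leaks how many leading HMAC bytes matched, letting an attacker guess the digest byte by byte. A constant-time comparison accumulates all differences with no data-dependent branch; a sketch of the idea (the kernel's hardened implementation lives in lib/crypto/memneq.c):

static inline int memneq_sketch(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;

	while (n--)
		diff |= *pa++ ^ *pb++;	/* touch every byte, no early exit */
	return diff;			/* nonzero iff the buffers differ */
}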


@ -624,8 +624,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
do {
virtqueue_disable_cb(vq);
for (;;) {
unsigned int len, payload_len;
struct virtio_vsock_hdr *hdr;
struct sk_buff *skb;
unsigned int len;
if (!virtio_transport_more_replies(vsock)) {
/* Stop rx until the device processes already
@ -642,12 +643,19 @@ static void virtio_transport_rx_work(struct work_struct *work)
vsock->rx_buf_nr--;
/* Drop short/long packets */
if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
if (unlikely(len < sizeof(*hdr) ||
len > virtio_vsock_skb_len(skb))) {
kfree_skb(skb);
continue;
}
hdr = virtio_vsock_hdr(skb);
payload_len = le32_to_cpu(hdr->len);
if (unlikely(payload_len > len - sizeof(*hdr))) {
kfree_skb(skb);
continue;
}
virtio_vsock_skb_rx_put(skb);
virtio_transport_deliver_tap_pkt(skb);
virtio_transport_recv_pkt(&virtio_transport, skb);
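Restating the invariant the new check enforces, as a sketch with hypothetical names: len is how many bytes the device reports it wrote, and virtio_vsock_skb_rx_put() later skb_put()s the payload length taken from the header, so that claimed length must fit inside the bytes actually received.

static bool rx_lengths_ok(unsigned int len, unsigned int skb_capacity,
			  unsigned int payload_len)
{
	if (len < sizeof(struct virtio_vsock_hdr) || len > skb_capacity)
		return false;	/* short/long packet, dropped above */
	/* payload claimed by the header must fit in what was received */
	return payload_len <= len - sizeof(struct virtio_vsock_hdr);
}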


@ -903,13 +903,16 @@ void __cfg80211_connect_result(struct net_device *dev,
if (!wdev->u.client.ssid_len) {
rcu_read_lock();
for_each_valid_link(cr, link) {
u32 ssid_len;
ssid = ieee80211_bss_get_elem(cr->links[link].bss,
WLAN_EID_SSID);
if (!ssid || !ssid->datalen)
continue;
memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen);
ssid_len = min(ssid->datalen, IEEE80211_MAX_SSID_LEN);
memcpy(wdev->u.client.ssid, ssid->data, ssid_len);
wdev->u.client.ssid_len = ssid->datalen;
break;
}


@ -1,3 +1,36 @@
* Sat Oct 25 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.42.1.el10_0]
- platform/x86/intel: power-domains: Use topology_logical_package_id() for package ID (CKI Backport Bot) [RHEL-123288]
- KVM: arm64: Disable MPAM visibility by default and ignore VMM writes (Gavin Shan) [RHEL-121690]
- KVM: arm64: Add a macro for creating filtered sys_reg_descs entries (Gavin Shan) [RHEL-121690]
- redhat: rpminspect: update emptyrpm list for kernel variants (Alexandra Hájková)
- cgroup/psi: Set of->priv to NULL upon file release (CKI Backport Bot) [RHEL-119147] {CVE-2025-39881}
- kernfs: Fix UAF in polling when open file is released (CKI Backport Bot) [RHEL-119147] {CVE-2025-39881}
- ipv6: sr: Fix MAC comparison to be constant-time (CKI Backport Bot) [RHEL-116388] {CVE-2025-39702}
Resolves: RHEL-116388, RHEL-119147, RHEL-121690, RHEL-123288
* Mon Oct 20 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.41.1.el10_0]
- ALSA: hda/ca0132: Fix buffer overflow in add_tuning_control (CKI Backport Bot) [RHEL-114853] {CVE-2025-39751}
- erofs: fix blksize < PAGE_SIZE for file-backed mounts (CKI Backport Bot) [RHEL-83885] {CVE-2024-56750}
- s390/ism: fix concurrency management in ism_cmd() (CKI Backport Bot) [RHEL-114499]
- NFS: Fix filehandle bounds checking in nfs_fh_to_dentry() (CKI Backport Bot) [RHEL-113614] {CVE-2025-39730}
- net/mlx5: Generate PPS IN event on new function for shared clock (Benjamin Poirier) [RHEL-101997]
- net/mlx5: Support one PTP device per hardware clock (Benjamin Poirier) [RHEL-101997]
- net/mlx5: Move PPS notifier and out_work to clock_state (Benjamin Poirier) [RHEL-101997]
- net/mlx5: Add devcom component for the clock shared by functions (Michal Schmidt) [RHEL-101997]
- net/mlx5: Change clock in mlx5_core_dev to mlx5_clock pointer (Michal Schmidt) [RHEL-101997]
- net/mlx5: Add API to get mlx5_core_dev from mlx5_clock (Benjamin Poirier) [RHEL-101997]
- net/mlx5: Add init and destruction functions for a single HW clock (Benjamin Poirier) [RHEL-101997]
- net/mlx5: Change parameters for PTP internal functions (Benjamin Poirier) [RHEL-101997]
- net/mlx5: Add helper functions for PTP callbacks (Benjamin Poirier) [RHEL-101997]
- net/mlx5: Add support for MRTCQ register (Benjamin Poirier) [RHEL-101997]
- net/mlx5: use do_aux_work for PHC overflow checks (Michal Schmidt) [RHEL-101997]
- mlx5_en: use read sequence for gettimex64 (Benjamin Poirier) [RHEL-101997]
- NFS: Fix a race when updating an existing write (CKI Backport Bot) [RHEL-113859] {CVE-2025-39697}
- vsock/virtio: Validate length in packet header before skb_put() (Jon Maloy) [RHEL-114304] {CVE-2025-39718}
- mm: swap: fix potential buffer overflow in setup_clusters() (CKI Backport Bot) [RHEL-114863] {CVE-2025-39727}
- wifi: cfg80211: sme: cap SSID length in __cfg80211_connect_result() (CKI Backport Bot) [RHEL-117583] {CVE-2025-39849}
Resolves: RHEL-101997, RHEL-113614, RHEL-113859, RHEL-114304, RHEL-114499, RHEL-114853, RHEL-114863, RHEL-117583, RHEL-83885
* Sat Oct 11 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.40.1.el10_0]
- scsi: lpfc: Fix buffer free/clear order in deferred receive path (CKI Backport Bot) [RHEL-119125] {CVE-2025-39841}
- efivarfs: Fix slab-out-of-bounds in efivarfs_d_compare (CKI Backport Bot) [RHEL-118460] {CVE-2025-39817}


@ -21,10 +21,22 @@ emptyrpm:
- kernel-debug
- kernel-debug-devel-matched
- kernel-devel-matched
- kernel-lpae
- kernel-zfcpdump
- kernel-zfcpdump-devel-matched
- kernel-zfcpdump-modules
- kernel-zfcpdump-modules-partner
- kernel-rt
- kernel-rt-debug
- kernel-rt-debug-devel-matched
- kernel-rt-devel-matched
- kernel-64k
- kernel-64k-debug
- kernel-64k-debug-devel-matched
- kernel-64k-devel-matched
- kernel-rt-64k
- kernel-rt-64k-debug
- kernel-rt-64k-debug-devel-matched
- kernel-rt-64k-devel-matched
patches:
ignore_list:


@ -4410,7 +4410,7 @@ static int add_tuning_control(struct hda_codec *codec,
}
knew.private_value =
HDA_COMPOSE_AMP_VAL(nid, 1, 0, type);
sprintf(namestr, "%s %s Volume", name, dirstr[dir]);
snprintf(namestr, sizeof(namestr), "%s %s Volume", name, dirstr[dir]);
return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
}