Import of kernel-5.14.0-570.49.1.el9_6

This commit is contained in:
eabdullin 2025-10-07 08:23:59 +00:00
parent db9979bc72
commit bd03e68244
43 changed files with 1662 additions and 925 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 6
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 570.46.1
RHEL_RELEASE = 570.49.1
#
# ZSTREAM

View File

@ -275,15 +275,10 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
struct cpufreq_freqs freqs;
u32 desired_perf;
int ret = 0;
desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
/* Return if it is exactly the same perf */
if (desired_perf == cpu_data->perf_ctrls.desired_perf)
return ret;
cpu_data->perf_ctrls.desired_perf = desired_perf;
cpu_data->perf_ctrls.desired_perf =
cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
freqs.old = policy->cur;
freqs.new = target_freq;

View File

@ -351,7 +351,7 @@ config ICE_SWITCHDEV
config ICE_HWTS
bool "Support HW cross-timestamp on platforms with PTM support"
default y
depends on ICE && X86
depends on ICE && X86 && PCIE_PTM
help
Say Y to enable hardware supported cross-timestamping on platforms
with PCIe PTM support. The cross-timestamp is available through

View File

@ -3137,10 +3137,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
const u8 *addr = al->list[i].addr;
/* Allow to delete VF primary MAC only if it was not set
* administratively by PF or if VF is trusted.
* administratively by PF.
*/
if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
if (i40e_can_vf_change_mac(vf))
if (!vf->pf_set_mac)
was_unimac_deleted = true;
else
continue;

View File

@ -1047,10 +1047,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
{
return hw->ptp.phy_model;
}
extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */

View File

@ -1648,6 +1648,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
@ -2247,6 +2248,8 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
/* Get CGU abilities command response data structure (indirect 0x0C61) */
struct ice_aqc_get_cgu_abilities {
u8 num_inputs;

View File

@ -185,7 +185,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
* Return: true if mac_type is generic (with SBQ support), false if not
* Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@ -193,120 +193,6 @@ bool ice_is_generic_mac(struct ice_hw *hw)
hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}
/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
* returns true if the device is E810 based, false if not.
*/
bool ice_is_e810(struct ice_hw *hw)
{
return hw->mac_type == ICE_MAC_E810;
}
/**
* ice_is_e810t
* @hw: pointer to the hardware structure
*
* returns true if the device is E810T based, false if not.
*/
bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
switch (hw->subsystem_device_id) {
case ICE_SUBDEV_ID_E810T:
case ICE_SUBDEV_ID_E810T2:
case ICE_SUBDEV_ID_E810T3:
case ICE_SUBDEV_ID_E810T4:
case ICE_SUBDEV_ID_E810T6:
case ICE_SUBDEV_ID_E810T7:
return true;
}
break;
case ICE_DEV_ID_E810C_QSFP:
switch (hw->subsystem_device_id) {
case ICE_SUBDEV_ID_E810T2:
case ICE_SUBDEV_ID_E810T3:
case ICE_SUBDEV_ID_E810T5:
return true;
}
break;
default:
break;
}
return false;
}
/**
* ice_is_e822 - Check if a device is E822 family device
* @hw: pointer to the hardware structure
*
* Return: true if the device is E822 based, false if not.
*/
bool ice_is_e822(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E822C_BACKPLANE:
case ICE_DEV_ID_E822C_QSFP:
case ICE_DEV_ID_E822C_SFP:
case ICE_DEV_ID_E822C_10G_BASE_T:
case ICE_DEV_ID_E822C_SGMII:
case ICE_DEV_ID_E822L_BACKPLANE:
case ICE_DEV_ID_E822L_SFP:
case ICE_DEV_ID_E822L_10G_BASE_T:
case ICE_DEV_ID_E822L_SGMII:
return true;
default:
return false;
}
}
/**
* ice_is_e823
* @hw: pointer to the hardware structure
*
* returns true if the device is E823-L or E823-C based, false if not.
*/
bool ice_is_e823(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E823L_BACKPLANE:
case ICE_DEV_ID_E823L_SFP:
case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823L_QSFP:
case ICE_DEV_ID_E823C_BACKPLANE:
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_10G_BASE_T:
case ICE_DEV_ID_E823C_SGMII:
return true;
default:
return false;
}
}
/**
* ice_is_e825c - Check if a device is E825C family device
* @hw: pointer to the hardware structure
*
* Return: true if the device is E825-C based, false if not.
*/
bool ice_is_e825c(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E825C_BACKPLANE:
case ICE_DEV_ID_E825C_QSFP:
case ICE_DEV_ID_E825C_SFP:
case ICE_DEV_ID_E825C_SGMII:
return true;
default:
return false;
}
}
/**
* ice_is_pf_c827 - check if pf contains c827 phy
* @hw: pointer to the hw struct
@ -2388,7 +2274,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
if (!ice_is_e825c(hw)) {
if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
@ -4117,6 +4003,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_get_phy_lane_number - Get PHY lane number for current adapter
* @hw: pointer to the hw struct
*
* Return: PHY lane number on success, negative error code otherwise.
*/
int ice_get_phy_lane_number(struct ice_hw *hw)
{
struct ice_aqc_get_port_options_elem *options;
unsigned int lport = 0;
unsigned int lane;
int err;
options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
if (!options)
return -ENOMEM;
for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
u8 options_count = ICE_AQC_PORT_OPT_MAX;
u8 speed, active_idx, pending_idx;
bool active_valid, pending_valid;
err = ice_aq_get_port_options(hw, options, &options_count, lane,
true, &active_idx, &active_valid,
&pending_idx, &pending_valid);
if (err)
goto err;
if (!active_valid)
continue;
speed = options[active_idx].max_lane_speed;
/* If we don't get speed for this lane, it's unoccupied */
if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
continue;
if (hw->pf_id == lport) {
kfree(options);
return lane;
}
lport++;
}
/* PHY lane not found */
err = -ENXIO;
err:
kfree(options);
return err;
}
/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
@ -5871,6 +5808,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_get_pca9575_handle - find and return the PCA9575 controller
* @hw: pointer to the hw struct
* @pca9575_handle: GPIO controller's handle
*
* Find and return the GPIO controller's handle in the netlist.
* When found - the value will be cached in the hw structure and following calls
* will return cached value.
*
* Return: 0 on success, -ENXIO when there's no PCA9575 present.
*/
int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
{
struct ice_aqc_get_link_topo *cmd;
struct ice_aq_desc desc;
int err;
u8 idx;
/* If handle was read previously return cached value */
if (hw->io_expander_handle) {
*pca9575_handle = hw->io_expander_handle;
return 0;
}
#define SW_PCA9575_SFP_TOPO_IDX 2
#define SW_PCA9575_QSFP_TOPO_IDX 1
/* Check if the SW IO expander controlling SMA exists in the netlist. */
if (hw->device_id == ICE_DEV_ID_E810C_SFP)
idx = SW_PCA9575_SFP_TOPO_IDX;
else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
idx = SW_PCA9575_QSFP_TOPO_IDX;
else
return -ENXIO;
/* If handle was not detected read it from the netlist */
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
cmd = &desc.params.get_link_topo;
cmd->addr.topo_params.node_type_ctx =
ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
cmd->addr.topo_params.index = idx;
err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (err)
return -ENXIO;
/* Verify if we found the right IO expander type */
if (desc.params.get_link_topo.node_part_num !=
ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
return -ENXIO;
/* If present save the handle and return it */
hw->io_expander_handle =
le16_to_cpu(desc.params.get_link_topo.addr.handle);
*pca9575_handle = hw->io_expander_handle;
return 0;
}
/**
* ice_read_pca9575_reg - read the register from the PCA9575 controller
* @hw: pointer to the hw struct
* @offset: GPIO controller register offset
* @data: pointer to data to be read from the GPIO controller
*
* Return: 0 on success, negative error code otherwise.
*/
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
{
struct ice_aqc_link_topo_addr link_topo;
__le16 addr;
u16 handle;
int err;
memset(&link_topo, 0, sizeof(link_topo));
err = ice_get_pca9575_handle(hw, &handle);
if (err)
return err;
link_topo.handle = cpu_to_le16(handle);
link_topo.topo_params.node_type_ctx =
FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
addr = cpu_to_le16((u16)offset);
return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
}
/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct

View File

@ -132,7 +132,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@ -192,6 +191,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
@ -274,10 +274,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
bool ice_is_e822(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@ -304,5 +300,7 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */

View File

@ -2312,14 +2312,14 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
if (ice_is_e825c(hw))
if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
}
if (!ice_is_e825c(hw))
if (hw->mac_type != ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

View File

@ -2064,6 +2064,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
return 0;
}
/**
* ice_dpll_phase_range_set - initialize phase adjust range helper
* @range: pointer to phase adjust range struct to be initialized
* @phase_adj: a value to be used as min(-)/max(+) boundary
*/
static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
u32 phase_adj)
{
range->min = -phase_adj;
range->max = phase_adj;
}
/**
* ice_dpll_init_info_pins_generic - initializes generic pins info
* @pf: board private structure
@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
for (i = 0; i < pin_num; i++) {
pins[i].idx = i;
pins[i].prop.board_label = labels[i];
pins[i].prop.phase_range.min = phase_adj_max;
pins[i].prop.phase_range.max = -phase_adj_max;
ice_dpll_phase_range_set(&pins[i].prop.phase_range,
phase_adj_max);
pins[i].prop.capabilities = cap;
pins[i].pf = pf;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
u32 phase_adj_max;
u8 freq_supp_num;
bool input;
@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
case ICE_DPLL_PIN_TYPE_INPUT:
pins = pf->dplls.inputs;
num_pins = pf->dplls.num_inputs;
phase_adj_max = pf->dplls.input_phase_adj_max;
input = true;
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
pins = pf->dplls.outputs;
num_pins = pf->dplls.num_outputs;
phase_adj_max = pf->dplls.output_phase_adj_max;
input = false;
break;
default:
@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
pins[i].prop.phase_range.min =
pf->dplls.input_phase_adj_max;
pins[i].prop.phase_range.max =
-pf->dplls.input_phase_adj_max;
} else {
pins[i].prop.phase_range.min =
pf->dplls.output_phase_adj_max;
pins[i].prop.phase_range.max =
-pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
}
ice_dpll_phase_range_set(&pins[i].prop.phase_range,
phase_adj_max);
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
dp->dpll_idx = abilities.pps_dpll_idx;
d->num_inputs = abilities.num_inputs;
d->num_outputs = abilities.num_outputs;
d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
alloc_size = sizeof(*d->inputs) * d->num_inputs;
d->inputs = kzalloc(alloc_size, GFP_KERNEL);

View File

@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
* ice_gnss_is_gps_present - Check if GPS HW is present
* ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
*
* Return: true when GNSS is present, false otherwise.
*/
bool ice_gnss_is_gps_present(struct ice_hw *hw)
bool ice_gnss_is_module_present(struct ice_hw *hw)
{
if (!hw->func_caps.ts_func_info.src_tmr_owned)
int err;
u8 data;
if (!hw->func_caps.ts_func_info.src_tmr_owned ||
!ice_is_gps_in_netlist(hw))
return false;
if (!ice_is_gps_in_netlist(hw))
err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
if (ice_is_e810t(hw)) {
int err;
u8 data;
err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
} else {
return false;
}
#else
if (!ice_is_e810t(hw))
return false;
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}

View File

@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
bool ice_gnss_is_gps_present(struct ice_hw *hw);
bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}

View File

@ -533,10 +533,22 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
#define E830_GLPTM_ART_CTL 0x00088B50
#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
#define E830_GLPTM_ART_TIME_H 0x00088B54
#define E830_GLPTM_ART_TIME_L 0x00088B58
#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
#define E830_PFPTM_SEM 0x00088B00
#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000

View File

@ -3885,7 +3885,7 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
if (ice_gnss_is_gps_present(&pf->hw))
if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:

View File

@ -1158,7 +1158,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@ -3188,12 +3188,14 @@ static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
hw = &pf->hw;
tx = &pf->ptp.port.tx;
spin_lock_irqsave(&tx->lock, flags);
ice_ptp_complete_tx_single_tstamp(tx);
if (tx->init) {
ice_ptp_complete_tx_single_tstamp(tx);
idx = find_next_bit_wrap(tx->in_use, tx->len,
tx->last_ll_ts_idx_read + 1);
if (idx != tx->len)
ice_ptp_req_tx_single_tstamp(tx, idx);
idx = find_next_bit_wrap(tx->in_use, tx->len,
tx->last_ll_ts_idx_read + 1);
if (idx != tx->len)
ice_ptp_req_tx_single_tstamp(tx, idx);
}
spin_unlock_irqrestore(&tx->lock, flags);
val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@ -3295,22 +3297,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
if (ice_pf_state_is_nominal(pf) &&
pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
struct ice_ptp_tx *tx = &pf->ptp.port.tx;
unsigned long flags;
u8 idx;
spin_lock_irqsave(&tx->lock, flags);
idx = find_next_bit_wrap(tx->in_use, tx->len,
tx->last_ll_ts_idx_read + 1);
if (idx != tx->len)
ice_ptp_req_tx_single_tstamp(tx, idx);
spin_unlock_irqrestore(&tx->lock, flags);
} else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
ret = IRQ_WAKE_THREAD;
}
ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@ -4057,8 +4045,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
if (func_caps->common_cap.ieee_1588 &&
!(pf->hw.mac_type == ICE_MAC_E830))
if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
@ -5045,6 +5032,12 @@ static int ice_init(struct ice_pf *pf)
if (err)
return err;
if (pf->hw.mac_type == ICE_MAC_E830) {
err = pci_enable_ptm(pf->pdev, NULL);
if (err)
dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n");
}
err = ice_alloc_vsis(pf);
if (err)
goto err_alloc_vsis;
@ -6734,7 +6727,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
ice_ptp_link_change(pf, pf->hw.pf_id, true);
ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@ -7204,7 +7197,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}

View File

@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
/* name, gpio */
{ TIME_SYNC, { 4, -1 }},
{ ONE_PPS, { -1, 5 }},
/* name, gpio, delay */
{ TIME_SYNC, { 4, -1 }, { 0, 0 }},
{ ONE_PPS, { -1, 5 }, { 0, 11 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
/* name, gpio */
{ SDP0, { 0, 0 }},
{ SDP1, { 1, 1 }},
{ SDP2, { 2, 2 }},
{ SDP3, { 3, 3 }},
{ TIME_SYNC, { 4, -1 }},
{ ONE_PPS, { -1, 5 }},
/* name, gpio, delay */
{ SDP0, { 0, 0 }, { 15, 14 }},
{ SDP1, { 1, 1 }, { 15, 14 }},
{ SDP2, { 2, 2 }, { 15, 14 }},
{ SDP3, { 3, 3 }, { 15, 14 }},
{ TIME_SYNC, { 4, -1 }, { 11, 0 }},
{ ONE_PPS, { -1, 5 }, { 0, 9 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
/* name, gpio */
{ SDP0, { 0, 0 }},
{ SDP1, { 1, 1 }},
{ SDP2, { 2, 2 }},
{ SDP3, { 3, 3 }},
{ ONE_PPS, { -1, 5 }},
/* name, gpio, delay */
{ SDP0, { 0, 0 }, { 0, 1 }},
{ SDP1, { 1, 1 }, { 0, 1 }},
{ SDP2, { 2, 2 }, { 0, 1 }},
{ SDP3, { 3, 3 }, { 0, 1 }},
{ ONE_PPS, { -1, 5 }, { 0, 1 }},
};
static const char ice_pin_names_nvm[][64] = {
@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
/* name, gpio */
{ GNSS, { 1, -1 }},
{ SMA1, { 1, 0 }},
{ UFL1, { -1, 0 }},
{ SMA2, { 3, 2 }},
{ UFL2, { 3, -1 }},
/* name, gpio, delay */
{ GNSS, { 1, -1 }, { 0, 0 }},
{ SMA1, { 1, 0 }, { 0, 1 }},
{ UFL1, { -1, 0 }, { 0, 1 }},
{ SMA2, { 3, 2 }, { 0, 1 }},
{ UFL2, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@ -310,6 +310,15 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
if (hw->mac_type == ICE_MAC_E830) {
u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
ptp_read_system_postts(sts);
return clk_time;
}
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
@ -971,28 +980,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
tx->len = 0;
}
/**
* ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
* @port: the port this structure tracks
*
* Initialize the Tx timestamp tracker for this port. ETH56G PHYs
* have independent memory blocks for all ports.
*
* Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
*/
static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
u8 port)
{
tx->block = port;
tx->offset = 0;
tx->len = INDEX_PER_PORT_ETH56G;
tx->has_ready_bitmap = 1;
return ice_ptp_alloc_tx_tracker(tx);
}
/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
@ -1003,9 +990,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
*
* Return: 0 on success, -ENOMEM when out of memory
*/
static int
ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
u8 port)
{
tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
@ -1016,24 +1005,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
* ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
* ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
* @port: the port this structure tracks
*
* Initialize the Tx timestamp tracker for this PF. For E810 devices, each
* port has its own block of timestamps, independent of the other ports.
* Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
* each port has its own block of timestamps, independent of the other ports.
*
* Return: 0 on success, -ENOMEM when out of memory
*/
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
tx->block = pf->hw.port_info->lport;
tx->block = port;
tx->offset = 0;
tx->len = INDEX_PER_PORT_E810;
tx->len = INDEX_PER_PORT;
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
tx->has_ready_bitmap = 0;
tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@ -1318,20 +1310,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
if (ice_is_e810(hw))
return 0;
mutex_lock(&ptp_port->ps_lock);
switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
err = ice_stop_phy_timer_eth56g(hw, port, true);
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
err = 0;
break;
case ICE_PHY_E82X:
case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
case ICE_MAC_GENERIC_3K_E825:
err = ice_stop_phy_timer_eth56g(hw, port, true);
break;
default:
err = -ENODEV;
}
@ -1361,19 +1354,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
if (ice_is_e810(hw))
return 0;
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
err = ice_start_phy_timer_eth56g(hw, port);
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
err = 0;
break;
case ICE_PHY_E82X:
case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@ -1398,6 +1389,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
case ICE_MAC_GENERIC_3K_E825:
err = ice_start_phy_timer_eth56g(hw, port);
break;
default:
err = -ENODEV;
}
@ -1414,10 +1408,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
* @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@ -1425,14 +1418,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
return;
ptp_port = &pf->ptp.port;
if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
port *= 2;
if (WARN_ON_ONCE(ptp_port->port_num != port))
return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@ -1440,12 +1426,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
switch (ice_get_phy_model(hw)) {
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
/* Do not reconfigure E810 or E830 PHY */
return;
case ICE_PHY_ETH56G:
case ICE_PHY_E82X:
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@ -1473,24 +1461,11 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G: {
int port;
for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
port, err);
return err;
}
}
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
return 0;
}
case ICE_PHY_E82X: {
case ICE_MAC_GENERIC: {
int quad;
for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
@ -1507,12 +1482,24 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
return 0;
}
case ICE_PHY_E810:
case ICE_MAC_GENERIC_3K_E825: {
int port;
for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
port, err);
return err;
}
}
return 0;
case ICE_PHY_UNSUP:
}
case ICE_MAC_UNKNOWN:
default:
dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@ -1592,18 +1579,29 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
/* Check if channel is enabled */
if (pf->ptp.ext_ts_irq & (1 << chan)) {
lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
event.timestamp = (((u64)hi) << 32) | lo;
event.type = PTP_CLOCK_EXTTS;
event.index = chan;
int pin_desc_idx;
/* Fire event */
ptp_clock_event(pf->ptp.clock, &event);
pf->ptp.ext_ts_irq &= ~(1 << chan);
/* Check if channel is enabled */
if (!(pf->ptp.ext_ts_irq & (1 << chan)))
continue;
lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
event.timestamp = (u64)hi << 32 | lo;
/* Add delay compensation */
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
if (pin_desc_idx >= 0) {
const struct ice_ptp_pin_desc *desc;
desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
event.timestamp -= desc->delay[0];
}
event.type = PTP_CLOCK_EXTTS;
event.index = chan;
pf->ptp.ext_ts_irq &= ~(1 << chan);
ptp_clock_event(pf->ptp.clock, &event);
}
}
@ -1737,7 +1735,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
if (ice_is_e825c(hw)) {
if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
@ -1799,9 +1797,9 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
unsigned int gpio_pin;
int pin_desc_idx;
if (rq->flags & ~PTP_PEROUT_PHASE)
@ -1812,6 +1810,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return -EIO;
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT
@ -1821,7 +1820,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@ -1844,11 +1843,11 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
* from now, so we have time to write it to HW.
*/
clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
start -= ice_prop_delay(hw);
start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
@ -2076,7 +2075,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@ -2100,7 +2099,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
if (ice_get_phy_model(hw) == ICE_PHY_E82X)
if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@ -2178,93 +2177,158 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
#ifdef CONFIG_ICE_HWTS
/**
* ice_ptp_get_syncdevicetime - Get the cross time stamp info
* struct ice_crosststamp_cfg - Device cross timestamp configuration
* @lock_reg: The hardware semaphore lock to use
* @lock_busy: Bit in the semaphore lock indicating the lock is busy
* @ctl_reg: The hardware register to request cross timestamp
* @ctl_active: Bit in the control register to request cross timestamp
* @art_time_l: Lower 32-bits of ART system time
* @art_time_h: Upper 32-bits of ART system time
* @dev_time_l: Lower 32-bits of device time (per timer index)
* @dev_time_h: Upper 32-bits of device time (per timer index)
*/
struct ice_crosststamp_cfg {
/* HW semaphore lock register */
u32 lock_reg;
u32 lock_busy;
/* Capture control register */
u32 ctl_reg;
u32 ctl_active;
/* Time storage */
u32 art_time_l;
u32 art_time_h;
u32 dev_time_l[2];
u32 dev_time_h[2];
};
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
.lock_reg = PFHH_SEM,
.lock_busy = PFHH_SEM_BUSY_M,
.ctl_reg = GLHH_ART_CTL,
.ctl_active = GLHH_ART_CTL_ACTIVE_M,
.art_time_l = GLHH_ART_TIME_L,
.art_time_h = GLHH_ART_TIME_H,
.dev_time_l[0] = GLTSYN_HHTIME_L(0),
.dev_time_h[0] = GLTSYN_HHTIME_H(0),
.dev_time_l[1] = GLTSYN_HHTIME_L(1),
.dev_time_h[1] = GLTSYN_HHTIME_H(1),
};
#ifdef CONFIG_ICE_HWTS
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
.lock_reg = E830_PFPTM_SEM,
.lock_busy = E830_PFPTM_SEM_BUSY_M,
.ctl_reg = E830_GLPTM_ART_CTL,
.ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
.art_time_l = E830_GLPTM_ART_TIME_L,
.art_time_h = E830_GLPTM_ART_TIME_H,
.dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
.dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
.dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
.dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
};
#endif /* CONFIG_ICE_HWTS */
/**
* struct ice_crosststamp_ctx - Device cross timestamp context
* @snapshot: snapshot of system clocks for historic interpolation
* @pf: pointer to the PF private structure
* @cfg: pointer to hardware configuration for cross timestamp
*/
struct ice_crosststamp_ctx {
struct system_time_snapshot snapshot;
struct ice_pf *pf;
const struct ice_crosststamp_cfg *cfg;
};
/**
* ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
* @ctx: Context provided by timekeeping code
* @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
*
* Return: zero on success, or a negative error code on failure.
*/
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
static int ice_capture_crosststamp(ktime_t *device,
struct system_counterval_t *system,
void *__ctx)
{
struct ice_pf *pf = (struct ice_pf *)ctx;
struct ice_hw *hw = &pf->hw;
u32 hh_lock, hh_art_ctl;
int i;
struct ice_crosststamp_ctx *ctx = __ctx;
const struct ice_crosststamp_cfg *cfg;
u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
u64 ts;
#define MAX_HH_HW_LOCK_TRIES 5
#define MAX_HH_CTL_LOCK_TRIES 100
cfg = ctx->cfg;
pf = ctx->pf;
hw = &pf->hw;
for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
/* Get the HW lock */
hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
if (hh_lock & PFHH_SEM_BUSY_M) {
usleep_range(10000, 15000);
continue;
}
break;
}
if (hh_lock & PFHH_SEM_BUSY_M) {
dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
if (tmr_idx > 1)
return -EINVAL;
/* Poll until we obtain the cross-timestamp hardware semaphore */
err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
!(lock & cfg->lock_busy),
10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
/* Snapshot system time for historic interpolation */
ktime_get_snapshot(&ctx->snapshot);
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
wr32(hw, GLHH_ART_CTL, hh_art_ctl);
ctl = rd32(hw, cfg->ctl_reg);
ctl |= cfg->ctl_active;
wr32(hw, cfg->ctl_reg, ctl);
for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
/* Wait for sync to complete */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
udelay(1);
continue;
} else {
u32 hh_ts_lo, hh_ts_hi, tmr_idx;
u64 hh_ts;
/* Poll until hardware completes the capture */
err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
5, 20 * USEC_PER_MSEC);
if (err)
goto err_timeout;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
/* Read ART time */
hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
system->cycles = hh_ts;
system->cs_id = CSID_X86_ART;
/* Read Device source clock time */
hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
*device = ns_to_ktime(hh_ts);
break;
}
}
/* Read ART system time */
ts_lo = rd32(hw, cfg->art_time_l);
ts_hi = rd32(hw, cfg->art_time_h);
ts = ((u64)ts_hi << 32) | ts_lo;
system->cycles = ts;
system->cs_id = CSID_X86_ART;
system->use_nsecs = true;
/* Read Device source clock time */
ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
ts = ((u64)ts_hi << 32) | ts_lo;
*device = ns_to_ktime(ts);
err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
lock = rd32(hw, cfg->lock_reg);
lock &= ~cfg->lock_busy;
wr32(hw, cfg->lock_reg, lock);
if (i == MAX_HH_CTL_LOCK_TRIES)
return -ETIMEDOUT;
return 0;
return err;
}
/**
* ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
* ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@ -2272,22 +2336,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
* This is only valid for E822 and E823 devices which have support for
* generating the cross timestamp via PCIe PTM.
*
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
*
* Return: zero on success, or a negative error code on failure.
*/
static int
ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
struct system_device_crosststamp *cts)
static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_crosststamp_ctx ctx = {
.pf = pf,
};
return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
pf, NULL, cts);
}
switch (pf->hw.mac_type) {
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K_E825:
ctx.cfg = &ice_crosststamp_cfg_e82x;
break;
#ifdef CONFIG_ICE_HWTS
case ICE_MAC_E830:
ctx.cfg = &ice_crosststamp_cfg_e830;
break;
#endif /* CONFIG_ICE_HWTS */
default:
return -EOPNOTSUPP;
}
return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
&ctx.snapshot, cts);
}
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
@ -2548,13 +2626,9 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
*/
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
if (boot_cpu_has(X86_FEATURE_ART) &&
boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
#endif /* CONFIG_ICE_HWTS */
if (ice_is_e825c(&pf->hw)) {
if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
} else {
@ -2620,6 +2694,28 @@ err:
}
}
/**
* ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
* @pf: Board private structure
*
* Assign functions to the PTP capabiltiies structure for E830 devices.
* Functions which operate across all device families should be set directly
* in ice_ptp_set_caps. Only add functions here which are distinct for E830
* devices.
*/
static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
#endif /* CONFIG_ICE_HWTS */
/* Rest of the config is the same as base E810 */
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
ice_ptp_setup_pin_cfg(pf);
}
/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
@ -2642,10 +2738,20 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
if (ice_is_e810(&pf->hw))
switch (pf->hw.mac_type) {
case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
else
return;
case ICE_MAC_E830:
ice_ptp_set_funcs_e830(pf);
return;
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
return;
default:
return;
}
}
/**
@ -2755,6 +2861,68 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
}
/**
* ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
* @pf: Board private structure
*
* Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
* half of the interrupt and IRQ_HANDLED otherwise.
*/
irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
switch (hw->mac_type) {
case ICE_MAC_E810:
/* E810 capable of low latency timestamping with interrupt can
* request a single timestamp in the top half and wait for
* a second LL TS interrupt from the FW when it's ready.
*/
if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
struct ice_ptp_tx *tx = &pf->ptp.port.tx;
u8 idx, last;
if (!ice_pf_state_is_nominal(pf))
return IRQ_HANDLED;
spin_lock(&tx->lock);
if (tx->init) {
last = tx->last_ll_ts_idx_read + 1;
idx = find_next_bit_wrap(tx->in_use, tx->len,
last);
if (idx != tx->len)
ice_ptp_req_tx_single_tstamp(tx, idx);
}
spin_unlock(&tx->lock);
return IRQ_HANDLED;
}
fallthrough; /* non-LL_TS E810 */
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K_E825:
/* All other devices process timestamps in the bottom half due
* to sleeping or polling.
*/
if (!ice_ptp_pf_handles_tx_interrupt(pf))
return IRQ_HANDLED;
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
return IRQ_WAKE_THREAD;
case ICE_MAC_E830:
/* E830 can read timestamps in the top half using rd32() */
if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
/* Process outstanding Tx timestamps. If there
* is more work, re-arm the interrupt to trigger again.
*/
wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
ice_flush(hw);
}
return IRQ_HANDLED;
default:
return IRQ_HANDLED;
}
}
/**
* ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt
* @pf: Board private structure
@ -2775,7 +2943,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
if (ice_is_e810(hw))
if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@ -2914,14 +3082,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
return err;
/* Enable quad interrupts */
err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
return err;
ice_ptp_restart_all_phy(pf);
}
ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@ -2969,8 +3135,9 @@ err:
static bool ice_is_primary(struct ice_hw *hw)
{
return ice_is_e825c(hw) && ice_is_dual(hw) ?
!!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
!!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
true;
}
static int ice_ptp_setup_adapter(struct ice_pf *pf)
@ -2988,7 +3155,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@ -3005,7 +3172,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@ -3125,6 +3292,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
*
* Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@ -3132,16 +3301,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
ptp_port->port_num);
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
case ICE_PHY_E82X:
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
case ICE_MAC_GENERIC_3K_E825:
return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@ -3160,8 +3327,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
switch (ice_get_phy_model(&pf->hw)) {
case ICE_PHY_E82X:
switch (pf->hw.mac_type) {
case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
@ -3192,10 +3359,17 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
int err;
int lane_num, err;
ptp->state = ICE_PTP_INITIALIZING;
lane_num = ice_get_phy_lane_number(hw);
if (lane_num < 0) {
err = lane_num;
goto err_exit;
}
ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@ -3216,10 +3390,6 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
ptp->port.port_num = hw->pf_id;
if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
ptp->port.port_num = hw->pf_id * 2;
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err_exit;

View File

@ -128,8 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
#define INDEX_PER_PORT_ETH56G 64
#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@ -211,6 +210,7 @@ enum ice_ptp_pin_nvm {
* struct ice_ptp_pin_desc - hardware pin description data
* @name_idx: index of the name of pin in ice_pin_names
* @gpio: the associated GPIO input and output pins
* @delay: input and output signal delays in nanoseconds
*
* Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
* for the device. Device families have separate sets of available pins with
@ -219,6 +219,7 @@ enum ice_ptp_pin_nvm {
struct ice_ptp_pin_desc {
int name_idx;
int gpio[2];
unsigned int delay[2];
};
/**
@ -302,6 +303,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@ -310,7 +312,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@ -340,6 +342,11 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
{
return IRQ_HANDLED;
}
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
@ -358,7 +365,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}

View File

@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
.fc = 0xfffe0014, /* -255.96 */
.fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
/* pps_delay */
11,
},
/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
/* pps_delay */
12,
},
/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
/* pps_delay */
12,
},
/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
/* pps_delay */
12,
},
/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
830078125, /* 830.78125 MHz */
/* nominal_incval */
0x134679aceULL,
/* pps_delay */
11,
},
/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
/* pps_delay */
12,
},
};

File diff suppressed because it is too large Load Diff

View File

@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g {
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
* @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
u8 pps_delay;
};
/**
@ -326,8 +324,7 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
#define ICE_E810_OUT_PROP_DELAY_NS 1
#define ICE_E825C_OUT_PROP_DELAY_NS 11
#define ICE_E810_E830_SYNC_DELAY 0
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@ -389,11 +386,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
return e82x_time_ref[time_ref].nominal_incval;
}
static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
{
return e82x_time_ref[time_ref].pps_delay;
}
/* E822 Vernier calibration functions */
int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
@ -404,7 +396,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@ -434,20 +425,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
static inline u64 ice_prop_delay(const struct ice_hw *hw)
{
switch (hw->ptp.phy_model) {
case ICE_PHY_ETH56G:
return ICE_E825C_OUT_PROP_DELAY_NS;
case ICE_PHY_E810:
return ICE_E810_OUT_PROP_DELAY_NS;
case ICE_PHY_E82X:
return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
default:
return 0;
}
}
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@ -456,13 +433,14 @@ static inline u64 ice_prop_delay(const struct ice_hw *hw)
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
switch (hw->ptp.phy_model) {
case ICE_PHY_ETH56G:
return ICE_ETH56G_NOMINAL_INCVAL;
case ICE_PHY_E810:
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
return ICE_PTP_NOMINAL_INCVAL_E810;
case ICE_PHY_E82X:
case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
case ICE_MAC_GENERIC_3K_E825:
return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
@ -675,19 +653,25 @@ static inline bool ice_is_dual(struct ice_hw *hw)
/* E810 timer command register */
#define E810_ETH_GLTSYN_CMD 0x03000344
/* E830 timer command register */
#define E830_ETH_GLTSYN_CMD 0x00088814
/* E810 PHC time register */
#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
/* PHY 40b registers macros */
#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
#define PHY_40B_LOW_M GENMASK(7, 0)
#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
#define TS_PHY_LOW_M 0xFF
#define TS_PHY_HIGH_M 0xFFFFFFFF
#define TS_PHY_HIGH_S 8
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4

View File

@ -861,7 +861,6 @@ struct ice_e810_params {
struct ice_eth56g_params {
u8 num_phys;
u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
@ -872,14 +871,6 @@ union ice_phy_params {
struct ice_eth56g_params eth56g;
};
/* PHY model */
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
ICE_PHY_E810 = 1,
ICE_PHY_E82X,
ICE_PHY_ETH56G,
};
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@ -889,11 +880,9 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
bool is_2x50g_muxed_topo;
};
/* Port hardware description */

View File

@ -55,8 +55,6 @@ static int mdsc_show(struct seq_file *s, void *p)
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct rb_node *rp;
int pathlen = 0;
u64 pathbase;
char *path;
mutex_lock(&mdsc->mutex);
@ -81,8 +79,8 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
&pathbase, 0);
struct ceph_path_info path_info;
path = ceph_mdsc_build_path(req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_dentry->d_lock);
@ -91,7 +89,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_dentry,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
ceph_mdsc_free_path(path, pathlen);
ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req->r_path1);
@ -100,8 +98,8 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
&pathbase, 0);
struct ceph_path_info path_info;
path = ceph_mdsc_build_path(req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_old_dentry->d_lock);
@ -111,7 +109,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_old_dentry,
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
ceph_mdsc_free_path(path, pathlen);
ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,

View File

@ -1224,10 +1224,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
/* If op failed, mark everyone involved for errors */
if (result) {
int pathlen = 0;
u64 base = 0;
char *path = ceph_mdsc_build_path(dentry, &pathlen,
&base, 0);
struct ceph_path_info path_info = {0};
char *path = ceph_mdsc_build_path(dentry, &path_info, 0);
/* mark error on parent + clear complete */
mapping_set_error(req->r_parent->i_mapping, result);
@ -1241,8 +1239,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
mapping_set_error(req->r_old_inode->i_mapping, result);
pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path_info(&path_info);
}
out:
iput(req->r_old_inode);

View File

@ -576,14 +576,12 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
mapping_set_error(req->r_parent->i_mapping, result);
if (result) {
int pathlen = 0;
u64 base = 0;
char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
&base, 0);
struct ceph_path_info path_info = {0};
char *path = ceph_mdsc_build_path(req->r_dentry, &path_info, 0);
pr_warn("async create failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path_info(&path_info);
ceph_dir_clear_complete(req->r_parent);
if (!d_unhashed(dentry))

View File

@ -55,6 +55,52 @@ static int ceph_set_ino_cb(struct inode *inode, void *data)
return 0;
}
/*
* Check if the parent inode matches the vino from directory reply info
*/
static inline bool ceph_vino_matches_parent(struct inode *parent,
struct ceph_vino vino)
{
return ceph_ino(parent) == vino.ino && ceph_snap(parent) == vino.snap;
}
/*
* Validate that the directory inode referenced by @req->r_parent matches the
* inode number and snapshot id contained in the reply's directory record. If
* they do not match which can theoretically happen if the parent dentry was
* moved between the time the request was issued and the reply arrived fall
* back to looking up the correct inode in the inode cache.
*
* A reference is *always* returned. Callers that receive a different inode
* than the original @parent are responsible for dropping the extra reference
* once the reply has been processed.
*/
static struct inode *ceph_get_reply_dir(struct super_block *sb,
struct inode *parent,
struct ceph_mds_reply_info_parsed *rinfo)
{
struct ceph_vino vino;
if (unlikely(!rinfo->diri.in))
return parent; /* nothing to compare against */
/* If we didn't have a cached parent inode to begin with, just bail out. */
if (!parent)
return NULL;
vino.ino = le64_to_cpu(rinfo->diri.in->ino);
vino.snap = le64_to_cpu(rinfo->diri.in->snapid);
if (likely(ceph_vino_matches_parent(parent, vino)))
return parent; /* matches use the original reference */
/* Mismatch this should be rare. Emit a WARN and obtain the correct inode. */
WARN_ONCE(1, "ceph: reply dir mismatch (parent valid %llx.%llx reply %llx.%llx)\n",
ceph_ino(parent), ceph_snap(parent), vino.ino, vino.snap);
return ceph_get_inode(sb, vino, NULL);
}
/**
* ceph_new_inode - allocate a new inode in advance of an expected create
* @dir: parent directory for new inode
@ -1489,6 +1535,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
struct inode *in = NULL;
struct ceph_vino tvino, dvino;
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
struct inode *parent_dir = NULL;
int err = 0;
dout("fill_trace %p is_dentry %d is_target %d\n", req,
@ -1502,10 +1549,18 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
}
if (rinfo->head->is_dentry) {
struct inode *dir = req->r_parent;
/*
* r_parent may be stale, in cases when R_PARENT_LOCKED is not set,
* so we need to get the correct inode
*/
parent_dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
if (unlikely(IS_ERR(parent_dir))) {
err = PTR_ERR(parent_dir);
goto done;
}
if (dir) {
err = ceph_fill_inode(dir, NULL, &rinfo->diri,
if (parent_dir) {
err = ceph_fill_inode(parent_dir, NULL, &rinfo->diri,
rinfo->dirfrag, session, -1,
&req->r_caps_reservation);
if (err < 0)
@ -1514,14 +1569,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
WARN_ON_ONCE(1);
}
if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
if (parent_dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
bool is_nokey = false;
struct qstr dname;
struct dentry *dn, *parent;
struct fscrypt_str oname = FSTR_INIT(NULL, 0);
struct ceph_fname fname = { .dir = dir,
struct ceph_fname fname = { .dir = parent_dir,
.name = rinfo->dname,
.ctext = rinfo->altname,
.name_len = rinfo->dname_len,
@ -1530,10 +1585,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
BUG_ON(!rinfo->head->is_target);
BUG_ON(req->r_dentry);
parent = d_find_any_alias(dir);
parent = d_find_any_alias(parent_dir);
BUG_ON(!parent);
err = ceph_fname_alloc_buffer(dir, &oname);
err = ceph_fname_alloc_buffer(parent_dir, &oname);
if (err < 0) {
dput(parent);
goto done;
@ -1542,7 +1597,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
if (err < 0) {
dput(parent);
ceph_fname_free_buffer(dir, &oname);
ceph_fname_free_buffer(parent_dir, &oname);
goto done;
}
dname.name = oname.name;
@ -1550,6 +1605,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
dname.hash = full_name_hash(parent, dname.name, dname.len);
tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
dn = d_lookup(parent, &dname);
dout("d_lookup on parent=%p name=%.*s got %p\n",
@ -1561,7 +1617,7 @@ retry_lookup:
dname.len, dname.name, dn);
if (!dn) {
dput(parent);
ceph_fname_free_buffer(dir, &oname);
ceph_fname_free_buffer(parent_dir, &oname);
err = -ENOMEM;
goto done;
}
@ -1576,12 +1632,12 @@ retry_lookup:
ceph_snap(d_inode(dn)) != tvino.snap)) {
dout(" dn %p points to wrong inode %p\n",
dn, d_inode(dn));
ceph_dir_clear_ordered(dir);
ceph_dir_clear_ordered(parent_dir);
d_delete(dn);
dput(dn);
goto retry_lookup;
}
ceph_fname_free_buffer(dir, &oname);
ceph_fname_free_buffer(parent_dir, &oname);
req->r_dentry = dn;
dput(parent);
@ -1763,6 +1819,9 @@ retry_lookup:
&dvino, ptvino);
}
done:
/* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */
if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent))
iput(parent_dir);
dout("fill_trace done err=%d\n", err);
return err;
}

View File

@ -2631,8 +2631,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
/**
* ceph_mdsc_build_path - build a path string to a given dentry
* @dentry: dentry to which path should be built
* @plen: returned length of string
* @pbase: returned base inode number
* @path_info: output path, length, base ino+snap, and freepath ownership flag
* @for_wire: is this path going to be sent to the MDS?
*
* Build a string that represents the path to the dentry. This is mostly called
@ -2649,7 +2648,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
char *ceph_mdsc_build_path(struct dentry *dentry, struct ceph_path_info *path_info,
int for_wire)
{
struct dentry *cur;
@ -2761,16 +2760,28 @@ retry:
goto retry;
}
*pbase = base;
*plen = PATH_MAX - 1 - pos;
/* Initialize the output structure */
memset(path_info, 0, sizeof(*path_info));
path_info->vino.ino = base;
path_info->pathlen = PATH_MAX - 1 - pos;
path_info->path = path + pos;
path_info->freepath = true;
/* Set snap from dentry if available */
if (d_inode(dentry))
path_info->vino.snap = ceph_snap(d_inode(dentry));
else
path_info->vino.snap = CEPH_NOSNAP;
dout("build_path on %p %d built %llx '%.*s'\n",
dentry, d_count(dentry), base, *plen, path + pos);
dentry, d_count(dentry), base, PATH_MAX - 1 - pos, path + pos);
return path + pos;
}
static int build_dentry_path(struct dentry *dentry, struct inode *dir,
const char **ppath, int *ppathlen, u64 *pino,
bool *pfreepath, bool parent_locked)
struct ceph_path_info *path_info,
bool parent_locked)
{
char *path;
@ -2779,40 +2790,46 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
dir = d_inode_rcu(dentry->d_parent);
if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
!IS_ENCRYPTED(dir)) {
*pino = ceph_ino(dir);
path_info->vino.ino = ceph_ino(dir);
path_info->vino.snap = ceph_snap(dir);
rcu_read_unlock();
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
path_info->path = dentry->d_name.name;
path_info->pathlen = dentry->d_name.len;
path_info->freepath = false;
return 0;
}
rcu_read_unlock();
path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
path = ceph_mdsc_build_path(dentry, path_info, 1);
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
*pfreepath = true;
/*
* ceph_mdsc_build_path already fills path_info, including snap handling.
*/
return 0;
}
static int build_inode_path(struct inode *inode,
const char **ppath, int *ppathlen, u64 *pino,
bool *pfreepath)
static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info)
{
struct dentry *dentry;
char *path;
if (ceph_snap(inode) == CEPH_NOSNAP) {
*pino = ceph_ino(inode);
*ppathlen = 0;
path_info->vino.ino = ceph_ino(inode);
path_info->vino.snap = ceph_snap(inode);
path_info->pathlen = 0;
path_info->freepath = false;
return 0;
}
dentry = d_find_alias(inode);
path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
path = ceph_mdsc_build_path(dentry, path_info, 1);
dput(dentry);
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
*pfreepath = true;
/*
* ceph_mdsc_build_path already fills path_info, including snap from dentry.
* Override with inode's snap since that's what this function is for.
*/
path_info->vino.snap = ceph_snap(inode);
return 0;
}
@ -2822,25 +2839,30 @@ static int build_inode_path(struct inode *inode,
*/
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
struct inode *rdiri, const char *rpath,
u64 rino, const char **ppath, int *pathlen,
u64 *ino, bool *freepath, bool parent_locked)
u64 rino, struct ceph_path_info *path_info,
bool parent_locked)
{
int r = 0;
/* Initialize the output structure */
memset(path_info, 0, sizeof(*path_info));
if (rinode) {
r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
r = build_inode_path(rinode, path_info);
dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
ceph_snap(rinode));
} else if (rdentry) {
r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
freepath, parent_locked);
dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
*ppath);
r = build_dentry_path(rdentry, rdiri, path_info, parent_locked);
dout(" dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino,
path_info->pathlen, path_info->path);
} else if (rpath || rino) {
*ino = rino;
*ppath = rpath;
*pathlen = rpath ? strlen(rpath) : 0;
dout(" path %.*s\n", *pathlen, rpath);
path_info->vino.ino = rino;
path_info->vino.snap = CEPH_NOSNAP;
path_info->path = rpath;
path_info->pathlen = rpath ? strlen(rpath) : 0;
path_info->freepath = false;
dout(" path %.*s\n", path_info->pathlen, rpath);
}
return r;
@ -2893,28 +2915,49 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_msg *msg;
struct ceph_mds_request_head_old *head;
const char *path1 = NULL;
const char *path2 = NULL;
u64 ino1 = 0, ino2 = 0;
int pathlen1 = 0, pathlen2 = 0;
bool freepath1 = false, freepath2 = false;
struct ceph_path_info path_info1 = {0};
struct ceph_path_info path_info2 = {0};
struct dentry *old_dentry = NULL;
int len;
u16 releases;
void *p, *end;
int ret;
bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
ret = set_request_path_attr(req->r_inode, req->r_dentry,
req->r_parent, req->r_path1, req->r_ino1.ino,
&path1, &pathlen1, &ino1, &freepath1,
test_bit(CEPH_MDS_R_PARENT_LOCKED,
&req->r_req_flags));
&path_info1, parent_locked);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
/*
* When the parent directory's i_rwsem is *not* locked, req->r_parent may
* have become stale (e.g. after a concurrent rename) between the time the
* dentry was looked up and now. If we detect that the stored r_parent
* does not match the inode number we just encoded for the request, switch
* to the correct inode so that the MDS receives a valid parent reference.
*/
if (!parent_locked && req->r_parent && path_info1.vino.ino &&
ceph_ino(req->r_parent) != path_info1.vino.ino) {
struct inode *old_parent = req->r_parent;
struct inode *correct_dir = ceph_get_inode(mdsc->fsc->sb, path_info1.vino, NULL);
if (!IS_ERR(correct_dir)) {
WARN_ONCE(1, "ceph: r_parent mismatch (had %llx wanted %llx) - updating\n",
ceph_ino(old_parent), path_info1.vino.ino);
/*
* Transfer CEPH_CAP_PIN from the old parent to the new one.
* The pin was taken earlier in ceph_mdsc_submit_request().
*/
ceph_put_cap_refs(ceph_inode(old_parent), CEPH_CAP_PIN);
iput(old_parent);
req->r_parent = correct_dir;
ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
}
}
/* If r_old_dentry is set, then assume that its parent is locked */
if (req->r_old_dentry &&
!(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
@ -2922,7 +2965,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
ret = set_request_path_attr(NULL, old_dentry,
req->r_old_dentry_dir,
req->r_path2, req->r_ino2.ino,
&path2, &pathlen2, &ino2, &freepath2, true);
&path_info2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;
@ -2939,7 +2982,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
/* filepaths */
len += 2 * (1 + sizeof(u32) + sizeof(u64));
len += pathlen1 + pathlen2;
len += path_info1.pathlen + path_info2.pathlen;
/* cap releases */
len += sizeof(struct ceph_mds_request_release) *
@ -2947,9 +2990,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
if (req->r_dentry_drop)
len += pathlen1;
len += path_info1.pathlen;
if (req->r_old_dentry_drop)
len += pathlen2;
len += path_info2.pathlen;
/* MClientRequest tail */
@ -3008,8 +3051,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
head->ino = cpu_to_le64(req->r_deleg_ino);
head->args = req->r_args;
ceph_encode_filepath(&p, end, ino1, path1);
ceph_encode_filepath(&p, end, ino2, path2);
ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path);
ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path);
/* make note of release offset, in case we need to replay */
req->r_request_release_offset = p - msg->front.iov_base;
@ -3072,11 +3115,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.data_off = cpu_to_le16(0);
out_free2:
if (freepath2)
ceph_mdsc_free_path((char *)path2, pathlen2);
ceph_mdsc_free_path_info(&path_info2);
out_free1:
if (freepath1)
ceph_mdsc_free_path((char *)path1, pathlen1);
ceph_mdsc_free_path_info(&path_info1);
out:
return msg;
out_err:
@ -4323,24 +4364,20 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
struct ceph_cap *cap;
char *path;
int pathlen = 0, err;
u64 pathbase;
struct ceph_path_info path_info = {0};
int err;
u64 snap_follows;
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
recon_state->msg_version >= 2);
char *path = ceph_mdsc_build_path(dentry, &path_info,
recon_state->msg_version >= 2);
dput(dentry);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_err;
}
} else {
path = NULL;
pathbase = 0;
}
spin_lock(&ci->i_ceph_lock);
@ -4373,7 +4410,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v2.pathbase = cpu_to_le64(pathbase);
rec.v2.pathbase = cpu_to_le64(path_info.vino.ino);
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
@ -4384,7 +4421,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v1.pathbase = cpu_to_le64(pathbase);
rec.v1.pathbase = cpu_to_le64(path_info.vino.ino);
}
if (list_empty(&ci->i_cap_snaps)) {
@ -4446,7 +4483,7 @@ encode_again:
sizeof(struct ceph_filelock);
rec.v2.flock_len = cpu_to_le32(struct_len);
struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
struct_len += sizeof(u32) + path_info.pathlen + sizeof(rec.v2);
if (struct_v >= 2)
struct_len += sizeof(u64); /* snap_follows */
@ -4470,7 +4507,7 @@ encode_again:
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_32(pagelist, struct_len);
}
ceph_pagelist_encode_string(pagelist, path, pathlen);
ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
ceph_locks_to_pagelist(flocks, pagelist,
num_fcntl_locks, num_flock_locks);
@ -4481,17 +4518,17 @@ out_freeflocks:
} else {
err = ceph_pagelist_reserve(pagelist,
sizeof(u64) + sizeof(u32) +
pathlen + sizeof(rec.v1));
path_info.pathlen + sizeof(rec.v1));
if (err)
goto out_err;
ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
ceph_pagelist_encode_string(pagelist, path, pathlen);
ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
}
out_err:
ceph_mdsc_free_path(path, pathlen);
ceph_mdsc_free_path_info(&path_info);
if (!err)
recon_state->nr_caps++;
return err;

View File

@ -577,13 +577,23 @@ extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
void *arg);
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
static inline void ceph_mdsc_free_path(char *path, int len)
/*
* Structure to group path-related output parameters for build_*_path functions
*/
struct ceph_path_info {
const char *path;
int pathlen;
struct ceph_vino vino;
bool freepath;
};
static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info)
{
if (!IS_ERR_OR_NULL(path))
__putname(path - (PATH_MAX - 1 - len));
if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path))
__putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen));
}
extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
extern char *ceph_mdsc_build_path(struct dentry *dentry, struct ceph_path_info *path_info,
int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);

View File

@ -5256,7 +5256,8 @@ void cifs_oplock_break(struct work_struct *work)
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
struct inode *inode = d_inode(cfile->dentry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct super_block *sb = inode->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
@ -5266,6 +5267,12 @@ void cifs_oplock_break(struct work_struct *work)
__u64 persistent_fid, volatile_fid;
__u16 net_fid;
/*
* Hold a reference to the superblock to prevent it and its inodes from
* being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
* may release the last reference to the sb and trigger inode eviction.
*/
cifs_sb_active(sb);
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@ -5338,6 +5345,7 @@ oplock_break_ack:
cifs_put_tlink(tlink);
out:
cifs_done_oplock_break(cinode);
cifs_sb_deactive(sb);
}
/*

View File

@ -300,8 +300,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
/* use after obtaining a reference count */
static inline bool nf_ct_should_gc(const struct nf_conn *ct)
{
return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
!nf_ct_is_dying(ct);
if (!nf_ct_is_confirmed(ct))
return false;
/* load ct->timeout after is_confirmed() test.
* Pairs with __nf_conntrack_confirm() which:
* 1. Increases ct->timeout value
* 2. Inserts ct into rcu hlist
* 3. Sets the confirmed bit
* 4. Unlocks the hlist lock
*/
smp_acquire__after_ctrl_dep();
return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
}
#define NF_CT_DAY (86400 * HZ)

View File

@ -40,6 +40,7 @@
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include RH_KABI_HIDE_INCLUDE(<net/xfrm.h>)
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
@ -637,6 +638,19 @@ void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);
static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
skb_dst_drop(skb);
secpath_reset(skb);
}
static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)

View File

@ -337,6 +337,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
goto done_unlock;
}
req->flags |= REQ_F_ASYNC_DATA;
req->async_data = ifd;
ifd->q = futex_q_init;
ifd->q.bitset = iof->futex_mask;
@ -359,6 +360,8 @@ done:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
req->async_data = NULL;
req->flags &= ~REQ_F_ASYNC_DATA;
kfree(ifd);
return IOU_OK;
}

View File

@ -1913,6 +1913,26 @@ static int find_lowest_rq(struct task_struct *task)
return -1;
}
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
return p;
}
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
@ -1943,18 +1963,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
/*
* We had to unlock the run queue. In
* the mean time, task could have
* migrated already or had its affinity changed.
* Also make sure that it wasn't scheduled on its rq.
* migrated already or had its affinity changed,
* therefore check if the task is still at the
* head of the pushable tasks list.
* It is possible the task was scheduled, set
* "migrate_disabled" and then got preempted, so we must
* check the task migration disable flag here too.
*/
if (unlikely(task_rq(task) != rq ||
if (unlikely(is_migration_disabled(task) ||
!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
task_on_cpu(rq, task) ||
!rt_task(task) ||
is_migration_disabled(task) ||
!task_on_rq_queued(task))) {
task != pick_next_pushable_task(rq))) {
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
@ -1974,26 +1992,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
return lowest_rq;
}
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
return p;
}
/*
* If the current CPU has more than one RT task, see if the non
* running task can migrate over to a CPU that is running a task

View File

@ -204,6 +204,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
return -EINVAL;
if (skb_is_gso(skb))
skb_gso_reset(skb);
skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
pskb_pull(skb, ETH_HLEN);
skb_reset_network_header(skb);
@ -298,6 +301,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
return -EINVAL;
if (skb_is_gso(skb))
skb_gso_reset(skb);
skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
pskb_pull(skb, ETH_HLEN);
skb_reset_network_header(skb);

View File

@ -194,7 +194,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
if (!skb)
return;
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
/* segs_in has been initialized to 1 in tcp_create_openreq_child().
* Hence, reset segs_in to 0 before calling tcp_segs_in()
* to avoid double counting. Also, tcp_segs_in() expects
@ -211,7 +211,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
__skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_add_receive_queue(sk, skb);
tp->syn_data_acked = 1;
/* u64_stats_update_begin(&tp->syncp) not needed here,

View File

@ -4809,7 +4809,7 @@ static void tcp_ofo_queue(struct sock *sk)
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_add_receive_queue(sk, skb);
else
kfree_skb_partial(skb, fragstolen);
@ -5000,7 +5000,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
skb, fragstolen)) ? 1 : 0;
tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
if (!eaten) {
__skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_add_receive_queue(sk, skb);
skb_set_owner_r(skb, sk);
}
return eaten;
@ -5083,7 +5083,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
__kfree_skb(skb);
return;
}
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
reason = SKB_DROP_REASON_NOT_SPECIFIED;
@ -5994,7 +5994,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
__skb_pull(skb, tcp_header_len);
eaten = tcp_queue_rcv(sk, skb, &fragstolen);

View File

@ -1773,7 +1773,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
*/
skb_condense(skb);
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
if (unlikely(tcp_checksum_complete(skb))) {
bh_unlock_sock(sk);

View File

@ -1087,6 +1087,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&nf_conntrack_hash[repl_idx]);
/* confirmed bit must be set after hlist add, not before:
* loser_ct can still be visible to other cpu due to
* SLAB_TYPESAFE_BY_RCU.
*/
smp_mb__before_atomic();
set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
@ -1224,8 +1230,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
* user context, else we insert an already 'dead' hash, blocking
* further use of that particular connection -JM.
*/
ct->status |= IPS_CONFIRMED;
if (unlikely(nf_ct_is_dying(ct))) {
NF_CT_STAT_INC(net, insert_failed);
goto dying;
@ -1257,7 +1261,7 @@ chaintoolong:
}
}
/* Timer relative to confirmation time, not original
/* Timeout is relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
ct->timeout += nfct_time_stamp;
@ -1265,11 +1269,21 @@ chaintoolong:
__nf_conntrack_insert_prepare(ct);
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
* guarantee that no other CPU can find the conntrack before the above
* stores are visible.
* setting ct->timeout. The RCU barriers guarantee that no other CPU
* can find the conntrack before the above stores are visible.
*/
__nf_conntrack_hash_insert(ct, hash, reply_hash);
/* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
* skip entries that lack this bit. This happens when a CPU is looking
* at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
* or when another CPU encounters this entry right after the insertion
* but before the set-confirm-bit below. This bit must not be set until
* after __nf_conntrack_hash_insert().
*/
smp_mb__before_atomic();
set_bit(IPS_CONFIRMED_BIT, &ct->status);
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();

View File

@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb)
* it's better to just linearize it otherwise crc computing
* takes longer.
*/
if ((!is_gso && skb_linearize(skb)) ||
if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
!pskb_may_pull(skb, sizeof(struct sctphdr)))
goto discard_it;

View File

@ -1773,6 +1773,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
/* All records returned from a recvmsg() call must have the same type.
* 0 is not a valid content type. Use it as "no type reported, yet".
*/
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
u8 *control)
{
@ -2019,8 +2022,10 @@ int tls_sw_recvmsg(struct sock *sk,
if (err < 0)
goto end;
/* process_rx_list() will set @control if it processed any records */
copied = err;
if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
if (len <= copied || rx_more ||
(control && control != TLS_RECORD_TYPE_DATA))
goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

View File

@ -1,3 +1,43 @@
* Tue Sep 23 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.49.1.el9_6]
- io_uring/futex: ensure io_futex_wait() cleans up properly on failure (CKI Backport Bot) [RHEL-114335] {CVE-2025-39698}
- selftests: tls: add tests for zero-length records (Sabrina Dubroca) [RHEL-114326] {CVE-2025-39682}
- tls: fix handling of zero-length records on the rx_list (Sabrina Dubroca) [RHEL-114326] {CVE-2025-39682}
Resolves: RHEL-114326, RHEL-114335
* Sat Sep 20 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.48.1.el9_6]
- perf trace: Add missing perf_tool__init() (Michael Petlan) [RHEL-105393]
- ceph: fix client race condition where r_parent becomes stale before sending message (Alex Markuze) [RHEL-114962]
- ceph: fix client race condition validating r_parent before applying state (Alex Markuze) [RHEL-114962]
Resolves: RHEL-105393, RHEL-114962
* Thu Sep 18 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.47.1.el9_6]
- tunnels: reset the GSO metadata before reusing the skb (Antoine Tenart) [RHEL-113916]
- sctp: linearize cloned gso packets in sctp_rcv (CKI Backport Bot) [RHEL-113333] {CVE-2025-38718}
- ice: fix max values for dpll pin phase adjust (Petr Oros) [RHEL-113039]
- ice/ptp: fix crosstimestamp reporting (Petr Oros) [RHEL-112558]
- ice: fix NULL access of tx->in_use in ice_ll_ts_intr (Petr Oros) [RHEL-112873]
- ice: fix NULL access of tx->in_use in ice_ptp_ts_irq (Petr Oros) [RHEL-112873]
- ice: Implement PTP support for E830 devices (Petr Oros) [RHEL-112558]
- ice: Refactor ice_ptp_init_tx_* (Petr Oros) [RHEL-112558]
- ice: Add unified ice_capture_crosststamp (Petr Oros) [RHEL-112558]
- ice: Process TSYN IRQ in a separate function (Petr Oros) [RHEL-112558]
- ice: Use FIELD_PREP for timestamp values (Petr Oros) [RHEL-112558]
- ice: Remove unnecessary ice_is_e8xx() functions (Petr Oros) [RHEL-112558]
- ice: Don't check device type when checking GNSS presence (Petr Oros) [RHEL-112558]
- ice: Add in/out PTP pin delays (Petr Oros) [RHEL-112558]
- ice: fix PHY timestamp extraction for ETH56G (Petr Oros) [RHEL-112558]
- ice: Add correct PHY lane assignment (Petr Oros) [RHEL-112683]
- ice: Fix ETH56G FC-FEC Rx offset value (Petr Oros) [RHEL-112683]
- ice: Fix quad registers read on E825 (Petr Oros) [RHEL-112683]
- ice: Fix E825 initialization (Petr Oros) [RHEL-112683]
- tcp: drop secpath at the same time as we currently drop dst (Sabrina Dubroca) [RHEL-82136]
- smb: client: fix use-after-free in cifs_oplock_break (CKI Backport Bot) [RHEL-111196] {CVE-2025-38527}
- i40e: When removing VF MAC filters, only check PF-set MAC (CKI Backport Bot) [RHEL-109571]
- cpufreq/cppc: Don't compare desired_perf in target() (Mark Langsdorf) [RHEL-109525]
- netfilter: nf_conntrack: fix crash due to removal of uninitialised entry (CKI Backport Bot) [RHEL-106432] {CVE-2025-38472}
- sched/rt: Fix race in push_rt_task (Phil Auld) [RHEL-91800]
Resolves: RHEL-106432, RHEL-109525, RHEL-109571, RHEL-111196, RHEL-112558, RHEL-112683, RHEL-112873, RHEL-113039, RHEL-113333, RHEL-113916, RHEL-82136, RHEL-91800
* Tue Sep 16 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.46.1.el9_6]
- net: usb: smsc75xx: Move packet length check to prevent kernel panic in skb_pull (CKI Backport Bot) [RHEL-112246] {CVE-2023-53125}
- net: usb: smsc75xx: Limit packet length to skb->len (CKI Backport Bot) [RHEL-112246] {CVE-2023-53125}

View File

@ -4525,6 +4525,7 @@ static int trace__replay(struct trace *trace)
struct evsel *evsel;
int err = -1;
perf_tool__init(&trace->tool, /*ordered_events=*/true);
trace->tool.sample = trace__process_sample;
trace->tool.mmap = perf_event__process_mmap;
trace->tool.mmap2 = perf_event__process_mmap2;

View File

@ -155,13 +155,12 @@ static int tls_send_cmsg(int fd, unsigned char record_type,
return sendmsg(fd, &msg, flags);
}
static int tls_recv_cmsg(struct __test_metadata *_metadata,
int fd, unsigned char record_type,
void *data, size_t len, int flags)
static int __tls_recv_cmsg(struct __test_metadata *_metadata,
int fd, unsigned char *ctype,
void *data, size_t len, int flags)
{
char cbuf[CMSG_SPACE(sizeof(char))];
struct cmsghdr *cmsg;
unsigned char ctype;
struct msghdr msg;
struct iovec vec;
int n;
@ -180,7 +179,20 @@ static int tls_recv_cmsg(struct __test_metadata *_metadata,
EXPECT_NE(cmsg, NULL);
EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
ctype = *((unsigned char *)CMSG_DATA(cmsg));
if (ctype)
*ctype = *((unsigned char *)CMSG_DATA(cmsg));
return n;
}
static int tls_recv_cmsg(struct __test_metadata *_metadata,
int fd, unsigned char record_type,
void *data, size_t len, int flags)
{
unsigned char ctype;
int n;
n = __tls_recv_cmsg(_metadata, fd, &ctype, data, len, flags);
EXPECT_EQ(ctype, record_type);
return n;
@ -1599,6 +1611,283 @@ TEST_F(tls, recv_efault)
EXPECT_EQ(memcmp(rec2, recv_mem + 9, ret - 9), 0);
}
struct raw_rec {
unsigned int plain_len;
unsigned char plain_data[100];
unsigned int cipher_len;
unsigned char cipher_data[128];
};
/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: 'Hello world' */
static const struct raw_rec id0_data_l11 = {
.plain_len = 11,
.plain_data = {
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
0x72, 0x6c, 0x64,
},
.cipher_len = 40,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xa2, 0x33,
0xde, 0x8d, 0x94, 0xf0, 0x29, 0x6c, 0xb1, 0xaf,
0x6a, 0x75, 0xb2, 0x93, 0xad, 0x45, 0xd5, 0xfd,
0x03, 0x51, 0x57, 0x8f, 0xf9, 0xcc, 0x3b, 0x42,
},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:0, plaintext: '' */
static const struct raw_rec id0_ctrl_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x38, 0x7b,
0xa6, 0x1c, 0xdd, 0xa7, 0x19, 0x33, 0xab, 0xae,
0x88, 0xe1, 0xd2, 0x08, 0x4f,
},
};
/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: '' */
static const struct raw_rec id0_data_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x37, 0x90,
0x70, 0x45, 0x89, 0xfb, 0x5c, 0xc7, 0x89, 0x03,
0x68, 0x80, 0xd3, 0xd8, 0xcc,
},
};
/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: 'Hello world' */
static const struct raw_rec id1_data_l11 = {
.plain_len = 11,
.plain_data = {
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
0x72, 0x6c, 0x64,
},
.cipher_len = 40,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x1a, 0x9c,
0xd0, 0xa8, 0x9a, 0xd6, 0x69, 0xd6, 0x1a, 0xe3,
0xb5, 0x1f, 0x0d, 0x2c, 0xe2, 0x97, 0x46, 0xff,
0x2b, 0xcc, 0x5a, 0xc4, 0xa3, 0xb9, 0xef, 0xba,
},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:1, plaintext: '' */
static const struct raw_rec id1_ctrl_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0xf0, 0xfe,
0xee, 0xd9, 0xe2, 0x5d, 0xc7, 0x11, 0x4c, 0xe6,
0xb4, 0x7e, 0xef, 0x40, 0x2b,
},
};
/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: '' */
static const struct raw_rec id1_data_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0xfc, 0x86,
0xc8, 0xf0, 0x55, 0xf9, 0x47, 0x3f, 0x74, 0xdc,
0xc9, 0xbf, 0xfe, 0x5b, 0xb1,
},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: 'Hello world' */
/* 11-byte control record (type 0x16); seqno 2 in the explicit nonce (byte 12). */
static const struct raw_rec id2_ctrl_l11 = {
	.plain_len = 11,
	.plain_data = {
		0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
		0x72, 0x6c, 0x64,
	},
	.cipher_len = 40,
	.cipher_data = {
		0x16, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19,
		0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87,
		0x2a, 0x04, 0x11, 0x3d, 0xf8, 0x64, 0x5f, 0x36,
		0x8b, 0xa8, 0xee, 0x4c, 0x6d, 0x62, 0xa5, 0x00,
	},
};
/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: 'Hello world' */
/* Same seqno/plaintext as id2_ctrl_l11 but record type 0x17 (application data). */
static const struct raw_rec id2_data_l11 = {
	.plain_len = 11,
	.plain_data = {
		0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
		0x72, 0x6c, 0x64,
	},
	.cipher_len = 40,
	.cipher_data = {
		0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19,
		0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87,
		0x8e, 0xa1, 0xd0, 0xcd, 0x33, 0xb5, 0x86, 0x2b,
		0x17, 0xf1, 0x52, 0x2a, 0x55, 0x62, 0x65, 0x11,
	},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: '' */
/* Zero-payload control record; seqno 2 in the explicit nonce (byte 12). */
static const struct raw_rec id2_ctrl_l0 = {
	.plain_len = 0,
	.plain_data = {
	},
	.cipher_len = 29,
	.cipher_data = {
		0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x5c, 0x0e,
		0x41, 0xdd, 0xba, 0xd3, 0xcc, 0xcf, 0x6d, 0xd9,
		0x06, 0xdb, 0x79, 0xe5, 0x5d,
	},
};
/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: '' */
/* Zero-payload application-data record; seqno 2 in the explicit nonce (byte 12). */
static const struct raw_rec id2_data_l0 = {
	.plain_len = 0,
	.plain_data = {
	},
	.cipher_len = 29,
	.cipher_data = {
		0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x02, 0xc3, 0xca, 0x26,
		0x22, 0xe4, 0x25, 0xfb, 0x5f, 0x6d, 0xbf, 0x83,
		0x30, 0x48, 0x69, 0x1a, 0x47,
	},
};
/* Per-test state for the zero-length record tests: a connected socket
 * pair (fd sends raw pre-encrypted records, cfd receives via kTLS) and
 * a flag set when the kernel lacks TLS ULP support.
 */
FIXTURE(zero_len)
{
	int fd, cfd;
	bool notls;
};
/* Each variant lists up to 4 raw records to send and the expected
 * return value of each successive recv(); -EAGAIN marks the point
 * where no further records are available.
 */
FIXTURE_VARIANT(zero_len)
{
	const struct raw_rec *recs[4];
	ssize_t recv_ret[4];
};
/* Three 11-byte data records coalesce into a single 33-byte read. */
FIXTURE_VARIANT_ADD(zero_len, data_data_data)
{
	.recs = { &id0_data_l11, &id1_data_l11, &id2_data_l11, },
	.recv_ret = { 33, -EAGAIN, },
};
/* A zero-length ctrl record breaks coalescing: data, then a 0-byte
 * read for the ctrl record, then the remaining data.
 */
FIXTURE_VARIANT_ADD(zero_len, data_0ctrl_data)
{
	.recs = { &id0_data_l11, &id1_ctrl_l0, &id2_data_l11, },
	.recv_ret = { 11, 0, 11, -EAGAIN, },
};
/* Zero-length data records carry no payload: recv() sees nothing
 * and reports -EAGAIN immediately.
 */
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0data)
{
	.recs = { &id0_data_l0, &id1_data_l0, &id2_data_l0, },
	.recv_ret = { -EAGAIN, },
};
/* Presumably the trailing ctrl record terminates the (empty) data run,
 * yielding a 0-byte read, then the 11-byte ctrl payload.
 */
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_ctrl)
{
	.recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l11, },
	.recv_ret = { 0, 11, -EAGAIN, },
};
/* Empty data run terminated by a zero-length ctrl record: two 0-byte
 * reads (one for the data run, one for the ctrl record).
 */
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0ctrl)
{
	.recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l0, },
	.recv_ret = { 0, 0, -EAGAIN, },
};
/* Ctrl records are never coalesced: each zero-length ctrl record is
 * delivered as its own 0-byte read.
 */
FIXTURE_VARIANT_ADD(zero_len, 0ctrl_0ctrl_0ctrl)
{
	.recs = { &id0_ctrl_l0, &id1_ctrl_l0, &id2_ctrl_l0, },
	.recv_ret = { 0, 0, 0, -EAGAIN, },
};
/* Leading zero-length data records merge into the following data
 * record: a single 11-byte read.
 */
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_data)
{
	.recs = { &id0_data_l0, &id1_data_l0, &id2_data_l11, },
	.recv_ret = { 11, -EAGAIN, },
};
/* Trailing zero-length data records add nothing to the initial data
 * record: a single 11-byte read.
 */
FIXTURE_VARIANT_ADD(zero_len, data_0data_0data)
{
	.recs = { &id0_data_l11, &id1_data_l0, &id2_data_l0, },
	.recv_ret = { 11, -EAGAIN, },
};
/* Create the socket pair and install RX keys on cfd only: fd stays a
 * plain TCP socket so the test can inject pre-encrypted raw records.
 */
FIXTURE_SETUP(zero_len)
{
	struct tls_crypto_info_keys tls12;
	int ret;
	tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128, &tls12);
	ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
	/* Kernel without TLS ULP: nothing to set up, tests become no-ops. */
	if (self->notls)
		return;
	/* Don't install keys on fd, we'll send raw records */
	ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);
}
/* Release both ends of the socket pair. */
FIXTURE_TEARDOWN(zero_len)
{
	close(self->fd);
	close(self->cfd);
}
/* Inject each variant's raw records on the plain side, then verify
 * recv() on the TLS socket returns exactly the byte counts listed in
 * recv_ret[] and that the decrypted bytes match the concatenation of
 * the records' plaintexts.
 *
 * Fix: dropped the stray ';' after the closing brace — TEST_F() expands
 * to a function definition, so the semicolon was an empty file-scope
 * declaration (rejected by -Wpedantic before C23).
 */
TEST_F(zero_len, test)
{
	const struct raw_rec *const *rec;
	unsigned char buf[128];
	int rec_off;
	int i;
	/* Queue every record; fd has no TX keys so bytes go out verbatim. */
	for (i = 0; i < 4 && variant->recs[i]; i++)
		EXPECT_EQ(send(self->fd, variant->recs[i]->cipher_data,
			       variant->recs[i]->cipher_len, 0),
			  variant->recs[i]->cipher_len);
	rec = &variant->recs[0];
	rec_off = 0;
	for (i = 0; i < 4; i++) {
		int j, ret;
		/* Negative expectations (e.g. -EAGAIN) surface as -1 + errno. */
		ret = variant->recv_ret[i] >= 0 ? variant->recv_ret[i] : -1;
		EXPECT_EQ(__tls_recv_cmsg(_metadata, self->cfd, NULL,
					  buf, sizeof(buf), MSG_DONTWAIT), ret);
		if (ret == -1)
			EXPECT_EQ(errno, -variant->recv_ret[i]);
		if (variant->recv_ret[i] == -EAGAIN)
			break;
		/* Walk the expected plaintext across record boundaries,
		 * skipping records with no payload.
		 */
		for (j = 0; j < ret; j++) {
			while (rec_off == (*rec)->plain_len) {
				rec++;
				rec_off = 0;
			}
			EXPECT_EQ(buf[j], (*rec)->plain_data[rec_off]);
			rec_off++;
		}
	}
}
FIXTURE(tls_err)
{
int fd, cfd;