Import of kernel-6.12.0-55.38.1.el10_0
parent 25c106a7ce
commit dfbe561888
@@ -12,7 +12,7 @@ RHEL_MINOR = 0
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 55.37.1
+RHEL_RELEASE = 55.38.1
 
 #
 # RHEL_REBASE_NUM
@@ -73,10 +73,6 @@ struct blkcg_gq {
 	struct blkg_iostat_set iostat;
 
 	struct blkg_policy_data *pd[BLKCG_MAX_POLS];
-#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
-	RH_KABI_EXTEND(spinlock_t async_bio_lock)
-	RH_KABI_EXTEND(struct bio_list async_bios)
-#endif
 	union {
 		struct work_struct async_bio_work;
 		struct work_struct free_work;
@@ -89,6 +85,10 @@ struct blkcg_gq {
 	int last_use;
 
 	struct rcu_head rcu_head;
+#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
+	RH_KABI_EXTEND(spinlock_t async_bio_lock)
+	RH_KABI_EXTEND(struct bio_list async_bios)
+#endif
 };
 
 struct blkcg {
@@ -1599,6 +1599,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	int count;
 	int rq_flushed = 0, sq_flushed;
 	unsigned long flag;
+	struct ib_event ev;
 
 	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
 
@@ -1607,6 +1608,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	if (schp != rchp)
 		spin_lock(&schp->lock);
 	spin_lock(&qhp->lock);
+	if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
+	    qhp->ibqp.event_handler) {
+		ev.device = qhp->ibqp.device;
+		ev.element.qp = &qhp->ibqp;
+		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
+	}
 
 	if (qhp->wq.flushed) {
 		spin_unlock(&qhp->lock);
@@ -51,6 +51,38 @@ static const struct ib_device_ops mana_ib_dev_ops = {
 			ib_ind_table),
 };
 
+static int mana_ib_netdev_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	struct mana_ib_dev *dev = container_of(this, struct mana_ib_dev, nb);
+	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+	struct gdma_context *gc = dev->gdma_dev->gdma_context;
+	struct mana_context *mc = gc->mana.driver_data;
+	struct net_device *ndev;
+
+	/* Only process events from our parent device */
+	if (event_dev != mc->ports[0])
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
+		/*
+		 * RDMA core will setup GID based on updated netdev.
+		 * It's not possible to race with the core as rtnl lock is being
+		 * held.
+		 */
+		ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+
+		/* mana_get_primary_netdev() returns ndev with refcount held */
+		netdev_put(ndev, &dev->dev_tracker);
+
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
 static int mana_ib_probe(struct auxiliary_device *adev,
 			 const struct auxiliary_device_id *id)
 {
@@ -84,10 +116,8 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 	dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
 	dev->ib_dev.dev.parent = mdev->gdma_context->dev;
 
-	rcu_read_lock(); /* required to get primary netdev */
-	ndev = mana_get_primary_netdev_rcu(mc, 0);
+	ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
 	if (!ndev) {
-		rcu_read_unlock();
 		ret = -ENODEV;
 		ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
 		goto free_ib_device;
@@ -95,7 +125,8 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 	ether_addr_copy(mac_addr, ndev->dev_addr);
 	addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
 	ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
-	rcu_read_unlock();
+	/* mana_get_primary_netdev() returns ndev with refcount held */
+	netdev_put(ndev, &dev->dev_tracker);
 	if (ret) {
 		ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
 		goto free_ib_device;
@@ -109,17 +140,25 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 	}
 	dev->gdma_dev = &mdev->gdma_context->mana_ib;
 
+	dev->nb.notifier_call = mana_ib_netdev_event;
+	ret = register_netdevice_notifier(&dev->nb);
+	if (ret) {
+		ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
+			  ret);
+		goto deregister_device;
+	}
+
 	ret = mana_ib_gd_query_adapter_caps(dev);
 	if (ret) {
 		ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
 			  ret);
-		goto deregister_device;
+		goto deregister_net_notifier;
 	}
 
 	ret = mana_ib_create_eqs(dev);
 	if (ret) {
 		ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
-		goto deregister_device;
+		goto deregister_net_notifier;
 	}
 
 	ret = mana_ib_gd_create_rnic_adapter(dev);
@@ -148,6 +187,8 @@ destroy_rnic:
 	mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
 	mana_ib_destroy_eqs(dev);
+deregister_net_notifier:
+	unregister_netdevice_notifier(&dev->nb);
deregister_device:
 	mana_gd_deregister_device(dev->gdma_dev);
free_ib_device:
@@ -163,6 +204,7 @@ static void mana_ib_remove(struct auxiliary_device *adev)
 	xa_destroy(&dev->qp_table_wq);
 	mana_ib_gd_destroy_rnic_adapter(dev);
 	mana_ib_destroy_eqs(dev);
+	unregister_netdevice_notifier(&dev->nb);
 	mana_gd_deregister_device(dev->gdma_dev);
 	ib_dealloc_device(&dev->ib_dev);
 }
@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
 
 	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
 	req.num_resources = 1;
-	req.alignment = 1;
+	req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
 
 	/* Have GDMA start searching from 0 */
 	req.allocated_resources = 0;
@@ -358,7 +358,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem
 	unsigned int tail = 0;
 	u64 *page_addr_list;
 	void *request_buf;
-	int err;
+	int err = 0;
 
 	gc = mdev_to_gc(dev);
 	hwc = gc->hwc.driver_data;
@@ -634,7 +634,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
 			     sizeof(resp));
-	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
+	req.hdr.resp.msg_version = GDMA_MESSAGE_V4;
 	req.hdr.dev_id = dev->gdma_dev->dev_id;
 
 	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
@@ -663,6 +663,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
 	caps->max_inline_data_size = resp.max_inline_data_size;
 	caps->max_send_sge_count = resp.max_send_sge_count;
 	caps->max_recv_sge_count = resp.max_recv_sge_count;
+	caps->feature_flags = resp.feature_flags;
 
 	return 0;
 }
@@ -762,6 +763,9 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
 	req.hdr.dev_id = gc->mana_ib.dev_id;
 	req.notify_eq_id = mdev->fatal_err_eq->id;
 
+	if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
+		req.feature_flags |= MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST;
+
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 	if (err) {
 		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
@@ -48,6 +48,7 @@ struct mana_ib_adapter_caps {
 	u32 max_send_sge_count;
 	u32 max_recv_sge_count;
 	u32 max_inline_data_size;
+	u64 feature_flags;
 };
 
 struct mana_ib_queue {
@@ -64,6 +65,8 @@ struct mana_ib_dev {
 	struct gdma_queue **eqs;
 	struct xarray qp_table_wq;
 	struct mana_ib_adapter_caps adapter_caps;
+	netdevice_tracker dev_tracker;
+	struct notifier_block nb;
 };
 
 struct mana_ib_wq {
@@ -156,6 +159,10 @@ struct mana_ib_query_adapter_caps_req {
 	struct gdma_req_hdr hdr;
 }; /*HW Data */
 
+enum mana_ib_adapter_features {
+	MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+};
+
 struct mana_ib_query_adapter_caps_resp {
 	struct gdma_resp_hdr hdr;
 	u32 max_sq_id;
@@ -176,8 +183,13 @@ struct mana_ib_query_adapter_caps_resp {
 	u32 max_send_sge_count;
 	u32 max_recv_sge_count;
 	u32 max_inline_data_size;
+	u64 feature_flags;
 }; /* HW Data */
 
+enum mana_ib_adapter_features_request {
+	MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST = BIT(1),
+}; /*HW Data */
+
 struct mana_rnic_create_adapter_req {
 	struct gdma_req_hdr hdr;
 	u32 notify_eq_id;
@@ -561,7 +561,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
 		req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
 							  ibqp->qp_num, attr->dest_qp_num);
-		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
+		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
 		req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
 	}
 
@@ -8,6 +8,7 @@
 #include <linux/version.h>
 
 #include <net/mana/mana.h>
+#include <net/mana/hw_channel.h>
 
 struct dentry *mana_debugfs_root;
 
@@ -31,6 +32,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
 	gc->db_page_base = gc->bar0_va +
 		mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
 
+	gc->phys_db_page_base = gc->bar0_pa +
+		mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
 	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
 
 	sriov_base_va = gc->bar0_va + sriov_base_off;
@@ -63,6 +67,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
 		mana_gd_init_vf_regs(pdev);
 }
 
+/* Suppress logging when we set timeout to zero */
+bool mana_need_log(struct gdma_context *gc, int err)
+{
+	struct hw_channel_context *hwc;
+
+	if (err != -ETIMEDOUT)
+		return true;
+
+	if (!gc)
+		return true;
+
+	hwc = gc->hwc.driver_data;
+	if (hwc && hwc->hwc_timeout == 0)
+		return false;
+
+	return true;
+}
+
 static int mana_gd_query_max_resources(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -134,9 +156,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
 	struct gdma_list_devices_resp resp = {};
 	struct gdma_general_req req = {};
 	struct gdma_dev_id dev;
-	u32 i, max_num_devs;
+	int found_dev = 0;
 	u16 dev_type;
 	int err;
+	u32 i;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
 			     sizeof(resp));
@@ -148,12 +171,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
 		return err ? err : -EPROTO;
 	}
 
-	max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
-	for (i = 0; i < max_num_devs; i++) {
+	for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+	     found_dev < resp.num_of_devs; i++) {
 		dev = resp.devs[i];
 		dev_type = dev.type;
 
+		/* Skip empty devices */
+		if (dev.as_uint32 == 0)
+			continue;
+
+		found_dev++;
+
 		/* HWC is already detected in mana_hwc_create_channel(). */
 		if (dev_type == GDMA_DEVICE_HWC)
 			continue;
@@ -260,8 +288,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 	if (err || resp.hdr.status) {
-		dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
-			resp.hdr.status);
+		if (mana_need_log(gc, err))
+			dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+				resp.hdr.status);
 		return err ? err : -EPROTO;
 	}
 
@@ -344,11 +373,113 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
 			  head, arm_bit);
 }
 
+#define MANA_SERVICE_PERIOD 10
+
+static void mana_serv_fpga(struct pci_dev *pdev)
+{
+	struct pci_bus *bus, *parent;
+
+	pci_lock_rescan_remove();
+
+	bus = pdev->bus;
+	if (!bus) {
+		dev_err(&pdev->dev, "MANA service: no bus\n");
+		goto out;
+	}
+
+	parent = bus->parent;
+	if (!parent) {
+		dev_err(&pdev->dev, "MANA service: no parent bus\n");
+		goto out;
+	}
+
+	pci_stop_and_remove_bus_device(bus->self);
+
+	msleep(MANA_SERVICE_PERIOD * 1000);
+
+	pci_rescan_bus(parent);
+
+out:
+	pci_unlock_rescan_remove();
+}
+
+static void mana_serv_reset(struct pci_dev *pdev)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	struct hw_channel_context *hwc;
+
+	if (!gc) {
+		dev_err(&pdev->dev, "MANA service: no GC\n");
+		return;
+	}
+
+	hwc = gc->hwc.driver_data;
+	if (!hwc) {
+		dev_err(&pdev->dev, "MANA service: no HWC\n");
+		goto out;
+	}
+
+	/* HWC is not responding in this case, so don't wait */
+	hwc->hwc_timeout = 0;
+
+	dev_info(&pdev->dev, "MANA reset cycle start\n");
+
+	mana_gd_suspend(pdev, PMSG_SUSPEND);
+
+	msleep(MANA_SERVICE_PERIOD * 1000);
+
+	mana_gd_resume(pdev);
+
+	dev_info(&pdev->dev, "MANA reset cycle completed\n");
+
+out:
+	gc->in_service = false;
+}
+
+struct mana_serv_work {
+	struct work_struct serv_work;
+	struct pci_dev *pdev;
+	enum gdma_eqe_type type;
+};
+
+static void mana_serv_func(struct work_struct *w)
+{
+	struct mana_serv_work *mns_wk;
+	struct pci_dev *pdev;
+
+	mns_wk = container_of(w, struct mana_serv_work, serv_work);
+	pdev = mns_wk->pdev;
+
+	if (!pdev)
+		goto out;
+
+	switch (mns_wk->type) {
+	case GDMA_EQE_HWC_FPGA_RECONFIG:
+		mana_serv_fpga(pdev);
+		break;
+
+	case GDMA_EQE_HWC_RESET_REQUEST:
+		mana_serv_reset(pdev);
+		break;
+
+	default:
+		dev_err(&pdev->dev, "MANA service: unknown type %d\n",
+			mns_wk->type);
+		break;
+	}
+
+out:
+	pci_dev_put(pdev);
+	kfree(mns_wk);
+	module_put(THIS_MODULE);
+}
+
 static void mana_gd_process_eqe(struct gdma_queue *eq)
 {
 	u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
 	struct gdma_context *gc = eq->gdma_dev->gdma_context;
 	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+	struct mana_serv_work *mns_wk;
 	union gdma_eqe_info eqe_info;
 	enum gdma_eqe_type type;
 	struct gdma_event event;
@@ -392,6 +523,35 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
 			eq->eq.callback(eq->eq.context, eq, &event);
 		break;
 
+	case GDMA_EQE_HWC_FPGA_RECONFIG:
+	case GDMA_EQE_HWC_RESET_REQUEST:
+		dev_info(gc->dev, "Recv MANA service type:%d\n", type);
+
+		if (gc->in_service) {
+			dev_info(gc->dev, "Already in service\n");
+			break;
+		}
+
+		if (!try_module_get(THIS_MODULE)) {
+			dev_info(gc->dev, "Module is unloading\n");
+			break;
+		}
+
+		mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+		if (!mns_wk) {
+			module_put(THIS_MODULE);
+			break;
+		}
+
+		dev_info(gc->dev, "Start MANA service type:%d\n", type);
+		gc->in_service = true;
+		mns_wk->pdev = to_pci_dev(gc->dev);
+		mns_wk->type = type;
+		pci_dev_get(mns_wk->pdev);
+		INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+		schedule_work(&mns_wk->serv_work);
+		break;
+
 	default:
 		break;
 	}
@@ -533,7 +693,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 	if (err) {
-		dev_err(dev, "test_eq failed: %d\n", err);
+		if (mana_need_log(gc, err))
+			dev_err(dev, "test_eq failed: %d\n", err);
 		goto out;
 	}
 
@@ -568,7 +729,7 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
 
 	if (flush_evenets) {
 		err = mana_gd_test_eq(gc, queue);
-		if (err)
+		if (err && mana_need_log(gc, err))
 			dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
 	}
 
@@ -666,8 +827,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
 
 	gmi = &queue->mem_info;
 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+			spec->type, spec->queue_size, err);
 		goto free_q;
+	}
 
 	queue->head = 0;
 	queue->tail = 0;
@@ -688,6 +852,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
 	*queue_ptr = queue;
 	return 0;
out:
+	dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+		spec->type, spec->queue_size, err);
 	mana_gd_free_memory(gmi);
free_q:
 	kfree(queue);
@@ -709,8 +875,9 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 	if (err || resp.hdr.status) {
-		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
-			err, resp.hdr.status);
+		if (mana_need_log(gc, err))
+			dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+				err, resp.hdr.status);
 		return -EPROTO;
 	}
 
@@ -770,7 +937,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	}
 
 	gmi->dma_region_handle = resp.dma_region_handle;
+	dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+		gmi->dma_region_handle);
out:
+	if (err)
+		dev_dbg(gc->dev,
+			"Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+			length, req->gdma_page_type, resp.hdr.status, err);
 	kfree(req);
 	return err;
 }
@@ -793,8 +966,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
 
 	gmi = &queue->mem_info;
 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+			spec->type, spec->queue_size, err);
 		goto free_q;
+	}
 
 	err = mana_gd_create_dma_region(gd, gmi);
 	if (err)
@@ -815,6 +991,8 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
 	*queue_ptr = queue;
 	return 0;
out:
+	dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+		spec->type, spec->queue_size, err);
 	mana_gd_free_memory(gmi);
free_q:
 	kfree(queue);
@@ -841,8 +1019,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
 
 	gmi = &queue->mem_info;
 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+			spec->type, spec->queue_size, err);
 		goto free_q;
+	}
 
 	err = mana_gd_create_dma_region(gd, gmi);
 	if (err)
@@ -862,6 +1043,8 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
 	*queue_ptr = queue;
 	return 0;
out:
+	dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+		spec->type, spec->queue_size, err);
 	mana_gd_free_memory(gmi);
free_q:
 	kfree(queue);
@@ -993,8 +1176,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 	if (err || resp.hdr.status) {
-		dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
-			err, resp.hdr.status);
+		if (mana_need_log(gc, err))
+			dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+				err, resp.hdr.status);
 		if (!err)
 			err = -EPROTO;
 	}
@@ -1157,8 +1341,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
 	int err;
 
 	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+			queue->type, queue->queue_size, err);
 		return err;
+	}
 
 	mana_gd_wq_ring_doorbell(gc, queue);
 
@@ -1435,8 +1622,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
 	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
 
 	err = mana_gd_setup_irqs(pdev);
-	if (err)
+	if (err) {
+		dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
 		return err;
+	}
 
 	err = mana_hwc_create_channel(gc);
 	if (err)
@@ -1454,12 +1643,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
 	if (err)
 		goto destroy_hwc;
 
+	dev_dbg(&pdev->dev, "mana gdma setup successful\n");
 	return 0;
 
destroy_hwc:
 	mana_hwc_destroy_channel(gc);
remove_irq:
 	mana_gd_remove_irqs(pdev);
+	dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
 	return err;
 }
 
@@ -1470,6 +1661,7 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
 	mana_hwc_destroy_channel(gc);
 
 	mana_gd_remove_irqs(pdev);
+	dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
 }
 
 static bool mana_is_pf(unsigned short dev_id)
@@ -1488,8 +1680,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
 
 	err = pci_enable_device(pdev);
-	if (err)
+	if (err) {
+		dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
 		return -ENXIO;
+	}
 
 	pci_set_master(pdev);
 
@@ -1498,9 +1692,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto disable_dev;
 
 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (err)
+	if (err) {
+		dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
 		goto release_region;
-
+	}
 	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	err = -ENOMEM;
@@ -1578,10 +1773,12 @@ static void mana_gd_remove(struct pci_dev *pdev)
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
+
+	dev_dbg(&pdev->dev, "mana gdma remove successful\n");
 }
 
 /* The 'state' parameter is not used. */
-static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 
@@ -1596,7 +1793,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
  * fail -- if this happens, it's safer to just report an error than try to undo
  * what has been done.
  */
-static int mana_gd_resume(struct pci_dev *pdev)
+int mana_gd_resume(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 	int err;
@@ -2,6 +2,7 @@
 /* Copyright (c) 2021, Microsoft Corporation. */
 
 #include <net/mana/gdma.h>
+#include <net/mana/mana.h>
 #include <net/mana/hw_channel.h>
 #include <linux/vmalloc.h>
 
@@ -440,7 +441,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
 	gmi = &dma_buf->mem_info;
 	err = mana_gd_alloc_memory(gc, buf_size, gmi);
 	if (err) {
-		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+		dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+			buf_size, err);
 		goto out;
 	}
 
@@ -529,6 +531,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
 	if (err)
 		mana_hwc_destroy_wq(hwc, hwc_wq);
+
+	dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+		queue_size, q_type, err);
 	return err;
 }
 
@@ -856,7 +861,9 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
 
 	if (!wait_for_completion_timeout(&ctx->comp_event,
 					 (msecs_to_jiffies(hwc->hwc_timeout)))) {
-		dev_err(hwc->dev, "HWC: Request timed out!\n");
+		if (hwc->hwc_timeout != 0)
+			dev_err(hwc->dev, "HWC: Request timed out!\n");
+
 		err = -ETIMEDOUT;
 		goto out;
 	}
@@ -867,8 +874,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
 	}
 
 	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
-		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
-			ctx->status_code);
+		if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
+			err = -EOPNOTSUPP;
+			goto out;
+		}
+		if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
+			dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+				ctx->status_code);
 		err = -EPROTO;
 		goto out;
 	}
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
 		goto out;
 
 	xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
-	xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+	xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
 
 	act = bpf_prog_run_xdp(prog, xdp);
 
@@ -46,16 +46,27 @@ static const struct file_operations mana_dbg_q_fops = {
 	.read = mana_dbg_q_read,
 };
 
+static bool mana_en_need_log(struct mana_port_context *apc, int err)
+{
+	if (apc && apc->ac && apc->ac->gdma_dev &&
+	    apc->ac->gdma_dev->gdma_context)
+		return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
+	else
+		return true;
+}
+
 /* Microsoft Azure Network Adapter (MANA) functions */
 
 static int mana_open(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
 	int err;
 
 	err = mana_alloc_queues(ndev);
-	if (err)
+	if (err) {
+		netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
 		return err;
+	}
 
 	apc->port_is_up = true;
@@ -64,7 +75,7 @@ static int mana_open(struct net_device *ndev)
 
 	netif_carrier_on(ndev);
 	netif_tx_wake_all_queues(ndev);
-
+	netdev_dbg(ndev, "%s successful\n", __func__);
 	return 0;
 }
 
@@ -176,6 +187,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
 	return 0;
 
frag_err:
+	if (net_ratelimit())
+		netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+			   skb->len);
 	for (i = sg_i - 1; i >= hsg; i--)
 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
 			       DMA_TO_DEVICE);
@@ -245,10 +259,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct netdev_queue *net_txq;
 	struct mana_stats_tx *tx_stats;
 	struct gdma_queue *gdma_sq;
+	int err, len, num_gso_seg;
 	unsigned int csum_type;
 	struct mana_txq *txq;
 	struct mana_cq *cq;
-	int err, len;
 
 	if (unlikely(!apc->port_is_up))
 		goto tx_drop;
@@ -256,6 +270,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (skb_cow_head(skb, MANA_HEADROOM))
 		goto tx_drop_count;
 
+	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+		goto tx_drop_count;
+
 	txq = &apc->tx_qp[txq_idx].txq;
 	gdma_sq = txq->gdma_sq;
 	cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -398,6 +415,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	skb_queue_tail(&txq->pending_skbs, skb);
 
 	len = skb->len;
+	num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 	net_txq = netdev_get_tx_queue(ndev, txq_idx);
 
 	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
@@ -422,10 +440,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
 	skb = NULL;
 
+	/* Populated the packet and bytes counters based on post GSO packet
+	 * calculations
+	 */
 	tx_stats = &txq->stats;
 	u64_stats_update_begin(&tx_stats->syncp);
-	tx_stats->packets++;
-	tx_stats->bytes += len;
+	tx_stats->packets += num_gso_seg;
+	tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
 	u64_stats_update_end(&tx_stats->syncp);
 
tx_busy:
@@ -652,30 +673,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
 	mpc->rxbpre_total = 0;
 
 	for (i = 0; i < num_rxb; i++) {
-		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
-			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
-			if (!va)
-				goto error;
-
-			page = virt_to_head_page(va);
-			/* Check if the frag falls back to single page */
-			if (compound_order(page) <
-			    get_order(mpc->rxbpre_alloc_size)) {
-				put_page(page);
-				goto error;
-			}
-		} else {
-			page = dev_alloc_page();
-			if (!page)
-				goto error;
-
-			va = page_to_virt(page);
-		}
+		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+		if (!page)
+			goto error;
+
+		va = page_to_virt(page);
 
 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
 		if (dma_mapping_error(dev, da)) {
-			put_page(virt_to_head_page(va));
+			put_page(page);
 			goto error;
 		}
 
@@ -687,6 +694,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
 	return 0;
 
error:
+	netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
 	mana_pre_dealloc_rxbufs(mpc);
 	return -ENOMEM;
 }
@@ -778,8 +786,13 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
 				   out_buf);
 	if (err || resp->status) {
-		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
-			err, resp->status);
+		if (err == -EOPNOTSUPP)
+			return err;
+
+		if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
+		    mana_need_log(gc, err))
+			dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+				err, resp->status);
 		return err ? err : -EPROTO;
 	}
 
@@ -854,8 +867,10 @@ static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 				sizeof(resp));
 	if (err) {
-		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
-			   err);
+		if (mana_en_need_log(apc, err))
+			netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+				   err);
+
 		return;
 	}
 
@@ -910,8 +925,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 				sizeof(resp));
 	if (err) {
-		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
-			   err);
+		if (mana_en_need_log(apc, err))
+			netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+				   err);
+
 		return;
 	}
 
@@ -925,7 +942,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
 
 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 				 u32 proto_minor_ver, u32 proto_micro_ver,
-				 u16 *max_num_vports)
+				 u16 *max_num_vports, u8 *bm_hostmode)
 {
 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 	struct mana_query_device_cfg_resp resp = {};
@@ -936,7 +953,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 			     sizeof(req), sizeof(resp));
 
-	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
 
 	req.proto_major_ver = proto_major_ver;
 	req.proto_minor_ver = proto_minor_ver;
@@ -960,11 +977,16 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 
 	*max_num_vports = resp.max_num_vports;
 
-	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+	if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
 		gc->adapter_mtu = resp.adapter_mtu;
 	else
 		gc->adapter_mtu = ETH_FRAME_LEN;
 
+	if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
+		*bm_hostmode = resp.bm_hostmode;
+	else
+		*bm_hostmode = 0;
+
 	debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
 
 	return 0;
@@ -1136,7 +1158,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
 				sizeof(resp));
 	if (err) {
-		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+		if (mana_en_need_log(apc, err))
+			netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+
 		goto out;
 	}
 
@@ -1231,7 +1255,9 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 				sizeof(resp));
 	if (err) {
-		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+		if (mana_en_need_log(apc, err))
+			netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+
 		return;
 	}
 
@@ -1304,8 +1330,10 @@ static int mana_create_eq(struct mana_context *ac)
 	for (i = 0; i < gc->max_num_queues; i++) {
 		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
-		if (err)
+		if (err) {
+			dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
 			goto out;
+		}
 		mana_create_eq_debugfs(ac, i);
 	}
 
@@ -1547,8 +1575,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
 		return NULL;
 
 	if (xdp->data_hard_start) {
+		u32 metasize = xdp->data - xdp->data_meta;
+
 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
 		skb_put(skb, xdp->data_end - xdp->data);
+		if (metasize)
+			skb_metadata_set(skb, metasize);
 		return skb;
 	}
 
@@ -1660,7 +1692,7 @@ drop:
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-			     dma_addr_t *da, bool *from_pool, bool is_napi)
+			     dma_addr_t *da, bool *from_pool)
 {
 	struct page *page;
 	void *va;
@@ -1671,21 +1703,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 	if (rxq->xdp_save_va) {
 		va = rxq->xdp_save_va;
 		rxq->xdp_save_va = NULL;
-	} else if (rxq->alloc_size > PAGE_SIZE) {
-		if (is_napi)
-			va = napi_alloc_frag(rxq->alloc_size);
-		else
-			va = netdev_alloc_frag(rxq->alloc_size);
-
-		if (!va)
-			return NULL;
-
-		page = virt_to_head_page(va);
-		/* Check if the frag falls back to single page */
-		if (compound_order(page) < get_order(rxq->alloc_size)) {
-			put_page(page);
-			return NULL;
-		}
 	} else {
 		page = page_pool_dev_alloc_pages(rxq->page_pool);
 		if (!page)
@@ -1718,7 +1735,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
 	dma_addr_t da;
 	void *va;
 
-	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 	if (!va)
 		return;
 
@@ -2081,6 +2098,8 @@ static int mana_create_txq(struct mana_port_context *apc,
 
 	return 0;
out:
+	netdev_err(net, "Failed to create %d TX queues, %d\n",
+		   apc->num_queues, err);
 	mana_destroy_txq(apc);
 	return err;
 }
@@ -2158,7 +2177,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
 	if (mpc->rxbufs_pre)
 		va = mana_get_rxbuf_pre(rxq, &da);
 	else
-		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 
 	if (!va)
 		return -ENOMEM;
@@ -2244,6 +2263,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
 	pprm.nid = gc->numa_node;
 	pprm.napi = &rxq->rx_cq.napi;
 	pprm.netdev = rxq->ndev;
+	pprm.order = get_order(rxq->alloc_size);
 
 	rxq->page_pool = page_pool_create(&pprm);
 
@@ -2417,6 +2437,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
 		if (!rxq) {
 			err = -ENOMEM;
+			netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
 			goto out;
 		}
 
@@ -2450,7 +2471,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
 	mana_destroy_txq(apc);
 	mana_uncfg_vport(apc);
 
-	if (gd->gdma_context->is_pf)
+	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
 		mana_pf_deregister_hw_vport(apc);
 }
 
@@ -2462,7 +2483,7 @@ static int mana_create_vport(struct mana_port_context *apc,
 
 	apc->default_rxobj = INVALID_MANA_HANDLE;
 
-	if (gd->gdma_context->is_pf) {
+	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
 		err = mana_pf_register_hw_vport(apc);
 		if (err)
 			return err;
@@ -2615,6 +2636,88 @@ void mana_query_gf_stats(struct mana_port_context *apc)
 	apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
 }
 
+void mana_query_phy_stats(struct mana_port_context *apc)
+{
+	struct mana_query_phy_stat_resp resp = {};
+	struct mana_query_phy_stat_req req = {};
+	struct net_device *ndev = apc->ndev;
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
+			     sizeof(req), sizeof(resp));
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err)
+		return;
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
+				   sizeof(resp));
+	if (err || resp.hdr.status) {
+		netdev_err(ndev,
+			   "Failed to query PHY stats: %d, resp:0x%x\n",
+			   err, resp.hdr.status);
+		return;
+	}
+
+	/* Aggregate drop counters */
+	apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
+	apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
+
+	/* Per TC traffic Counters */
+	apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
+	apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
+	apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
+	apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
+	apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
+	apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
+	apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
+	apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
+	apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
+	apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
+	apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
+	apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
+	apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
+	apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
+	apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
+	apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
+
+	/* Per TC byte Counters */
+	apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
+	apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
+	apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
+	apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
+	apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
+	apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
+	apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
+	apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
+	apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
+	apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
+	apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
+	apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
+	apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
+	apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
+	apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
+	apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
+
+	/* Per TC pause Counters */
+	apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
+	apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
+	apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
+	apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
+	apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
+	apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
+	apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
+	apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
+	apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
+	apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
+	apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
+	apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
+	apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
+	apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
+	apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
+	apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
+}
+
 static int mana_init_port(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
@@ -2663,12 +2766,18 @@ int mana_alloc_queues(struct net_device *ndev)
 	int err;
 
 	err = mana_create_vport(apc, ndev);
-	if (err)
+	if (err) {
+		netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
 		return err;
+	}
 
 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
-	if (err)
+	if (err) {
+		netdev_err(ndev,
+			   "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
+			   apc->num_queues, err);
 		goto destroy_vport;
+	}
 
 	err = mana_add_rx_queues(apc, ndev);
 	if (err)
@@ -2677,16 +2786,22 @@ int mana_alloc_queues(struct net_device *ndev)
 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
 
 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
-	if (err)
+	if (err) {
+		netdev_err(ndev,
+			   "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
+			   apc->num_queues, err);
 		goto destroy_vport;
+	}
 
 	mana_rss_table_init(apc);
 
 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
-	if (err)
+	if (err) {
+		netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
 		goto destroy_vport;
+	}
 
-	if (gd->gdma_context->is_pf) {
+	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
 		err = mana_pf_register_filter(apc);
 		if (err)
 			goto destroy_vport;
@@ -2748,7 +2863,7 @@ static int mana_dealloc_queues(struct net_device *ndev)
 
 	mana_chn_setxdp(apc, NULL);
 
-	if (gd->gdma_context->is_pf)
+	if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
 		mana_pf_deregister_filter(apc);
 
 	/* No packet can be transmitted now since apc->port_is_up is false.
@@ -2797,11 +2912,10 @@ static int mana_dealloc_queues(struct net_device *ndev)
 
 	apc->rss_state = TRI_STATE_FALSE;
 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
-	if (err) {
+	if (err && mana_en_need_log(apc, err))
 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
-		return err;
-	}
 
+	/* Even in err case, still need to cleanup the vPort */
 	mana_destroy_vport(apc);
 
 	return 0;
@@ -2825,8 +2939,10 @@ int mana_detach(struct net_device *ndev, bool from_close)
 
 	if (apc->port_st_save) {
 		err = mana_dealloc_queues(ndev);
-		if (err)
+		if (err) {
+			netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
 			return err;
+		}
 	}
 
 	if (!from_close) {
@@ -2875,6 +2991,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
 	ndev->dev_port = port_idx;
 	SET_NETDEV_DEV(ndev, gc->dev);
 
+	netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
 	netif_carrier_off(ndev);
 
 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2970,6 +3088,8 @@ static int add_adev(struct gdma_dev *gd)
 		goto add_fail;
 
 	gd->adev = adev;
+	dev_dbg(gd->gdma_context->dev,
+		"Auxiliary device added successfully\n");
 	return 0;
 
add_fail:
@@ -2989,6 +3109,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 	struct gdma_context *gc = gd->gdma_context;
 	struct mana_context *ac = gd->driver_data;
 	struct device *dev = gc->dev;
+	u8 bm_hostmode = 0;
 	u16 num_ports = 0;
 	int err;
 	int i;
@@ -3011,13 +3132,17 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 	}
 
 	err = mana_create_eq(ac);
-	if (err)
+	if (err) {
+		dev_err(dev, "Failed to create EQs: %d\n", err);
 		goto out;
+	}
 
 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
-				    MANA_MICRO_VERSION, &num_ports);
+				    MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
 	if (err)
 		goto out;
 
+	ac->bm_hostmode = bm_hostmode;
+
 	if (!resuming) {
 		ac->num_ports = num_ports;
@@ -3068,8 +3193,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 
 	err = add_adev(gd);
out:
-	if (err)
+	if (err) {
 		mana_remove(gd, false);
+	} else {
+		dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+			gd, gd->dev_id.as_uint32, ac->num_ports,
+			gd->dev_id.type, gd->dev_id.instance);
+		dev_dbg(dev, "%s succeeded\n", __func__);
+	}
 
 	return err;
 }
@@ -3131,23 +3262,31 @@ out:
 	gd->driver_data = NULL;
 	gd->gdma_context = NULL;
 	kfree(ac);
+	dev_dbg(dev, "%s succeeded\n", __func__);
 }
 
-struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+					   u32 port_index,
+					   netdevice_tracker *tracker)
 {
 	struct net_device *ndev;
 
-	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
-			 "Taking primary netdev without holding the RCU read lock");
 	if (port_index >= ac->num_ports)
 		return NULL;
 
-	/* When mana is used in netvsc, the upper netdevice should be returned. */
-	if (ac->ports[port_index]->flags & IFF_SLAVE)
-		ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
-	else
+	rcu_read_lock();
+
+	/* If mana is used in netvsc, the upper netdevice should be returned. */
+	ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+
+	/* If there is no upper device, use the parent Ethernet device */
+	if (!ndev)
 		ndev = ac->ports[port_index];
 
+	netdev_hold(ndev, tracker, GFP_ATOMIC);
+	rcu_read_unlock();
+
 	return ndev;
 }
-EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, NET_MANA);
+EXPORT_SYMBOL_NS(mana_get_primary_netdev, NET_MANA);
@@ -7,10 +7,12 @@
 
 #include <net/mana/mana.h>
 
-static const struct {
+struct mana_stats_desc {
 	char name[ETH_GSTRING_LEN];
 	u16 offset;
-} mana_eth_stats[] = {
+};
+
+static const struct mana_stats_desc mana_eth_stats[] = {
 	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
 	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
 	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
@@ -75,6 +77,59 @@ static const struct {
 					    rx_cqe_unknown_type)},
 };
 
+static const struct mana_stats_desc mana_phy_stats[] = {
+	{ "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
+	{ "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
+	{ "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
+	{ "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
+	{ "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
+	{ "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
+	{ "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
+	{ "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
+	{ "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
+	{ "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
+	{ "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
+	{ "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
+	{ "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
+	{ "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
+	{ "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
+	{ "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
+	{ "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
+	{ "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
+	{ "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
+	{ "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
+	{ "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
+	{ "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
+	{ "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
+	{ "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
+	{ "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
+	{ "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
+	{ "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
+	{ "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
+	{ "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
+	{ "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
+	{ "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
+	{ "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
+	{ "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
+	{ "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
+	{ "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
+	{ "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
+	{ "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
+	{ "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
+	{ "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
+	{ "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
+	{ "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
+	{ "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
+	{ "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
+	{ "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
+	{ "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
+	{ "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
+	{ "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
+	{ "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
+	{ "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
+	{ "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
+};
+
 static int mana_get_sset_count(struct net_device *ndev, int stringset)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
@@ -83,8 +138,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
 	if (stringset != ETH_SS_STATS)
 		return -EINVAL;
 
-	return ARRAY_SIZE(mana_eth_stats) + num_queues *
-				(MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
+	return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+	       num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
 }
 
 static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -99,6 +154,9 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
 		ethtool_puts(&data, mana_eth_stats[i].name);
 
+	for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
+		ethtool_puts(&data, mana_phy_stats[i].name);
+
 	for (i = 0; i < num_queues; i++) {
 		ethtool_sprintf(&data, "rx_%d_packets", i);
 		ethtool_sprintf(&data, "rx_%d_bytes", i);
@@ -128,6 +186,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 	struct mana_port_context *apc = netdev_priv(ndev);
 	unsigned int num_queues = apc->num_queues;
 	void *eth_stats = &apc->eth_stats;
+	void *phy_stats = &apc->phy_stats;
 	struct mana_stats_rx *rx_stats;
 	struct mana_stats_tx *tx_stats;
 	unsigned int start;
@@ -151,9 +210,18 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 	/* we call mana function to update stats from GDMA */
 	mana_query_gf_stats(apc);
 
+	/* We call this mana function to get the phy stats from GDMA and includes
+	 * aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause
+	 * counters.
+	 */
+	mana_query_phy_stats(apc);
+
 	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
 		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
 
+	for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
+		data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
+
 	for (q = 0; q < num_queues; q++) {
 		rx_stats = &apc->rxqs[q]->stats;
 
@@ -1050,6 +1050,7 @@ struct net_device_context {
 	struct net_device __rcu *vf_netdev;
 	struct netvsc_vf_pcpu_stats __percpu *vf_stats;
 	struct delayed_work vf_takeover;
+	struct delayed_work vfns_work;
 
 	/* 1: allocated, serial number is valid. 0: not allocated */
 	u32 vf_alloc;
@@ -1064,6 +1065,8 @@ struct net_device_context {
 	struct netvsc_device_info *saved_netvsc_dev_info;
 };
 
+void netvsc_vfns_work(struct work_struct *w);
+
 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
@@ -1166,6 +1169,8 @@ struct netvsc_device {
 	u32 max_chn;
 	u32 num_chn;
 
+	u32 netvsc_gso_max_size;
+
 	atomic_t open_chn;
 	struct work_struct subchan_work;
 	wait_queue_head_t subchan_open;
@ -712,8 +712,13 @@ void netvsc_device_remove(struct hv_device *device)
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		/* only disable enabled NAPI channel */
		if (i < ndev->real_num_rx_queues)
		if (i < ndev->real_num_rx_queues) {
			netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_TX,
					     NULL);
			netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_RX,
					     NULL);
			napi_disable(&net_device->chan_table[i].napi);
		}

		netif_napi_del(&net_device->chan_table[i].napi);
	}
@ -1787,6 +1792,10 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);
	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
			     &net_device->chan_table[0].napi);
	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
			     &net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
@ -1805,6 +1814,8 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
	netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
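
The netvsc hunks above keep a strict ordering: a NAPI instance is enabled before it is published for a queue, and it is unpublished (set to NULL) before it is disabled, with teardown mirroring setup in reverse. A minimal sketch of that symmetry, using the same netif_queue_set_napi() API but with illustrative helper names:

#include <linux/netdevice.h>

static void demo_chan_up(struct net_device *ndev, unsigned int q,
			 struct napi_struct *napi)
{
	napi_enable(napi);
	/* publish the NAPI for the queue only once it is enabled */
	netif_queue_set_napi(ndev, q, NETDEV_QUEUE_TYPE_RX, napi);
	netif_queue_set_napi(ndev, q, NETDEV_QUEUE_TYPE_TX, napi);
}

static void demo_chan_down(struct net_device *ndev, unsigned int q,
			   struct napi_struct *napi)
{
	/* unpublish first, then disable: the exact reverse of demo_chan_up() */
	netif_queue_set_napi(ndev, q, NETDEV_QUEUE_TYPE_TX, NULL);
	netif_queue_set_napi(ndev, q, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(napi);
}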
@ -2350,8 +2350,11 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
	if (!ndev)
		return NOTIFY_DONE;

	/* set slave flag before open to prevent IPv6 addrconf */
	/* Set slave flag and no addrconf flag before open
	 * to prevent IPv6 addrconf.
	 */
	vf_netdev->flags |= IFF_SLAVE;
	vf_netdev->priv_flags |= IFF_NO_ADDRCONF;
	return NOTIFY_DONE;
}

@ -2461,6 +2464,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
	} else {
		netdev_info(ndev, "Data path switched %s VF: %s\n",
			    vf_is_up ? "to" : "from", vf_netdev->name);

		/* In Azure, when accelerated networking is enabled, other
		 * NICs like MANA and MLX are configured as a bonded NIC with
		 * the netvsc (failover) NIC. For bonded NICs, the minimum of
		 * the members' maximum packet aggregate sizes is propagated
		 * in the stack. To allow these NICs (MANA/MLX) to use GSO
		 * packet sizes up to GSO_MAX_SIZE, the netvsc NIC must also
		 * support this in the guest. This value is only increased
		 * for the netvsc NIC when the data path is switched over to
		 * the VF.
		 */
		if (vf_is_up)
			netif_set_tso_max_size(ndev, vf_netdev->tso_max_size);
		else
			netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size);
	}

	return NOTIFY_OK;
@ -2547,6 +2565,7 @@ static int netvsc_probe(struct hv_device *dev,
	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
	INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
@ -2689,6 +2708,8 @@ static void netvsc_remove(struct hv_device *dev)
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	cancel_delayed_work_sync(&ndev_ctx->vfns_work);

	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev) {
		cancel_work_sync(&nvdev->subchan_work);
@ -2730,6 +2751,7 @@ static int netvsc_suspend(struct hv_device *dev)
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	cancel_delayed_work_sync(&ndev_ctx->vfns_work);

	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev == NULL) {
@ -2823,6 +2845,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev)
	}
}

void netvsc_vfns_work(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, vfns_work.work);
	struct net_device *ndev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vfns_work, 1);
		return;
	}

	ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	if (!ndev)
		goto out;

	netvsc_event_set_vf_ns(ndev);

out:
	rtnl_unlock();
}

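netvsc_vfns_work() above shows the classic trylock-and-reschedule idiom: a work item that needs RTNL must not block the workqueue, so on contention it re-queues itself instead. A stripped-down sketch of the same idiom, with hypothetical demo_ctx/demo_work_fn names:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct delayed_work work;
};

static void demo_work_fn(struct work_struct *w)
{
	struct demo_ctx *ctx = container_of(w, struct demo_ctx, work.work);

	if (!rtnl_trylock()) {
		/* retry one jiffy later rather than sleeping in the pool */
		schedule_delayed_work(&ctx->work, 1);
		return;
	}

	/* ... work that requires RTNL goes here ... */

	rtnl_unlock();
}

This is also why netvsc_remove()/netvsc_suspend() cancel vfns_work while already holding rtnl_lock(): the work function can never be blocked waiting for a lock its canceller holds, because it only ever trylocks.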
/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
@ -2833,10 +2876,12 @@ static int netvsc_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct net_device_context *ndev_ctx;
	int ret = 0;

	if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
		netvsc_event_set_vf_ns(event_dev);
		ndev_ctx = netdev_priv(event_dev);
		schedule_delayed_work(&ndev_ctx->vfns_work, 0);
		return NOTIFY_DONE;
	}

@ -1269,10 +1269,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
	ret = vmbus_open(new_sc, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, nvchan);
	if (ret == 0)
	if (ret == 0) {
		napi_enable(&nvchan->napi);
	else
		netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
				     &nvchan->napi);
		netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
				     &nvchan->napi);
	} else {
		netdev_notice(ndev, "sub channel open failed: %d\n", ret);
	}

	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
		wake_up(&nvscdev->subchan_open);
@ -1351,9 +1356,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct ndis_offload hwcaps;
	struct ndis_offload_params offloads;
	unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
	int ret;

	nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;

	/* Find HW offload capabilities */
	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
	if (ret != 0)
@ -1385,8 +1391,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
		offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
		net->hw_features |= NETIF_F_TSO;

		if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
			gso_max_size = hwcaps.lsov2.ip4_maxsz;
		if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
			nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
	}

	if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
@ -1406,8 +1412,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
		offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
		net->hw_features |= NETIF_F_TSO6;

		if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
			gso_max_size = hwcaps.lsov2.ip6_maxsz;
		if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
			nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
	}

	if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
@ -1433,7 +1439,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
	 */
	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;

	netif_set_tso_max_size(net, gso_max_size);
	netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);

	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
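
The hwcaps hunks replace a local gso_max_size with a persisted nvdev->netvsc_gso_max_size, but the clamping logic itself is unchanged: start from the stack's legacy ceiling and lower it to each capability's advertised maximum. A sketch of that min-clamp, with illustrative parameter names:

#include <linux/netdevice.h>

static unsigned int demo_clamp_gso_max(unsigned int ip4_maxsz,
				       unsigned int ip6_maxsz)
{
	unsigned int gso_max = GSO_LEGACY_MAX_SIZE;

	/* lower the ceiling to whatever each capability can actually do */
	if (ip4_maxsz < gso_max)
		gso_max = ip4_maxsz;
	if (ip6_maxsz < gso_max)
		gso_max = ip6_maxsz;

	return gso_max;
}

Persisting the result on the netvsc_device is what lets netvsc_vf_changed() restore it later, when the data path switches back from the VF.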
@ -128,6 +128,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
		ret = -EINVAL;
		goto out;
	}

	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		struct iov_iter iter;

@ -2246,8 +2246,14 @@ static __net_init int nfsd_net_init(struct net *net)
				     NFSD_STATS_COUNTERS_NUM);
	if (retval)
		goto out_repcache_error;

	memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
	nn->nfsd_svcstats.program = &nfsd_programs[0];
	if (!nfsd_proc_stat_init(net)) {
		retval = -ENOMEM;
		goto out_proc_error;
	}

	for (i = 0; i < sizeof(nn->nfsd_versions); i++)
		nn->nfsd_versions[i] = nfsd_support_version(i);
	for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
@ -2257,13 +2263,14 @@ static __net_init int nfsd_net_init(struct net *net)
	nfsd4_init_leases_net(nn);
	get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
	seqlock_init(&nn->writeverf_lock);
	nfsd_proc_stat_init(net);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
	spin_lock_init(&nn->local_clients_lock);
	INIT_LIST_HEAD(&nn->local_clients);
#endif
	return 0;

out_proc_error:
	percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
out_repcache_error:
	nfsd_idmap_shutdown(net);
out_idmap_error:
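
The new out_proc_error label above slots into the usual goto-unwind ladder: each label undoes exactly the steps that completed before the failure, in reverse order of setup. A generic sketch of the idiom, with stub functions standing in for the real init/teardown steps:

/* Illustrative stubs; the real code registers counters, proc files, etc. */
static int demo_setup_a(void) { return 0; }
static int demo_setup_b(void) { return 0; }
static int demo_setup_c(void) { return 0; }
static void demo_teardown_a(void) { }
static void demo_teardown_b(void) { }

static int demo_init(void)
{
	int err;

	err = demo_setup_a();
	if (err)
		return err;
	err = demo_setup_b();
	if (err)
		goto undo_a;
	err = demo_setup_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	demo_teardown_b();	/* undo in reverse order of setup */
undo_a:
	demo_teardown_a();
	return err;
}

The fix this enables (checking the svc_proc_register() result instead of ignoring it) is exactly the CVE-2025-22026 entry in the changelog at the end of this commit.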
@ -73,11 +73,11 @@ static int nfsd_show(struct seq_file *seq, void *v)

DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);

void nfsd_proc_stat_init(struct net *net)
struct proc_dir_entry *nfsd_proc_stat_init(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
	return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
}

void nfsd_proc_stat_shutdown(struct net *net)
@ -10,7 +10,7 @@
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>

void nfsd_proc_stat_init(struct net *net);
struct proc_dir_entry *nfsd_proc_stat_init(struct net *net);
void nfsd_proc_stat_shutdown(struct net *net);

static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
@ -343,7 +343,7 @@ static struct ntlmssp2_name *find_next_av(struct cifs_ses *ses,
	len = AV_LEN(av);
	if (AV_TYPE(av) == NTLMSSP_AV_EOL)
		return NULL;
	if (!len || (u8 *)av + sizeof(*av) + len > end)
	if ((u8 *)av + sizeof(*av) + len > end)
		return NULL;
	return av;
}
@ -363,7 +363,7 @@ static int find_av_name(struct cifs_ses *ses, u16 type, char **name, u16 maxlen)

	av_for_each_entry(ses, av) {
		len = AV_LEN(av);
		if (AV_TYPE(av) != type)
		if (AV_TYPE(av) != type || !len)
			continue;
		if (!IS_ALIGNED(len, sizeof(__le16))) {
			cifs_dbg(VFS | ONCE, "%s: bad length(%u) for type %u\n",
@ -532,17 +532,67 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash, struct shash_
	return rc;
}

/*
 * Set up NTLMv2 response blob with SPN (cifs/<hostname>) appended to the
 * existing list of AV pairs.
 */
static int set_auth_key_response(struct cifs_ses *ses)
{
	size_t baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
	size_t len, spnlen, tilen = 0, num_avs = 2 /* SPN + EOL */;
	struct TCP_Server_Info *server = ses->server;
	char *spn __free(kfree) = NULL;
	struct ntlmssp2_name *av;
	char *rsp = NULL;
	int rc;

	spnlen = strlen(server->hostname);
	len = sizeof("cifs/") + spnlen;
	spn = kmalloc(len, GFP_KERNEL);
	if (!spn) {
		rc = -ENOMEM;
		goto out;
	}

	spnlen = scnprintf(spn, len, "cifs/%.*s",
			   (int)spnlen, server->hostname);

	av_for_each_entry(ses, av)
		tilen += sizeof(*av) + AV_LEN(av);

	len = baselen + tilen + spnlen * sizeof(__le16) + num_avs * sizeof(*av);
	rsp = kmalloc(len, GFP_KERNEL);
	if (!rsp) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy(rsp + baselen, ses->auth_key.response, tilen);
	av = (void *)(rsp + baselen + tilen);
	av->type = cpu_to_le16(NTLMSSP_AV_TARGET_NAME);
	av->length = cpu_to_le16(spnlen * sizeof(__le16));
	cifs_strtoUTF16((__le16 *)av->data, spn, spnlen, ses->local_nls);
	av = (void *)((__u8 *)av + sizeof(*av) + AV_LEN(av));
	av->type = cpu_to_le16(NTLMSSP_AV_EOL);
	av->length = 0;

	rc = 0;
	ses->auth_key.len = len;
out:
	ses->auth_key.response = rsp;
	return rc;
}

int
setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
	struct shash_desc *hmacmd5 = NULL;
	int rc;
	int baselen;
	unsigned int tilen;
	unsigned char *tiblob = NULL; /* target info blob */
	struct ntlmv2_resp *ntlmv2;
	char ntlmv2_hash[16];
	unsigned char *tiblob = NULL; /* target info blob */
	__le64 rsp_timestamp;
	__u64 cc;
	int rc;

	if (nls_cp == NULL) {
		cifs_dbg(VFS, "%s called with nls_cp==NULL\n", __func__);
@ -588,32 +638,25 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
	 * (as Windows 7 does)
	 */
	rsp_timestamp = find_timestamp(ses);
	get_random_bytes(&cc, sizeof(cc));

	cifs_server_lock(ses->server);

	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
	tilen = ses->auth_key.len;
	tiblob = ses->auth_key.response;

	ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
	if (!ses->auth_key.response) {
		rc = -ENOMEM;
	rc = set_auth_key_response(ses);
	if (rc) {
		ses->auth_key.len = 0;
		goto setup_ntlmv2_rsp_ret;
		goto unlock;
	}
	ses->auth_key.len += baselen;

	ntlmv2 = (struct ntlmv2_resp *)
		(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
	ntlmv2->blob_signature = cpu_to_le32(0x00000101);
	ntlmv2->reserved = 0;
	ntlmv2->time = rsp_timestamp;

	get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
	ntlmv2->client_chal = cc;
	ntlmv2->reserved2 = 0;

	memcpy(ses->auth_key.response + baselen, tiblob, tilen);

	cifs_server_lock(ses->server);

	rc = cifs_alloc_hash("hmac(md5)", &hmacmd5);
	if (rc) {
		cifs_dbg(VFS, "Could not allocate HMAC-MD5, rc=%d\n", rc);
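
The AV pairs these hunks manipulate are a type/length/value (TLV) list: each entry carries a little-endian type and payload length, the walker advances by header plus payload, and an EOL type terminates the list. A self-contained sketch of that bounded walk, with a hypothetical demo_av that only mirrors the type/length/data shape of ntlmssp2_name:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_av {
	__le16 type;
	__le16 length;
	u8 data[];
} __packed;

static const struct demo_av *demo_next_av(const struct demo_av *av,
					  const u8 *end)
{
	u16 len = le16_to_cpu(av->length);

	if (le16_to_cpu(av->type) == 0)		/* EOL terminates the list */
		return NULL;
	if ((const u8 *)av + sizeof(*av) + len > end)
		return NULL;			/* entry would overrun buffer */
	return (const struct demo_av *)((const u8 *)av + sizeof(*av) + len);
}

Note how the two fixes split responsibilities: find_next_av() no longer rejects zero-length entries (they are legal padding-like entries), while find_av_name() skips them, so a zero-length pair neither truncates the walk nor gets dereferenced as a name.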
@ -10,6 +10,7 @@
#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105
#define GDMA_STATUS_CMD_UNSUPPORTED	0xffffffff

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
@ -58,8 +59,9 @@ enum gdma_eqe_type {
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
	GDMA_EQE_HWC_FPGA_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
	GDMA_EQE_HWC_RESET_REQUEST	= 135,
	GDMA_EQE_RNIC_QP_FATAL		= 176,
};

@ -152,6 +154,7 @@ struct gdma_general_req {
#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
@ -387,6 +390,8 @@ struct gdma_context {
	u32 test_event_eq_id;

	bool is_pf;
	bool in_service;

	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
@ -408,8 +413,6 @@ struct gdma_context {
	struct gdma_dev mana_ib;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
@ -556,11 +559,23 @@ enum {
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

/* Driver can self reset on EQE notification */
#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)

/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT)
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)

#define GDMA_DRV_CAP_FLAGS2 0

@ -621,11 +636,12 @@ struct gdma_query_max_resources_resp {
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
@ -883,4 +899,9 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

bool mana_need_log(struct gdma_context *gc, int err);

#endif /* _GDMA_H */
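
The new GDMA_DRV_CAP_FLAG_1_* bits follow the usual BIT()-mask capability scheme: the driver advertises a mask of what it can handle, and feature code later tests individual bits. A small sketch of that scheme with illustrative names (the hunk only shows the flag definitions, not the negotiation exchange itself):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_CAP_DEV_LIST_HOLES	BIT(11)	/* tolerate holes in device list */
#define DEMO_CAP_SELF_RESET	BIT(14)	/* self reset on EQE notification */

/* everything this driver version is prepared to handle */
#define DEMO_CAPS (DEMO_CAP_DEV_LIST_HOLES | DEMO_CAP_SELF_RESET)

static bool demo_has_cap(u64 negotiated, u64 cap)
{
	/* a capability is usable only if every bit of it was agreed on */
	return (negotiated & cap) == cap;
}

Advertising a bit is a promise about driver behavior, which is why each flag here lands in the same commit as the handler it promises (for example, the reset-request EQE handler for SELF_RESET_ON_EQE).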
@ -404,10 +404,70 @@ struct mana_ethtool_stats {
	u64 rx_cqe_unknown_type;
};

struct mana_ethtool_phy_stats {
	/* Drop Counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per TC traffic Counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per TC pause Counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;
	u8 bm_hostmode;

	struct mana_eq *eqs;
	struct dentry *mana_eqs_debugfs;
@ -473,6 +533,8 @@ struct mana_port_context {

	struct mana_ethtool_stats eth_stats;

	struct mana_ethtool_phy_stats phy_stats;

	/* Debugfs */
	struct dentry *mana_port_debugfs;
};
@ -497,6 +559,7 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

@ -523,6 +586,7 @@ enum mana_command_code {
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
	MANA_QUERY_PHY_STAT	= 0x2000c,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
@ -557,7 +621,8 @@ struct mana_query_device_cfg_resp {
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
	u8 reserved;
	u32 max_num_eqs;

	/* response v2: */
@ -684,6 +749,74 @@ struct mana_query_gf_stat_resp {
	u64 tx_err_gdma;
}; /* HW DATA */

/* Query phy stats */
struct mana_query_phy_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_phy_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;

	/* Aggregate Drop Counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per TC(Traffic class) traffic Counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per TC(Traffic Class) pause Counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
@ -827,5 +960,7 @@ int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index);
struct net_device *mana_get_primary_netdev(struct mana_context *ac,
					   u32 port_index,
					   netdevice_tracker *tracker);
#endif /* _MANA_H */
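
mana_query_phy_stat_resp is marked "HW DATA": its layout is the wire format the device fills in, so every field must stay naturally aligned with no compiler padding. Not shown in the hunk, but a common way to guard such structures is a compile-time size check; a sketch with an illustrative struct and an illustrative expected size:

#include <linux/build_bug.h>
#include <linux/types.h>

struct demo_hw_resp {
	u64 reported_stats;
	u64 rx_pkt_drop;
	u64 tx_pkt_drop;
}; /* HW DATA */

/* all-u64 members are naturally aligned, so no padding may appear */
static_assert(sizeof(struct demo_hw_resp) == 3 * sizeof(u64));

This is only a suggested safety net, not something the commit adds; the header's own comment ("naturally aligned and hence don't need __packed") states the invariant the check would enforce.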
@ -83,9 +83,6 @@ struct net {
	struct llist_node	defer_free_list;
	struct llist_node	cleanup_list;	/* namespaces on death row */

	RH_KABI_EXTEND(struct list_head ptype_all)
	RH_KABI_EXTEND(struct list_head ptype_specific)

#ifdef CONFIG_KEYS
	struct key_tag		*key_domain;	/* Key domain of operation tag */
#endif
@ -192,6 +189,9 @@ struct net {
#if IS_ENABLED(CONFIG_SMC)
	struct netns_smc	smc;
#endif

	RH_KABI_EXTEND(struct list_head ptype_all)
	RH_KABI_EXTEND(struct list_head ptype_specific)
} __randomize_layout;

#include <linux/seq_file_net.h>
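
The hunk above only moves the two RH_KABI_EXTEND() members from the middle of struct net to its tail. That placement is the point: assuming the usual semantics of the RH_KABI_* helpers in this tree, extension fields must sit after all pre-existing members so that every original offset, and thus the kernel ABI seen by out-of-tree consumers, is preserved. A schematic illustration with a hypothetical struct:

struct demo_obj {
	int a;		/* original members keep their offsets */
	int b;
	/* extensions appended at the end; earlier offsets unchanged */
	RH_KABI_EXTEND(struct list_head new_list)
};

The same rule explains the blkcg_gq hunk at the top of this commit, where the punt-bio extension fields were likewise relocated behind the struct's original tail member.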
@ -190,6 +190,8 @@ struct page_pool {
#endif
	u32 xdp_mem_id;

	RH_KABI_FILL_HOLE(struct xarray dma_mapped)

	/*
	 * Data structure for allocation side
	 *
@ -220,8 +222,6 @@ struct page_pool {

	void *mp_priv;

	RH_KABI_FILL_HOLE(struct xarray dma_mapped)

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb)
	 * it's better to just linearize it otherwise crc computing
	 * takes longer.
	 */
	if ((!is_gso && skb_linearize(skb)) ||
	if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
		goto discard_it;
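
The widened condition is easier to read pulled out on its own: linearize when the skb is not GSO, or when it is GSO but cloned, since a clone shares its data pages and the CRC must be computed over a private, linear copy. An illustrative restatement of just the predicate:

#include <linux/skbuff.h>

static bool demo_must_linearize(const struct sk_buff *skb, bool is_gso)
{
	/* cloned GSO skbs share data, so force a private linear copy */
	return !is_gso || skb_cloned(skb);
}

This corresponds to the "sctp: linearize cloned gso packets in sctp_rcv" (CVE-2025-38718) entry in the changelog below.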
@ -311,7 +311,7 @@ else
else
  # This value is used by the dist[g]-targets. Changing this value has significant
  # consequences for all of RHEL kernel engineering.
  BUILD_TARGET:=rhel-$(RHEL_MAJOR).$(RHEL_MINOR)-test-pesign
  BUILD_TARGET:=rhel-$(RHEL_MAJOR).$(RHEL_MINOR)-z-test-pesign
endif
endif
ifeq ("$(DIST)", ".eln")
@ -142,7 +142,7 @@ SPECPACKAGE_NAME ?= kernel
# should be tracked in a local branch. This would be "master" for the
# Linus master branch or linux-5.x.y for a stable branch. It can also be
# any other upstream you have added as a branch locally.
UPSTREAM_BRANCH ?= v6.10
UPSTREAM_BRANCH ?= v6.12

# If VERSION_ON_UPSTREAM is set, the versioning of the rpm package is based
# on a branch tracking upstream. This allows for generating rpms
@ -1,3 +1,448 @@
* Tue Sep 30 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.38.1.el10_0]
- hv_netvsc: Fix panic during namespace deletion with VF (Maxim Levitsky) [RHEL-114930]
- RDMA/mana_ib: Fix DSCP value in modify QP (Maxim Levitsky) [RHEL-114930]
- net: mana: Handle Reset Request from MANA NIC (Maxim Levitsky) [RHEL-114930]
- net: mana: Set tx_packets to post gso processing packet count (Maxim Levitsky) [RHEL-114930]
- net: mana: Handle unsupported HWC commands (Maxim Levitsky) [RHEL-114930]
- net: mana: Add handler for hardware servicing events (Maxim Levitsky) [RHEL-114930]
- net: mana: Expose additional hardware counters for drop and TC via ethtool. (Maxim Levitsky) [RHEL-114930]
- hv_netvsc: Set VF priv_flags to IFF_NO_ADDRCONF before open to prevent IPv6 addrconf (Maxim Levitsky) [RHEL-114930]
- net: mana: Record doorbell physical address in PF mode (Maxim Levitsky) [RHEL-114930]
- net: mana: Add support for Multi Vports on Bare metal (Maxim Levitsky) [RHEL-114930]
- net: mana: Switch to page pool for jumbo frames (Maxim Levitsky) [RHEL-114930]
- net: mana: Add metadata support for xdp mode (Maxim Levitsky) [RHEL-114930]
- RDMA/mana_ib: Handle net event for pointing to the current netdev (Maxim Levitsky) [RHEL-114930]
- net: mana: Change the function signature of mana_get_primary_netdev_rcu (Maxim Levitsky) [RHEL-114930]
- RDMA/mana_ib: Ensure variable err is initialized (Maxim Levitsky) [RHEL-114930]
- net: mana: Add debug logs in MANA network driver (Maxim Levitsky) [RHEL-114930]
- hv_netvsc: Use VF's tso_max_size value when data path is VF (Maxim Levitsky) [RHEL-114930]
- net: mana: Allow tso_max_size to go up-to GSO_MAX_SIZE (Maxim Levitsky) [RHEL-114930]
- RDMA/mana_ib: request error CQEs when supported (Maxim Levitsky) [RHEL-114930]
- RDMA/mana_ib: Query feature_flags bitmask from FW (Maxim Levitsky) [RHEL-114930]
- net: mana: Support holes in device list reply msg (Maxim Levitsky) [RHEL-114930]
- RDMA/mana_ib: Allocate PAGE aligned doorbell index (Maxim Levitsky) [RHEL-114930]
- hv_netvsc: Link queues to NAPIs (Maxim Levitsky) [RHEL-114930]
- sctp: linearize cloned gso packets in sctp_rcv (CKI Backport Bot) [RHEL-113339] {CVE-2025-38718}
- nfsd: don't ignore the return code of svc_proc_register() (Olga Kornievskaia) [RHEL-93626] {CVE-2025-22026}
- smb: client: fix session setup against servers that require SPN (Paulo Alcantara) [RHEL-107110]
- smb: client: allow parsing zero-length AV pairs (Paulo Alcantara) [RHEL-107110]
- RDMA/cxgb4: Notify rdma stack for IB_EVENT_QP_LAST_WQE_REACHED event (CKI Backport Bot) [RHEL-100817]
Resolves: RHEL-100817, RHEL-107110, RHEL-113339, RHEL-114930, RHEL-93626

* Wed Sep 24 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.37.1.el10_0]
- selftests: tls: add tests for zero-length records (Sabrina Dubroca) [RHEL-114331] {CVE-2025-39682}
- tls: fix handling of zero-length records on the rx_list (Sabrina Dubroca) [RHEL-114331] {CVE-2025-39682}
- fs: export anon_inode_make_secure_inode() and fix secretmem LSM bypass (Audra Mitchell) [RHEL-110313] {CVE-2025-38396}
Resolves: RHEL-110313, RHEL-114331

* Mon Sep 22 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.36.1.el10_0]
- io_uring/futex: ensure io_futex_wait() cleans up properly on failure (CKI Backport Bot) [RHEL-114341] {CVE-2025-39698}
- ice: use fixed adapter index for E825C embedded devices (CKI Backport Bot) [RHEL-111792]
- ice: use DSN instead of PCI BDF for ice_adapter index (CKI Backport Bot) [RHEL-111792]
- tcp: drop secpath at the same time as we currently drop dst (Sabrina Dubroca) [RHEL-82133]
Resolves: RHEL-111792, RHEL-114341, RHEL-82133

* Fri Sep 19 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.35.1.el10_0]
- cifs: Fix reading into an ITER_FOLIOQ from the smbdirect code (Paulo Alcantara) [RHEL-111177]
- cifs: Fix the smbd_response slab to allow usercopy (Paulo Alcantara) [RHEL-111177] {CVE-2025-38523}
- smb: client: let smbd_post_send_iter() respect the peers max_send_size and transmit all data (Paulo Alcantara) [RHEL-111177]
- smb: client: fix max_sge overflow in smb_extract_folioq_to_rdma() (Paulo Alcantara) [RHEL-111177]
- smb: client: make use of common smbdirect_socket_parameters (Paulo Alcantara) [RHEL-111177]
- smb: smbdirect: introduce smbdirect_socket_parameters (Paulo Alcantara) [RHEL-111177]
- smb: client: make use of common smbdirect_socket (Paulo Alcantara) [RHEL-111177]
- smb: smbdirect: add smbdirect_socket.h (Paulo Alcantara) [RHEL-111177]
- smb: client: make use of common smbdirect.h (Paulo Alcantara) [RHEL-111177]
- smb: smbdirect: add smbdirect.h with public structures (Paulo Alcantara) [RHEL-111177]
- smb: client: make use of common smbdirect_pdu.h (Paulo Alcantara) [RHEL-111177]
- smb: smbdirect: add smbdirect_pdu.h with protocol definitions (Paulo Alcantara) [RHEL-111177]
- s390/sclp: Fix SCCB present check (CKI Backport Bot) [RHEL-113561] {CVE-2025-39694}
- net: stmmac: fix TSO DMA API usage causing oops (Izabela Bakollari) [RHEL-105352]
- smb: client: fix use-after-free in cifs_oplock_break (CKI Backport Bot) [RHEL-111198] {CVE-2025-38527}
Resolves: RHEL-105352, RHEL-111177, RHEL-111198, RHEL-113561

* Mon Sep 15 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.34.1.el10_0]
- sunrpc: fix handling of server side tls alerts (Olga Kornievskaia) [RHEL-111073] {CVE-2025-38566}
- i40e: When removing VF MAC filters, only check PF-set MAC (CKI Backport Bot) [RHEL-109771]
- usb: dwc3: gadget: check that event count does not exceed event buffer length (CKI Backport Bot) [RHEL-107659] {CVE-2025-37810}
Resolves: RHEL-107659, RHEL-109771, RHEL-111073

* Tue Sep 09 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.33.1.el10_0]
- xfrm: interface: fix use-after-free after changing collect_md xfrm interface (CKI Backport Bot) [RHEL-109530] {CVE-2025-38500}
- idpf: convert control queue mutex to a spinlock (CKI Backport Bot) [RHEL-106061] {CVE-2025-38392}
- eth: bnxt: fix missing ring index trim on error path (CKI Backport Bot) [RHEL-104564] {CVE-2025-37873}
- tcp: Correct signedness in skb remaining space calculation (CKI Backport Bot) [RHEL-107844] {CVE-2025-38463}
- ipv6: mcast: Delay put pmc->idev in mld_del_delrec() (CKI Backport Bot) [RHEL-111154] {CVE-2025-38550}
- redhat: selftests/bpf: Add cpuv4 variant (Viktor Malik) [RHEL-109928]
- i40e: report VF tx_dropped with tx_errors instead of tx_discards (Dennis Chen) [RHEL-105138] {CVE-2025-38200}
- use uniform permission checks for all mount propagation changes (Ian Kent) [RHEL-107306] {CVE-2025-38498}
- do_change_type(): refuse to operate on unmounted/not ours mounts (Ian Kent) [RHEL-107306] {CVE-2025-38498}
- ublk: make sure ubq->canceling is set when queue is frozen (Ming Lei) [RHEL-99437] {CVE-2025-22068}
- net: gso: Forbid IPv6 TSO with extensions on devices with only IPV6_CSUM JIRA: https://issues.redhat.com/browse/RHEL-109821 Y-JIRA: https://issues.redhat.com/browse/RHEL-79173 (Jakub Ramaseuski)
- scsi: lpfc: Use memcpy() for BIOS version (Ewan D. Milne) [RHEL-105936] {CVE-2025-38332}
- net: introduce per netns packet chains (Paolo Abeni) [RHEL-88923]
Resolves: RHEL-104564, RHEL-105138, RHEL-105936, RHEL-106061, RHEL-107306, RHEL-107844, RHEL-109530, RHEL-109928, RHEL-111154, RHEL-88923, RHEL-99437

* Tue Sep 09 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.32.1.el10_0]
- posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del() (CKI Backport Bot) [RHEL-112784] {CVE-2025-38352}
Resolves: RHEL-112784

* Sun Aug 31 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.31.1.el10_0]
- sched/fair: Adhere to place_entity() constraints (Phil Auld) [RHEL-91404]
- sched/fair: Fix update_cfs_group() vs DELAY_DEQUEUE (Phil Auld) [RHEL-91404]
- sched/fair: Fix EEVDF entity placement bug causing scheduling lag (Phil Auld) [RHEL-91404]
- sched/fair: optimize the PLACE_LAG when se->vlag is zero (Phil Auld) [RHEL-91404]
- net/sched: ets: use old 'nbands' while purging unused classes (Ivan Vecera) [RHEL-107544] {CVE-2025-38350}
- net/sched: Always pass notifications when child class becomes empty (Ivan Vecera) [RHEL-93365] {CVE-2025-38350}
- net_sched: ets: fix a race in ets_qdisc_change() (Ivan Vecera) [RHEL-107544] {CVE-2025-38107}
- sch_htb: make htb_deactivate() idempotent (Ivan Vecera) [RHEL-93365] {CVE-2025-37953}
- codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog() (Ivan Vecera) [RHEL-93365] {CVE-2025-37798}
- sch_qfq: make qfq_qlen_notify() idempotent (Ivan Vecera) [RHEL-93365] {CVE-2025-38350}
- sch_drr: make drr_qlen_notify() idempotent (Ivan Vecera) [RHEL-93365] {CVE-2025-38350}
- sch_htb: make htb_qlen_notify() idempotent (Ivan Vecera) [RHEL-93365] {CVE-2025-37932}
- drm/vkms: Fix use after free and double free on init error (Jocelyn Falempe) [RHEL-99432] {CVE-2025-22097}
- Revert "cxl/acpi: Fix load failures due to single window creation failure" (John W. Linville) [RHEL-85055]
- udmabuf: fix a buf size overflow issue during udmabuf creation (Lyude Paul) [RHEL-99760] {CVE-2025-37803}
- drm/framebuffer: Acquire internal references on GEM handles (Mika Penttilä) [RHEL-106710] {CVE-2025-38449}
- drm/gem: Acquire references on GEM handles for framebuffers (Mika Penttilä) [RHEL-106710] {CVE-2025-38449}
- nvme/ioctl: don't warn on vectorized uring_cmd with fixed buffer (Maurizio Lombardi) [RHEL-109753]
- nvme-ioctl: fix leaked requests on mapping error (Maurizio Lombardi) [RHEL-109753]
Resolves: RHEL-106710, RHEL-107544, RHEL-109753, RHEL-85055, RHEL-91404, RHEL-93365, RHEL-99432, RHEL-99760

* Sun Aug 24 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.30.1.el10_0]
- net_sched: hfsc: Fix a potential UAF in hfsc_dequeue() too (CKI Backport Bot) [RHEL-107641] {CVE-2025-37823}
- s390/pci: Fix zpci_bus_is_isolated_vf() for non-VFs (Mete Durlu) [RHEL-95537]
- s390/pci: Fix handling of isolated VFs (CKI Backport Bot) [RHEL-84462]
- s390/pci: Pull search for parent PF out of zpci_iov_setup_virtfn() (CKI Backport Bot) [RHEL-84462]
- s390/pci: Fix SR-IOV for PFs initially in standby (CKI Backport Bot) [RHEL-84462]
- RDMA/iwcm: Fix use-after-free of work objects after cm_id destruction (CKI Backport Bot) [RHEL-104285] {CVE-2025-38211}
- udp: Fix memory accounting leak. (CKI Backport Bot) [RHEL-104102] {CVE-2025-22058}
- udp: Fix multiple wraparounds of sk->sk_rmem_alloc. (Xin Long) [RHEL-104102]
- ext4: only dirty folios when data journaling regular files (CKI Backport Bot) [RHEL-106815] {CVE-2025-38220}
- tipc: Fix use-after-free in tipc_conn_close(). (CKI Backport Bot) [RHEL-106660] {CVE-2025-38464}
- vsock: Fix transport_* TOCTOU (CKI Backport Bot) [RHEL-106015] {CVE-2025-38461}
- netfilter: nf_conntrack: fix crash due to removal of uninitialised entry (CKI Backport Bot) [RHEL-106442] {CVE-2025-38472}
Resolves: RHEL-104102, RHEL-104285, RHEL-106015, RHEL-106442, RHEL-106660, RHEL-106815, RHEL-107641, RHEL-84462, RHEL-95537

* Sun Aug 17 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.29.1.el10_0]
- ice: fix eswitch code memory leak in reset scenario (CKI Backport Bot) [RHEL-108153] {CVE-2025-38417}
- net/sched: Abort __tc_modify_qdisc if parent class does not exist (CKI Backport Bot) [RHEL-107896]
- net_sched: ets: Fix double list add in class with netem as child qdisc (CKI Backport Bot) [RHEL-104727] {CVE-2025-37914}
- sch_ets: make est_qlen_notify() idempotent (Ivan Vecera) [RHEL-104727]
- i40e: fix MMIO write access to an invalid page in i40e_clear_hw (Dennis Chen) [RHEL-106047] {CVE-2025-38200}
- cxgb4: use port number to set mac addr (CKI Backport Bot) [RHEL-79668]
Resolves: RHEL-104727, RHEL-106047, RHEL-107896, RHEL-108153, RHEL-79668

* Mon Aug 11 2025 Julio Faracco <jfaracco@redhat.com> [6.12.0-55.28.1.el10_0]
- tls: always refresh the queue when reading sock (CKI Backport Bot) [RHEL-106091] {CVE-2025-38471}
- selftests: net: bpf_offload: add 'libbpf_global' to ignored maps (Hangbin Liu) [RHEL-105901]
- selftests: net: fix error message in bpf_offload (Hangbin Liu) [RHEL-105901]
- selftests: net: add more info to error in bpf_offload (Hangbin Liu) [RHEL-105901]
- net: fix udp gso skb_segment after pull from frag_list (Guillaume Nault) [RHEL-103035] {CVE-2025-38124}
- powerpc/pseries/vas: Add close() callback in vas_vm_ops struct (Mamatha Inamdar) [RHEL-87181]
- s390/pci: Serialize device addition and removal (CKI Backport Bot) [RHEL-100158]
- s390/pci: Allow re-add of a reserved but not yet removed device (CKI Backport Bot) [RHEL-100158]
- s390/pci: Prevent self deletion in disable_slot() (CKI Backport Bot) [RHEL-100158]
- s390/pci: Remove redundant bus removal and disable from zpci_release_device() (CKI Backport Bot) [RHEL-100158]
- s390/pci: Fix duplicate pci_dev_put() in disable_slot() when PF has child VFs (CKI Backport Bot) [RHEL-100158]
- s390/pci: Fix missing check for zpci_create_device() error return (CKI Backport Bot) [RHEL-100158]
- s390/pci: Fix potential double remove of hotplug slot (CKI Backport Bot) [RHEL-100158]
- s390/topology: Improve topology detection (CKI Backport Bot) [RHEL-92096]
- Bluetooth: hci_core: Fix use-after-free in vhci_flush() (CKI Backport Bot) [RHEL-103270] {CVE-2025-38250}
- redhat: Mark kernel incompatible with xdp-tools<1.5.4 (Felix Maurer) [RHEL-100447]
- selftests/bpf: Adjust data size to have ETH_HLEN (Felix Maurer) [RHEL-100447] {CVE-2025-21867}
- bpf, test_run: Fix use-after-free issue in eth_skb_pkt_type() (Felix Maurer) [RHEL-100447] {CVE-2025-21867}
- i2c/designware: Fix an initialization issue (CKI Backport Bot) [RHEL-106626] {CVE-2025-38380}
Resolves: RHEL-100158, RHEL-100447, RHEL-103035, RHEL-103270, RHEL-105901, RHEL-106091, RHEL-106626, RHEL-87181, RHEL-92096

* Fri Aug 08 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.27.1.el10_0]
- Revert "sch_htb: make htb_qlen_notify() idempotent" (Jan Stancek) [RHEL-108141]
- Revert "sch_drr: make drr_qlen_notify() idempotent" (Jan Stancek) [RHEL-108141]
- Revert "sch_qfq: make qfq_qlen_notify() idempotent" (Jan Stancek) [RHEL-108141]
- Revert "codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog()" (Jan Stancek) [RHEL-108141]
- Revert "sch_htb: make htb_deactivate() idempotent" (Jan Stancek) [RHEL-108141]
- Revert "net/sched: Always pass notifications when child class becomes empty" (Jan Stancek) [RHEL-108141]
Resolves: RHEL-108141

* Sun Aug 03 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.26.1.el10_0]
- wifi: rtw88: fix the 'para' buffer size to avoid reading out of bounds (CKI Backport Bot) [RHEL-103169] {CVE-2025-38159}
- Documentation: Fix pci=config_acs= example (Steve Dunnagan) [RHEL-102663]
- PCI/ACS: Fix 'pci=config_acs=' parameter (Steve Dunnagan) [RHEL-102663]
- Revert "smb: client: fix TCP timers deadlock after rmmod" (Paulo Alcantara) [RHEL-106417] {CVE-2025-22077}
- Revert "smb: client: Fix netns refcount imbalance causing leaks and use-after-free" (Paulo Alcantara) [RHEL-106417]
- smb: client: Fix netns refcount imbalance causing leaks and use-after-free (Paulo Alcantara) [RHEL-106417]
- wifi: ath12k: fix invalid access to memory (CKI Backport Bot) [RHEL-103228] {CVE-2025-38292}
- x86/CPU/AMD: Terminate the erratum_1386_microcode array (CKI Backport Bot) [RHEL-104884] {CVE-2024-56721}
- crypto: algif_hash - fix double free in hash_accept (CKI Backport Bot) [RHEL-102247] {CVE-2025-38079}
- net/sched: Always pass notifications when child class becomes empty (CKI Backport Bot) [RHEL-93365] {CVE-2025-38350}
- sch_htb: make htb_deactivate() idempotent (CKI Backport Bot) [RHEL-93365] {CVE-2025-38350}
- codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog() (CKI Backport Bot) [RHEL-93365] {CVE-2025-38350}
- sch_qfq: make qfq_qlen_notify() idempotent (CKI Backport Bot) [RHEL-93365] {CVE-2025-38350}
- sch_drr: make drr_qlen_notify() idempotent (CKI Backport Bot) [RHEL-93365] {CVE-2025-38350}
- sch_htb: make htb_qlen_notify() idempotent (CKI Backport Bot) [RHEL-93365] {CVE-2025-38350}
- redhat: update BUILD_TARGET to use rhel-10.0-z-test-pesign (Jan Stancek)
- mm/hugetlb: fix huge_pmd_unshare() vs GUP-fast race (Rafael Aquini) [RHEL-101263] {CVE-2025-38085}
- mm/hugetlb: unshare page tables during VMA split, not before (Rafael Aquini) [RHEL-101298] {CVE-2025-38084}
- tools/testing/vma: add missing function stub (Rafael Aquini) [RHEL-101298]
- mm: fix copy_vma() error handling for hugetlb mappings (Rafael Aquini) [RHEL-101298]
- PCI: Use downstream bridges for distributing resources (Jennifer Berringer) [RHEL-102664]
- PCI/pwrctrl: Cancel outstanding rescan work when unregistering (Myron Stowe) [RHEL-103212] {CVE-2025-38137}
- bnxt_en: Skip MAC loopback selftest if it is unsupported by FW (CKI Backport Bot) [RHEL-82538]
- bnxt_en: Skip PHY loopback ethtool selftest if unsupported by FW (CKI Backport Bot) [RHEL-82538]
Resolves: RHEL-101263, RHEL-101298, RHEL-102247, RHEL-102663, RHEL-102664, RHEL-103169, RHEL-103212, RHEL-103228, RHEL-104884, RHEL-106417, RHEL-82538, RHEL-93365

* Mon Jul 28 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.25.1.el10_0]
- net_sched: hfsc: Address reentrant enqueue adding class to eltree twice (Ivan Vecera) [RHEL-97533] {CVE-2025-38001}
- sch_hfsc: Fix qlen accounting bug when using peek in hfsc_enqueue() (Davide Caratti) [RHEL-97533] {CVE-2025-38000}
- net_sched: hfsc: Fix a UAF vulnerability in class with netem as child qdisc (Ivan Vecera) [RHEL-97533] {CVE-2025-37890}
- sch_hfsc: make hfsc_qlen_notify() idempotent (Ivan Vecera) [RHEL-97533]
- RDMA/core: Fix "KASAN: slab-use-after-free Read in ib_register_device" problem (Michal Schmidt) [RHEL-99060] {CVE-2025-38022}
- RDMA/core: Fix use-after-free when rename device name (Michal Schmidt) [RHEL-99060] {CVE-2025-22085}
- nvme-tcp: sanitize request list handling (CKI Backport Bot) [RHEL-103235] {CVE-2025-38264}
- net: tipc: fix refcount warning in tipc_aead_encrypt (Xin Long) [RHEL-103097]
- net/tipc: fix slab-use-after-free Read in tipc_aead_encrypt_done (CKI Backport Bot) [RHEL-103097] {CVE-2025-38052}
- tcp: adjust rcvq_space after updating scaling ratio (Guillaume Nault) [RHEL-101775]
- ext4: avoid journaling sb update on error if journal is destroying (CKI Backport Bot) [RHEL-93608] {CVE-2025-22113}
- ext4: define ext4_journal_destroy wrapper (CKI Backport Bot) [RHEL-93608] {CVE-2025-22113}
- HID: intel-ish-hid: Fix use-after-free issue in ishtp_hid_remove() (CKI Backport Bot) [RHEL-98862] {CVE-2025-21928}
- HID: intel-ish-hid: Fix use-after-free issue in hid_ishtp_cl_remove() (CKI Backport Bot) [RHEL-98886] {CVE-2025-21929}
- usb: hub: Fix flushing of delayed work used for post resume purposes (Desnes Nunes) [RHEL-104681]
- usb: hub: Fix flushing and scheduling of delayed work that tunes runtime pm (Desnes Nunes) [RHEL-104681]
- usb: hub: fix detection of high tier USB3 devices behind suspended hubs (Desnes Nunes) [RHEL-104681]
- net/sched: fix use-after-free in taprio_dev_notifier (CKI Backport Bot) [RHEL-101322] {CVE-2025-38087}
- net: ch9200: fix uninitialised access during mii_nway_restart (CKI Backport Bot) [RHEL-101224] {CVE-2025-38086}
- padata: avoid UAF for reorder_work (Waiman Long) [RHEL-97040] {CVE-2025-21726}
- padata: fix UAF in padata_reorder (Waiman Long) [RHEL-97040] {CVE-2025-21727}
- padata: add pd get/put refcnt helper (Waiman Long) [RHEL-97040]
- padata: fix sysfs store callback check (Waiman Long) [RHEL-97040]
- padata: Clean up in padata_do_multithreaded() (Waiman Long) [RHEL-97040]
- memstick: rtsx_usb_ms: Fix slab-use-after-free in rtsx_usb_ms_drv_remove (CKI Backport Bot) [RHEL-99041] {CVE-2025-22020}
Resolves: RHEL-101224, RHEL-101322, RHEL-101775, RHEL-103097, RHEL-103235, RHEL-104681, RHEL-93608, RHEL-97040, RHEL-97533, RHEL-98862, RHEL-98886, RHEL-99041, RHEL-99060

* Wed Jul 23 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.24.1.el10_0]
- net_sched: hfsc: Fix a UAF vulnerability in class handling (CKI Backport Bot) [RHEL-95867] {CVE-2025-37797}
Resolves: RHEL-95867

* Sun Jul 20 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.23.1.el10_0]
- ext4: fix out-of-bound read in ext4_xattr_inode_dec_ref_all() (CKI Backport Bot) [RHEL-93570] {CVE-2025-22121}
- ext4: introduce ITAIL helper (CKI Backport Bot) [RHEL-93570] {CVE-2025-22121}
- net/mdiobus: Fix potential out-of-bounds clause 45 read/write access (CKI Backport Bot) [RHEL-102097] {CVE-2025-38110}
- powerpc/vas: Return -EINVAL if the offset is non-zero in mmap() (Mamatha Inamdar) [RHEL-101307] {CVE-2025-38088}
- powerpc/powernv/memtrace: Fix out of bounds issue in memtrace mmap (Mamatha Inamdar) [RHEL-101307] {CVE-2025-38088}
- net/mlx5: Fill out devlink dev info only for PFs (CKI Backport Bot) [RHEL-93772]
- RDMA/mlx5: Fix page_size variable overflow (CKI Backport Bot) [RHEL-99325] {CVE-2025-22091}
- ACPI: CPPC: Fix _CPC register setting issue (Mark Langsdorf) [RHEL-85317]
Resolves: RHEL-101307, RHEL-102097, RHEL-85317, RHEL-93570, RHEL-93772, RHEL-99325

* Sun Jul 13 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.22.1.el10_0]
- mm/huge_memory: fix dereferencing invalid pmd migration entry (Luiz Capitulino) [RHEL-96384] {CVE-2025-37958}
- i2c: tegra: check msg length in SMBUS block read (Steve Dunnagan) [RHEL-99824]
- s390/virtio_ccw: Don't allocate/assign airqs for non-existing queues (CKI Backport Bot) [RHEL-87558]
- sunrpc: handle SVC_GARBAGE during svc auth processing as auth error (CKI Backport Bot) [RHEL-101331] {CVE-2025-38089}
- media: uvcvideo: Announce the user our deprecation intentions (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Allow changing noparam on the fly (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Invert default value for nodrop module param (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Propagate buf->error to userspace (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Flush the control cache when we get an event (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Annotate lock requirements for uvc_ctrl_set (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Remove dangling pointers (Desnes Nunes) [RHEL-98779] {CVE-2024-58002}
- media: uvcvideo: Remove redundant NULL assignment (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Only save async fh if success (Desnes Nunes) [RHEL-98779]
- media: uvcvideo: Fix double free in error path (CKI Backport Bot) [RHEL-98805] {CVE-2024-57980}
- wifi: iwlwifi: limit printed string from FW file (CKI Backport Bot) [RHEL-99396] {CVE-2025-21905}
Resolves: RHEL-101331, RHEL-87558, RHEL-96384, RHEL-98779, RHEL-98805, RHEL-99396, RHEL-99824

* Sun Jul 06 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.21.1.el10_0]
- ice, irdma: fix an off by one in error handling code (Petr Oros) [RHEL-80558]
- irdma: free iwdev->rf after removing MSI-X (Petr Oros) [RHEL-80558]
- ice: Fix signedness bug in ice_init_interrupt_scheme() (Petr Oros) [RHEL-80558]
- ice: init flow director before RDMA (Petr Oros) [RHEL-80558]
- ice: simplify VF MSI-X managing (Petr Oros) [RHEL-80558]
- ice: enable_rdma devlink param (Petr Oros) [RHEL-80558]
- ice: treat dyn_allowed only as suggestion (Petr Oros) [RHEL-80558]
- ice, irdma: move interrupts code to irdma (Petr Oros) [RHEL-80558]
- ice: get rid of num_lan_msix field (Petr Oros) [RHEL-80558]
- ice: remove splitting MSI-X between features (Petr Oros) [RHEL-80558]
- ice: devlink PF MSI-X max and min parameter (Petr Oros) [RHEL-80558]
- ice: ice_probe: init ice_adapter after HW init (Petr Oros) [RHEL-80558]
- ice: minor: rename goto labels from err to unroll (Petr Oros) [RHEL-80558]
- ice: split ice_init_hw() out from ice_init_dev() (Petr Oros) [RHEL-80558]
- ice: c827: move wait for FW to ice_init_hw() (Petr Oros) [RHEL-80558]
- exfat: fix random stack corruption after get_block (CKI Backport Bot) [RHEL-99441] {CVE-2025-22036}
Resolves: RHEL-80558, RHEL-99441

* Mon Jun 30 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.20.1.el10_0]
- x86/microcode/AMD: Fix out-of-bounds on systems with CPU-less NUMA nodes (CKI Backport Bot) [RHEL-99007] {CVE-2025-21991}
- page_pool: Track DMA-mapped pages and unmap them when destroying the pool (Toke Høiland-Jørgensen) [RHEL-84148]
- page_pool: Move pp_magic check into helper functions (Toke Høiland-Jørgensen) [RHEL-84148]
- scsi: storvsc: Explicitly set max_segment_size to UINT_MAX (Ewan D. Milne) [RHEL-97172]
- vmxnet3: Fix malformed packet sizing in vmxnet3_process_xdp (CKI Backport Bot) [RHEL-97116] {CVE-2025-37799}
- dm mpath: replace spin_lock_irqsave with spin_lock_irq (Benjamin Marzinski) [RHEL-89484]
- dm-mpath: Don't grab work_mutex while probing paths (Benjamin Marzinski) [RHEL-89484]
- dm mpath: Interface for explicit probing of active paths (Benjamin Marzinski) [RHEL-89484]
- dm: Allow .prepare_ioctl to handle ioctls directly (Benjamin Marzinski) [RHEL-89484]
- ipv6: mcast: extend RCU protection in igmp6_send() (CKI Backport Bot) [RHEL-94685] {CVE-2025-21759}
- net: add dev_net_rcu() helper (Hangbin Liu) [RHEL-94685]
Resolves: RHEL-84148, RHEL-89484, RHEL-94685, RHEL-97116, RHEL-97172, RHEL-99007

* Sun Jun 22 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.19.1.el10_0]
- ibmvnic: Use kernel helpers for hex dumps (CKI Backport Bot) [RHEL-89031] {CVE-2025-22104}
- eth: bnxt: fix truesize for mb-xdp-pass case (CKI Backport Bot) [RHEL-88329] {CVE-2025-21961}
- ice: Avoid setting default Rx VSI twice in switchdev setup (Petr Oros) [RHEL-88309]
- ice: Fix deinitializing VF in error path (Petr Oros) [RHEL-88309] {CVE-2025-21883}
- ice: add E830 HW VF mailbox message limit support (Petr Oros) [RHEL-88309]
- block/Kconfig: Allow selecting BLK_CGROUP_PUNT_BIO (Ming Lei) [RHEL-87376]
Resolves: RHEL-87376, RHEL-88309, RHEL-88329, RHEL-89031

* Mon Jun 16 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.18.1.el10_0]
- sched/rt: Fix race in push_rt_task (Phil Auld) [RHEL-91792]
- mm/vmalloc: combine all TLB flush operations of KASAN shadow virtual address into one operation (Luiz Capitulino) [RHEL-86954] {CVE-2024-56559}
- block: make sure ->nr_integrity_segments is cloned in blk_rq_prep_clone (Ming Lei) [RHEL-92013]
- proc: fix UAF in proc_get_inode() (CKI Backport Bot) [RHEL-86810] {CVE-2025-21999}
- ext4: ignore xattrs past end (CKI Backport Bot) [RHEL-94260] {CVE-2025-37738}
- nvme-fabrics: handle zero MAXCMD without closing the connection (Maurizio Lombardi) [RHEL-94205]
- ext4: fix off-by-one error in do_split (CKI Backport Bot) [RHEL-93645] {CVE-2025-23150}
- r8169: disable RTL8126 ZRX-DC timeout (CKI Backport Bot) [RHEL-93482]
- r8169: enable RTL8168H/RTL8168EP/RTL8168FP ASPM support (CKI Backport Bot) [RHEL-93482]
Resolves: RHEL-86810, RHEL-86954, RHEL-91792, RHEL-92013, RHEL-93482, RHEL-93645, RHEL-94205, RHEL-94260

* Sun Jun 08 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.17.1.el10_0]
- vmxnet3: unregister xdp rxq info in the reset path (CKI Backport Bot) [RHEL-92473]
- block: fix 'kmem_cache of name 'bio-108' already exists' (Ming Lei) [RHEL-89955]
- ice: implement low latency PHY timer updates (Petr Oros) [RHEL-89810]
- ice: check low latency PHY timer update firmware capability (Petr Oros) [RHEL-89810]
- ice: add lock to protect low latency interface (Petr Oros) [RHEL-89810]
- ice: rename TS_LL_READ* macros to REG_LL_PROXY_H_* (Petr Oros) [RHEL-89810]
- ice: use read_poll_timeout_atomic in ice_read_phy_tstamp_ll_e810 (Petr Oros) [RHEL-89810]
- cifs: Fix integer overflow while processing acdirmax mount option (Paulo Alcantara) [RHEL-87945] {CVE-2025-21963}
- smb: client: fix UAF in decryption with multichannel (CKI Backport Bot) [RHEL-94463] {CVE-2025-37750}
- sched/fair: Fix CPU bandwidth limit bypass during CPU hotplug (Phil Auld) [RHEL-86346]
- keys: Fix UAF in key_put() (CKI Backport Bot) [RHEL-86853] {CVE-2025-21893}
- ndisc: use RCU protection in ndisc_alloc_skb() (Xin Long) [RHEL-89556] {CVE-2025-21764}
- ipv6: use RCU protection in ip6_default_advmss() (Xin Long) [RHEL-89556] {CVE-2025-21765}
- net: add dev_net_rcu() helper (Xin Long) [RHEL-89556] {CVE-2025-21765}
- vfio/pci: Align huge faults to order (Alex Williamson) [RHEL-93762]
- Bluetooth: L2CAP: Fix corrupted list in hci_chan_del (David Marlin) [RHEL-87894] {CVE-2025-21969}
- Bluetooth: L2CAP: Fix slab-use-after-free Read in l2cap_send_cmd (CKI Backport Bot) [RHEL-87894] {CVE-2025-21969}
- cifs: Fix integer overflow while processing closetimeo mount option (CKI Backport Bot) [RHEL-87904] {CVE-2025-21962}
- ovl: fix UAF in ovl_dentry_update_reval by moving dput() in ovl_link_up (CKI Backport Bot) [RHEL-93459] {CVE-2025-21887}
- wifi: cfg80211: init wiphy_work before allocating rfkill fails (CKI Backport Bot) [RHEL-87935] {CVE-2025-21979}
- wifi: cfg80211: cancel wiphy_work before freeing wiphy (CKI Backport Bot) [RHEL-87935] {CVE-2025-21979}
Resolves: RHEL-86346, RHEL-86853, RHEL-87894, RHEL-87904, RHEL-87935, RHEL-87945, RHEL-89556, RHEL-89810, RHEL-89955, RHEL-92473, RHEL-93459, RHEL-93762, RHEL-94463

* Tue Jun 03 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.16.1.el10_0]
- net: fix geneve_opt length integer overflow (Guillaume Nault) [RHEL-87980] {CVE-2025-22055}
- vsock/virtio: discard packets if the transport changes (Jon Maloy) [RHEL-86874] {CVE-2025-21669}
Resolves: RHEL-86874, RHEL-87980

* Fri May 30 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.15.1.el10_0]
- net: gso: fix ownership in __udp_gso_segment (CKI Backport Bot) [RHEL-88518] {CVE-2025-21926}
- xsk: fix an integer overflow in xp_create_and_assign_umem() (CKI Backport Bot) [RHEL-87915] {CVE-2025-21997}
- wifi: ath12k: Fix invalid data access in ath12k_dp_rx_h_undecap_nwifi (CKI Backport Bot) [RHEL-93269] {CVE-2025-37943}
Resolves: RHEL-87915, RHEL-88518, RHEL-93269

* Mon May 26 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.14.1.el10_0]
- sched/fair: Fix potential memory corruption in child_cfs_rq_on_list (CKI Backport Bot) [RHEL-88323] {CVE-2025-21919}
- drm/i915/dp_mst: Don't require DSC hblank quirk for a non-DSC compatible mode (Jocelyn Falempe) [RHEL-88536]
- drm/i915/dp_mst: Handle error during DSC BW overhead/slice calculation (Jocelyn Falempe) [RHEL-88536]
- drm/i915/display: Use joined pipes in dsc helpers for slices, bpp (Jocelyn Falempe) [RHEL-88536]
- drm/i915/display: Use joined pipes in intel_mode_valid_max_plane_size (Jocelyn Falempe) [RHEL-88536]
- drm/i915/display: Use joined pipes in intel_dp_joiner_needs_dsc (Jocelyn Falempe) [RHEL-88536]
- drm/i915/display: Simplify intel_joiner_num_pipes and its usage (Jocelyn Falempe) [RHEL-88536]
- drm/i915/display: Check whether platform supports joiner (Jocelyn Falempe) [RHEL-88536]
- Revert "drm/i915/dp_mst: Handle error during DSC BW overhead/slice calculation" (Jocelyn Falempe) [RHEL-88536]
- Revert "drm/i915/dp_mst: Don't require DSC hblank quirk for a non-DSC compatible mode" (Jocelyn Falempe) [RHEL-88536]
- drm/mgag200: Added support for the new device G200eH5 (Jocelyn Falempe) [RHEL-88909]
- cifs: Fix integer overflow while processing acregmax mount option (CKI Backport Bot) [RHEL-87925] {CVE-2025-21964}
- ext4: fix OOB read when checking dotdot dir (CKI Backport Bot) [RHEL-87991] {CVE-2025-37785}
|
||||
Resolves: RHEL-87925, RHEL-87991, RHEL-88323, RHEL-88536, RHEL-88909
|
||||
|
||||
* Sun May 18 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.13.1.el10_0]
|
||||
- gitlab-ci: use rhel10.0 builder image (Michael Hofmann)
|
||||
- vsock: Orphan socket after transport release (Jay Shin) [RHEL-89161] {CVE-2025-21756}
|
||||
- vsock: Keep the binding until socket destruction (Jay Shin) [RHEL-89161] {CVE-2025-21756}
|
||||
- bpf, vsock: Invoke proto::close on close() (Jay Shin) [RHEL-89161] {CVE-2025-21756}
|
||||
- idpf: call set_real_num_queues in idpf_open (CKI Backport Bot) [RHEL-79479]
|
||||
- media: uvcvideo: Skip parsing frames of type UVC_VS_UNDEFINED in uvc_parse_format (CKI Backport Bot) [RHEL-89591] {CVE-2024-53104}
|
||||
- redhat: enable CONFIG_WERROR=y (Jan Stancek) [RHEL-89564]
|
||||
- redhat: don't enforce WERROR for 3rd-party OOT kmods (Jan Stancek) [RHEL-89564]
|
||||
- redhat: make ENABLE_WERROR enable also KVM_WERROR (Jan Stancek) [RHEL-89564]
|
||||
- fortify: Hide run-time copy size from value range tracking (Jan Stancek) [RHEL-89564]
|
||||
- resolve_btfids: Fix compiler warnings (Jan Stancek) [RHEL-89564]
|
||||
- ixgbe: fix media type detection for E610 device (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbevf: Add support for Intel(R) E610 device (Corinna Vinschen) [RHEL-85810]
|
||||
- PCI: Add PCI_VDEVICE_SUB helper macro (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: fix media cage present detection for E610 device (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Enable link management in E610 device (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Clean up the E610 link management related code (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Add ixgbe_x540 multiple header inclusion protection (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Add support for EEPROM dump in E610 device (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Add support for NVM handling in E610 device (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Add link management support for E610 device (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Add support for E610 device capabilities detection (Corinna Vinschen) [RHEL-85810]
|
||||
- ixgbe: Add support for E610 FW Admin Command Interface (Corinna Vinschen) [RHEL-85810]
|
||||
- smb: client: don't retry IO on failed negprotos with soft mounts (Jay Shin) [RHEL-85525]
|
||||
Resolves: RHEL-79479, RHEL-85525, RHEL-85810, RHEL-89161, RHEL-89564, RHEL-89591
|
||||
|
||||
* Mon May 12 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.12.1.el10_0]
|
||||
- scsi: core: Fix command pass through retry regression (Ewan D. Milne) [RHEL-77121]
|
||||
- dm-flakey: Fix memory corruption in optional corrupt_bio_byte feature (CKI Backport Bot) [RHEL-86903] {CVE-2025-21966}
|
||||
- ice: stop storing XDP verdict within ice_rx_buf (Petr Oros) [RHEL-86860]
|
||||
- ice: gather page_count()'s of each frag right before XDP prog call (Petr Oros) [RHEL-86860]
|
||||
- ice: put Rx buffers after being done with current frame (Petr Oros) [RHEL-86860]
|
||||
- iscsi_ibft: Fix UBSAN shift-out-of-bounds warning in ibft_attr_show_nic() (CKI Backport Bot) [RHEL-86847] {CVE-2025-21993}
|
||||
Resolves: RHEL-77121, RHEL-86847, RHEL-86860, RHEL-86903
|
||||
|
||||
* Thu Apr 24 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.11.1.el10_0]
|
||||
- smb: client: fix regression with guest option (Jay Shin) [RHEL-83861]
|
||||
- pnfs/flexfiles: retry getting layout segment for reads (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs: fix incorrect error handling in LOCALIO (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs: probe for LOCALIO when v3 client reconnects to server (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs: probe for LOCALIO when v4 client reconnects to server (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs/localio: remove redundant code and simplify LOCALIO enablement (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs_common: add nfs_localio trace events (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs_common: track all open nfsd_files per LOCALIO nfs_client (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs_common: rename nfslocalio nfs_uuid_lock to nfs_uuids_lock (Benjamin Coddington) [RHEL-87770]
|
||||
- nfsd: nfsd_file_acquire_local no longer returns GC'd nfsd_file (Benjamin Coddington) [RHEL-87770]
|
||||
- nfsd: rename nfsd_serv_ prefixed methods and variables with nfsd_net_ (Benjamin Coddington) [RHEL-87770]
|
||||
- nfsd: update percpu_ref to manage references on nfsd_net (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs: cache all open LOCALIO nfsd_file(s) in client (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs_common: move localio_lock to new lock member of nfs_uuid_t (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs_common: rename functions that invalidate LOCALIO nfs_clients (Benjamin Coddington) [RHEL-87770]
|
||||
- nfsd: add nfsd_file_{get,put} to 'nfs_to' nfsd_localio_operations (Benjamin Coddington) [RHEL-87770]
|
||||
- nfs/localio: add direct IO enablement with sync and async IO support (Benjamin Coddington) [RHEL-87770]
|
||||
- ice: ensure periodic output start time is in the future (Petr Oros) [RHEL-86858]
|
||||
- ice: fix PHY Clock Recovery availability check (Petr Oros) [RHEL-86858]
|
||||
- ice: Drop auxbus use for PTP to finalize ice_adapter move (Petr Oros) [RHEL-86858]
|
||||
- ice: Use ice_adapter for PTP shared data instead of auxdev (Petr Oros) [RHEL-86858]
|
||||
- ice: Initial support for E825C hardware in ice_adapter (Petr Oros) [RHEL-86858]
|
||||
- ice: Add ice_get_ctrl_ptp() wrapper to simplify the code (Petr Oros) [RHEL-86858]
|
||||
- ice: Introduce ice_get_phy_model() wrapper (Petr Oros) [RHEL-86858]
|
||||
- ice: Enable 1PPS out from CGU for E825C products (Petr Oros) [RHEL-86858]
|
||||
- ice: Read SDP section from NVM for pin definitions (Petr Oros) [RHEL-86858]
|
||||
- ice: Disable shared pin on E810 on setfunc (Petr Oros) [RHEL-86858]
|
||||
- ice: Cache perout/extts requests and check flags (Petr Oros) [RHEL-86858]
|
||||
- ice: Align E810T GPIO to other products (Petr Oros) [RHEL-86858]
|
||||
- ice: Add SDPs support for E825C (Petr Oros) [RHEL-86858]
|
||||
- ice: Implement ice_ptp_pin_desc (Petr Oros) [RHEL-86858]
|
||||
- nvme-tcp: fix potential memory corruption in nvme_tcp_recv_pdu() (Chris Leech) [RHEL-86922] {CVE-2025-21927}
|
||||
- scsi: storvsc: Set correct data length for sending SCSI command without payload (Cathy Avery) [RHEL-83216]
|
||||
- smb: client: fix chmod(2) regression with ATTR_READONLY (Jan Stancek) [RHEL-82677]
|
||||
- mm/hugetlb: fix hugepage allocation for interleaved memory nodes (CKI Backport Bot) [RHEL-85441]
|
||||
Resolves: RHEL-82677, RHEL-83216, RHEL-83861, RHEL-85441, RHEL-86858, RHEL-86922, RHEL-87770
|
||||
|
||||
* Thu Apr 17 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.10.1.el10_0]
|
||||
- net: mana: use ethtool string helpers (Maxim Levitsky) [RHEL-85943]
|
||||
- net: mana: cleanup mana struct after debugfs_remove() (Maxim Levitsky) [RHEL-85943]
|
||||
- net: mana: Cleanup "mana" debugfs dir after cleanup of all children (Maxim Levitsky) [RHEL-85943]
|
||||
- net: mana: Fix irq_contexts memory leak in mana_gd_setup_irqs (Maxim Levitsky) [RHEL-85943]
|
||||
- net: mana: Fix memory leak in mana_gd_setup_irqs (Maxim Levitsky) [RHEL-85943]
|
||||
- net :mana :Request a V2 response version for MANA_QUERY_GF_STAT (Maxim Levitsky) [RHEL-85943]
|
||||
- net: mana: Enable debugfs files for MANA device (Maxim Levitsky) [RHEL-85943]
|
||||
- net: mana: Add get_link and get_link_ksettings in ethtool (Maxim Levitsky) [RHEL-85943]
|
||||
- net: mana: Increase the DEF_RX_BUFFERS_PER_QUEUE to 1024 (Maxim Levitsky) [RHEL-85943]
|
||||
- redhat: drop Y issues from changelog (Jan Stancek)
|
||||
Resolves: RHEL-85943
|
||||
|
||||
* Tue Mar 25 2025 Jan Stancek <jstancek@redhat.com> [6.12.0-55.9.1.el10_0]
|
||||
- af_packet: fix vlan_get_protocol_dgram() vs MSG_PEEK (Davide Caratti) [RHEL-80306] {CVE-2024-57901}
|
||||
- redhat: kabi: update stablelist checksums (Čestmír Kalina) [RHEL-80552]
|
||||
|
@ -1124,6 +1124,7 @@ Recommends: linux-firmware\
Requires(preun): systemd >= 200\
Conflicts: xfsprogs < 4.3.0-1\
Conflicts: xorg-x11-drv-vmmouse < 13.0.99\
Conflicts: xdp-tools < 1.5.4\
%{expand:%%{?kernel%{?1:_%{1}}_conflicts:Conflicts: %%{kernel%{?1:_%{1}}_conflicts}}}\
%{expand:%%{?kernel%{?1:_%{1}}_obsoletes:Obsoletes: %%{kernel%{?1:_%{1}}_obsoletes}}}\
%{expand:%%{?kernel%{?1:_%{1}}_provides:Provides: %%{kernel%{?1:_%{1}}_provides}}}\

@ -79,8 +79,8 @@ class CommitTags:
    def get_changelog_str(self):
        chnglog = []
        tickets = sorted(self.tag_dict['Bugzilla']) + sorted(self.tag_dict['JIRA'])
        if self.tag_dict['Y-Bugzilla'] or self.tag_dict['Y-JIRA']:
            tickets = tickets + sorted(self.tag_dict['Y-Bugzilla']) + sorted(self.tag_dict['Y-JIRA'])
        #if self.tag_dict['Y-Bugzilla'] or self.tag_dict['Y-JIRA']:
        #    tickets = tickets + sorted(self.tag_dict['Y-Bugzilla']) + sorted(self.tag_dict['Y-JIRA'])
        if tickets:
            chnglog.append('[' + ' '.join(tickets) + ']')
        if self.tag_dict['CVE']:
@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no

@ -97,7 +97,7 @@ TESTPATCH=../redhat/linux-kernel-test.patch
TOPDIR=..
UPSTREAMBUILD=0.rc5.
UPSTREAMBUILD_GIT_ONLY=
UPSTREAM_BRANCH=v6.10
UPSTREAM_BRANCH=v6.12
UPSTREAM_TARBALL_NAME=5.16-rc5
VERSION_ON_UPSTREAM=0
YSTREAM_FLAG=no