Import of kernel-5.14.0-570.44.1.el9_6
parent 4d6c34bc1c
commit 49ffffa68d
@@ -12,7 +12,7 @@ RHEL_MINOR = 6
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 570.42.2
+RHEL_RELEASE = 570.44.1
 
 #
 # ZSTREAM
@ -1003,7 +1003,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
|
||||
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
|
||||
{
|
||||
if (radix_enabled())
|
||||
@ -1011,6 +1011,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
|
||||
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
|
||||
unsigned long addr, unsigned long next)
|
||||
@ -1147,6 +1148,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
/*
* Make sure we align the start vmemmap addr so that we calculate
* the correct start_pfn in the altmap boundary check to decide whether
* we should use altmap or RAM based backing memory allocation. The
* address also needs to be aligned for the set_pte operation.
*
* If the start addr is already PMD_SIZE aligned we will try to use
* a pmd mapping. We don't want to be too aggressive here because
* that will cause more allocations in RAM. So only if the namespace
* vmemmap start addr is PMD_SIZE aligned will we use a PMD mapping.
*/

start = ALIGN_DOWN(start, PAGE_SIZE);
|
||||
for (addr = start; addr < end; addr = next) {
|
||||
next = pmd_addr_end(addr, end);
|
||||
|
||||
@ -1172,8 +1186,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
|
||||
* in altmap block allocation failures, in which case
|
||||
* we fallback to RAM for vmemmap allocation.
|
||||
*/
|
||||
if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
|
||||
altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
|
||||
if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
|
||||
altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
|
||||
/*
|
||||
* make sure we don't create altmap mappings
|
||||
* covering things outside the device.
|
||||
|
@ -26,7 +26,6 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/swiotlb.h>
|
||||
@ -2559,7 +2558,6 @@ static const char *dev_uevent_name(const struct kobject *kobj)
|
||||
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
|
||||
{
|
||||
const struct device *dev = kobj_to_dev(kobj);
|
||||
struct device_driver *driver;
|
||||
int retval = 0;
|
||||
|
||||
/* add device node properties if present */
|
||||
@ -2588,12 +2586,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
|
||||
if (dev->type && dev->type->name)
|
||||
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
|
||||
|
||||
/* Synchronize with module_remove_driver() */
|
||||
rcu_read_lock();
|
||||
driver = READ_ONCE(dev->driver);
|
||||
if (driver)
|
||||
add_uevent_var(env, "DRIVER=%s", driver->name);
|
||||
rcu_read_unlock();
|
||||
if (dev->driver)
|
||||
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
|
||||
|
||||
/* Add common DT information about the device */
|
||||
of_device_uevent(dev, env);
|
||||
@ -2663,8 +2657,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
|
||||
if (!env)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Synchronize with really_probe() */
|
||||
device_lock(dev);
|
||||
/* let the kset specific function add its keys */
|
||||
retval = kset->uevent_ops->uevent(&dev->kobj, env);
|
||||
device_unlock(dev);
|
||||
if (retval)
|
||||
goto out;
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include "base.h"
|
||||
|
||||
static char *make_driver_name(struct device_driver *drv)
|
||||
@ -78,9 +77,6 @@ void module_remove_driver(struct device_driver *drv)
|
||||
if (!drv)
|
||||
return;
|
||||
|
||||
/* Synchronize with dev_uevent() */
|
||||
synchronize_rcu();
|
||||
|
||||
sysfs_remove_link(&drv->p->kobj, "module");
|
||||
|
||||
if (drv->owner)
|
||||
|
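The driver-core hunks above make dev_uevent() read dev->driver under rcu_read_lock()/READ_ONCE(), add a matching synchronize_rcu() in module_remove_driver(), and take device_lock() in uevent_show() to synchronize with really_probe(). As a rough sketch of the same publish/retire pattern in userspace (assuming liburcu's classic API; the struct and function names below are invented for the demo and are not the kernel's):

/* Build assumption: liburcu installed; gcc rcu_demo.c -lurcu */
#include <urcu.h>   /* rcu_read_lock(), rcu_dereference(), synchronize_rcu(), ... */
#include <stdio.h>
#include <stdlib.h>

struct driver { const char *name; };

static struct driver *cur_drv;           /* RCU-protected pointer, like dev->driver */

static void emit_uevent(void)            /* reader side, like dev_uevent() */
{
	struct driver *drv;

	rcu_read_lock();
	drv = rcu_dereference(cur_drv);
	if (drv)
		printf("DRIVER=%s\n", drv->name);
	rcu_read_unlock();
}

static void remove_driver(void)          /* updater side, like module_remove_driver() */
{
	struct driver *old = cur_drv;

	rcu_assign_pointer(cur_drv, NULL);
	synchronize_rcu();               /* wait until no reader can still see 'old' */
	free(old);
}

int main(void)
{
	struct driver *drv = malloc(sizeof(*drv));

	drv->name = "demo";
	rcu_register_thread();
	rcu_assign_pointer(cur_drv, drv);
	emit_uevent();
	remove_driver();
	emit_uevent();                   /* prints nothing: driver already retired */
	rcu_unregister_thread();
	return 0;
}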
@ -758,7 +758,7 @@ tx_free:
|
||||
dev_kfree_skb_any(skb);
|
||||
tx_kick_pending:
|
||||
if (BNXT_TX_PTP_IS_SET(lflags)) {
|
||||
txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
|
||||
txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
|
||||
atomic64_inc(&bp->ptp_cfg->stats.ts_err);
|
||||
if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
|
||||
/* set SKB to err so PTP worker will clean up */
|
||||
@ -766,7 +766,7 @@ tx_kick_pending:
|
||||
}
|
||||
if (txr->kick_pending)
|
||||
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
|
||||
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
|
||||
txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
|
||||
dev_core_stats_tx_dropped_inc(dev);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
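The bnxt hunks above stop indexing tx_buf_ring with the raw producer value and go through RING_TX(bp, ...) instead; the idea is that a free-running producer counter has to be masked into the power-of-two ring before it is used as an array index. A minimal standalone sketch (assumed ring size and macro name, not the driver's):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical ring size; real rings size/mask come from the driver config. */
#define RING_SIZE 256
#define RING_IDX(prod) ((prod) & (RING_SIZE - 1))

int main(void)
{
	uint32_t prod = 300;     /* raw producer count, already past one wrap */

	/* The raw value would index out of bounds; the masked value stays in the ring. */
	printf("raw %u -> slot %u\n", prod, RING_IDX(prod));   /* slot 44 */
	return 0;
}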
@ -6,5 +6,6 @@
|
||||
config ENIC
|
||||
tristate "Cisco VIC Ethernet NIC Support"
|
||||
depends on PCI
|
||||
select PAGE_POOL
|
||||
help
|
||||
This enables the support for the Cisco VIC Ethernet card.
|
||||
|
@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
|
||||
|
||||
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
|
||||
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
|
||||
enic_ethtool.o enic_api.o enic_clsf.o
|
||||
enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
|
||||
|
||||
|
@ -40,28 +40,7 @@ struct cq_desc {
|
||||
#define CQ_DESC_COMP_NDX_BITS 12
|
||||
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
|
||||
|
||||
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
|
||||
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
|
||||
{
|
||||
const struct cq_desc *desc = desc_arg;
|
||||
const u8 type_color = desc->type_color;
|
||||
|
||||
*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
|
||||
|
||||
/*
|
||||
* Make sure color bit is read from desc *before* other fields
|
||||
* are read from desc. Hardware guarantees color bit is last
|
||||
* bit (byte) written. Adding the rmb() prevents the compiler
|
||||
* and/or CPU from reordering the reads which would potentially
|
||||
* result in reading stale values.
|
||||
*/
|
||||
|
||||
rmb();
|
||||
|
||||
*type = type_color & CQ_DESC_TYPE_MASK;
|
||||
*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
|
||||
*completed_index = le16_to_cpu(desc->completed_index) &
|
||||
CQ_DESC_COMP_NDX_MASK;
|
||||
}
|
||||
#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
|
||||
#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
|
||||
|
||||
#endif /* _CQ_DESC_H_ */
|
||||
|
@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
|
||||
u8 type_color;
|
||||
};
|
||||
|
||||
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
|
||||
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
|
||||
{
|
||||
cq_desc_dec((struct cq_desc *)desc, type,
|
||||
color, q_number, completed_index);
|
||||
}
|
||||
/*
|
||||
* Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
|
||||
*/
|
||||
#define VNIC_RQ_ALL (~0ULL)
|
||||
|
||||
#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
|
||||
#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
|
||||
#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
|
||||
|
||||
#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
|
||||
#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
|
||||
#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
|
||||
|
||||
#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
|
||||
VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
|
||||
VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
|
||||
|
||||
/* Completion queue descriptor: Ethernet receive queue, 16B */
|
||||
struct cq_enet_rq_desc {
|
||||
@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
|
||||
u8 type_color;
|
||||
};
|
||||
|
||||
/* Completion queue descriptor: Ethernet receive queue, 32B */
|
||||
struct cq_enet_rq_desc_32 {
|
||||
__le16 completed_index_flags;
|
||||
__le16 q_number_rss_type_flags;
|
||||
__le32 rss_hash;
|
||||
__le16 bytes_written_flags;
|
||||
__le16 vlan;
|
||||
__le16 checksum_fcoe;
|
||||
u8 flags;
|
||||
u8 fetch_index_flags;
|
||||
__le32 time_stamp;
|
||||
__le16 time_stamp2;
|
||||
__le16 pie_info;
|
||||
__le32 pie_info2;
|
||||
__le16 pie_info3;
|
||||
u8 pie_info4;
|
||||
u8 type_color;
|
||||
};
|
||||
|
||||
/* Completion queue descriptor: Ethernet receive queue, 64B */
|
||||
struct cq_enet_rq_desc_64 {
|
||||
__le16 completed_index_flags;
|
||||
__le16 q_number_rss_type_flags;
|
||||
__le32 rss_hash;
|
||||
__le16 bytes_written_flags;
|
||||
__le16 vlan;
|
||||
__le16 checksum_fcoe;
|
||||
u8 flags;
|
||||
u8 fetch_index_flags;
|
||||
__le32 time_stamp;
|
||||
__le16 time_stamp2;
|
||||
__le16 pie_info;
|
||||
__le32 pie_info2;
|
||||
__le16 pie_info3;
|
||||
u8 pie_info4;
|
||||
u8 reserved[32];
|
||||
u8 type_color;
|
||||
};
|
||||
|
||||
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
|
||||
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
|
||||
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
|
||||
@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
|
||||
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
|
||||
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
|
||||
|
||||
static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
|
||||
u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
|
||||
u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
|
||||
u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
|
||||
u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
|
||||
u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
|
||||
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
|
||||
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
|
||||
{
|
||||
u16 completed_index_flags;
|
||||
u16 q_number_rss_type_flags;
|
||||
u16 bytes_written_flags;
|
||||
|
||||
cq_desc_dec((struct cq_desc *)desc, type,
|
||||
color, q_number, completed_index);
|
||||
|
||||
completed_index_flags = le16_to_cpu(desc->completed_index_flags);
|
||||
q_number_rss_type_flags =
|
||||
le16_to_cpu(desc->q_number_rss_type_flags);
|
||||
bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
|
||||
|
||||
*ingress_port = (completed_index_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
|
||||
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
|
||||
1 : 0;
|
||||
*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
|
||||
1 : 0;
|
||||
*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
|
||||
1 : 0;
|
||||
|
||||
*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
|
||||
CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
|
||||
*csum_not_calc = (q_number_rss_type_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
|
||||
|
||||
*rss_hash = le32_to_cpu(desc->rss_hash);
|
||||
|
||||
*bytes_written = bytes_written_flags &
|
||||
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
|
||||
*packet_error = (bytes_written_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
|
||||
*vlan_stripped = (bytes_written_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
|
||||
|
||||
/*
|
||||
* Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
|
||||
*/
|
||||
*vlan_tci = le16_to_cpu(desc->vlan);
|
||||
|
||||
if (*fcoe) {
|
||||
*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
|
||||
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
|
||||
*fcoe_fc_crc_ok = (desc->flags &
|
||||
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
|
||||
*fcoe_enc_error = (desc->flags &
|
||||
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
|
||||
*fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
|
||||
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
|
||||
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
|
||||
*checksum = 0;
|
||||
} else {
|
||||
*fcoe_sof = 0;
|
||||
*fcoe_fc_crc_ok = 0;
|
||||
*fcoe_enc_error = 0;
|
||||
*fcoe_eof = 0;
|
||||
*checksum = le16_to_cpu(desc->checksum_fcoe);
|
||||
}
|
||||
|
||||
*tcp_udp_csum_ok =
|
||||
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
|
||||
*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
|
||||
*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
|
||||
*ipv4_csum_ok =
|
||||
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
|
||||
*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
|
||||
*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
|
||||
*ipv4_fragment =
|
||||
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
|
||||
*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
|
||||
}
|
||||
|
||||
#endif /* _CQ_ENET_DESC_H_ */
|
||||
|
@ -17,21 +17,28 @@
|
||||
#include "vnic_nic.h"
|
||||
#include "vnic_rss.h"
|
||||
#include <linux/irq.h>
|
||||
#include <net/page_pool/helpers.h>
|
||||
|
||||
#define DRV_NAME "enic"
|
||||
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
|
||||
|
||||
#define ENIC_BARS_MAX 6
|
||||
|
||||
#define ENIC_WQ_MAX 8
|
||||
#define ENIC_RQ_MAX 8
|
||||
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
|
||||
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
|
||||
#define ENIC_WQ_MAX 256
|
||||
#define ENIC_RQ_MAX 256
|
||||
#define ENIC_RQ_MIN_DEFAULT 8
|
||||
|
||||
#define ENIC_WQ_NAPI_BUDGET 256
|
||||
|
||||
#define ENIC_AIC_LARGE_PKT_DIFF 3
|
||||
|
||||
enum ext_cq {
|
||||
ENIC_RQ_CQ_ENTRY_SIZE_16,
|
||||
ENIC_RQ_CQ_ENTRY_SIZE_32,
|
||||
ENIC_RQ_CQ_ENTRY_SIZE_64,
|
||||
ENIC_RQ_CQ_ENTRY_SIZE_MAX,
|
||||
};
|
||||
|
||||
struct enic_msix_entry {
|
||||
int requested;
|
||||
char devname[IFNAMSIZ + 8];
|
||||
@ -77,6 +84,10 @@ struct enic_rx_coal {
|
||||
#define ENIC_SET_INSTANCE (1 << 3)
|
||||
#define ENIC_SET_HOST (1 << 4)
|
||||
|
||||
#define MAX_TSO BIT(16)
|
||||
#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
|
||||
#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
|
||||
|
||||
struct enic_port_profile {
|
||||
u32 set;
|
||||
u8 request;
|
||||
@ -128,6 +139,53 @@ struct vxlan_offload {
|
||||
u8 flags;
|
||||
};
|
||||
|
||||
struct enic_wq_stats {
|
||||
u64 packets; /* pkts queued for Tx */
|
||||
u64 stopped; /* Tx ring almost full, queue stopped */
|
||||
u64 wake; /* Tx ring no longer full, queue woken up*/
|
||||
u64 tso; /* non-encap tso pkt */
|
||||
u64 encap_tso; /* encap tso pkt */
|
||||
u64 encap_csum; /* encap HW csum */
|
||||
u64 csum_partial; /* skb->ip_summed = CHECKSUM_PARTIAL */
|
||||
u64 csum_none; /* HW csum not required */
|
||||
u64 bytes; /* bytes queued for Tx */
|
||||
u64 add_vlan; /* HW adds vlan tag */
|
||||
u64 cq_work; /* Tx completions processed */
|
||||
u64 cq_bytes; /* Tx bytes processed */
|
||||
u64 null_pkt; /* skb length <= 0 */
|
||||
u64 skb_linear_fail; /* linearize failures */
|
||||
u64 desc_full_awake; /* TX ring full while queue awake */
|
||||
};
|
||||
|
||||
struct enic_rq_stats {
|
||||
u64 packets; /* pkts received */
|
||||
u64 bytes; /* bytes received */
|
||||
u64 l4_rss_hash; /* hashed on l4 */
|
||||
u64 l3_rss_hash; /* hashed on l3 */
|
||||
u64 csum_unnecessary; /* HW verified csum */
|
||||
u64 csum_unnecessary_encap; /* HW verified csum on encap packet */
|
||||
u64 vlan_stripped; /* HW stripped vlan */
|
||||
u64 napi_complete; /* napi complete intr reenabled */
|
||||
u64 napi_repoll; /* napi poll again */
|
||||
u64 bad_fcs; /* bad pkts */
|
||||
u64 pkt_truncated; /* truncated pkts */
|
||||
u64 no_skb; /* out of skbs */
|
||||
u64 desc_skip; /* Rx pkt went into later buffer */
|
||||
u64 pp_alloc_fail; /* page pool alloc failure */
|
||||
};
|
||||
|
||||
struct enic_wq {
|
||||
spinlock_t lock; /* spinlock for wq */
|
||||
struct vnic_wq vwq;
|
||||
struct enic_wq_stats stats;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
struct enic_rq {
|
||||
struct vnic_rq vrq;
|
||||
struct enic_rq_stats stats;
|
||||
struct page_pool *pool;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
/* Per-instance private data structure */
|
||||
struct enic {
|
||||
struct net_device *netdev;
|
||||
@ -139,8 +197,8 @@ struct enic {
|
||||
struct work_struct reset;
|
||||
struct work_struct tx_hang_reset;
|
||||
struct work_struct change_mtu_work;
|
||||
struct msix_entry msix_entry[ENIC_INTR_MAX];
|
||||
struct enic_msix_entry msix[ENIC_INTR_MAX];
|
||||
struct msix_entry *msix_entry;
|
||||
struct enic_msix_entry *msix;
|
||||
u32 msg_enable;
|
||||
spinlock_t devcmd_lock;
|
||||
u8 mac_addr[ETH_ALEN];
|
||||
@ -159,33 +217,30 @@ struct enic {
|
||||
bool enic_api_busy;
|
||||
struct enic_port_profile *pp;
|
||||
|
||||
/* work queue cache line section */
|
||||
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
|
||||
spinlock_t wq_lock[ENIC_WQ_MAX];
|
||||
struct enic_wq *wq;
|
||||
unsigned int wq_avail;
|
||||
unsigned int wq_count;
|
||||
u16 loop_enable;
|
||||
u16 loop_tag;
|
||||
|
||||
/* receive queue cache line section */
|
||||
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
|
||||
struct enic_rq *rq;
|
||||
unsigned int rq_avail;
|
||||
unsigned int rq_count;
|
||||
struct vxlan_offload vxlan;
|
||||
u64 rq_truncated_pkts;
|
||||
u64 rq_bad_fcs;
|
||||
struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
|
||||
struct napi_struct *napi;
|
||||
|
||||
/* interrupt resource cache line section */
|
||||
____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
|
||||
struct vnic_intr *intr;
|
||||
unsigned int intr_avail;
|
||||
unsigned int intr_count;
|
||||
u32 __iomem *legacy_pba; /* memory-mapped */
|
||||
|
||||
/* completion queue cache line section */
|
||||
____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
|
||||
struct vnic_cq *cq;
|
||||
unsigned int cq_avail;
|
||||
unsigned int cq_count;
|
||||
struct enic_rfs_flw_tbl rfs_h;
|
||||
u32 rx_copybreak;
|
||||
u8 rss_key[ENIC_RSS_LEN];
|
||||
struct vnic_gen_stats gen_stats;
|
||||
enum ext_cq ext_cq;
|
||||
};
|
||||
|
||||
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
|
||||
@ -238,18 +293,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic,
|
||||
return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
|
||||
}
|
||||
|
||||
static inline unsigned int enic_msix_err_intr(struct enic *enic)
|
||||
{
|
||||
return enic->rq_count + enic->wq_count;
|
||||
}
|
||||
/* MSIX interrupts are organized as the error interrupt, then the notify
|
||||
* interrupt followed by all the I/O interrupts. The error interrupt needs
|
||||
* to fit in 7 bits due to hardware constraints
|
||||
*/
|
||||
#define ENIC_MSIX_RESERVED_INTR 2
|
||||
#define ENIC_MSIX_ERR_INTR 0
|
||||
#define ENIC_MSIX_NOTIFY_INTR 1
|
||||
#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR
|
||||
#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2)
|
||||
|
||||
#define ENIC_LEGACY_IO_INTR 0
|
||||
#define ENIC_LEGACY_ERR_INTR 1
|
||||
#define ENIC_LEGACY_NOTIFY_INTR 2
|
||||
|
||||
static inline unsigned int enic_msix_err_intr(struct enic *enic)
|
||||
{
|
||||
return ENIC_MSIX_ERR_INTR;
|
||||
}
|
||||
|
||||
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
|
||||
{
|
||||
return enic->rq_count + enic->wq_count + 1;
|
||||
return ENIC_MSIX_NOTIFY_INTR;
|
||||
}
|
||||
|
||||
static inline bool enic_is_err_intr(struct enic *enic, int intr)
|
||||
@ -297,5 +362,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
|
||||
int enic_is_dynamic(struct enic *enic);
|
||||
void enic_set_ethtool_ops(struct net_device *netdev);
|
||||
int __enic_set_rsskey(struct enic *enic);
|
||||
void enic_ext_cq(struct enic *enic);
|
||||
|
||||
#endif /* _ENIC_H_ */
|
||||
|
@ -32,6 +32,41 @@ struct enic_stat {
|
||||
.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
|
||||
}
|
||||
|
||||
#define ENIC_PER_RQ_STAT(stat) { \
|
||||
.name = "rq[%d]_"#stat, \
|
||||
.index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
|
||||
}
|
||||
|
||||
#define ENIC_PER_WQ_STAT(stat) { \
|
||||
.name = "wq[%d]_"#stat, \
|
||||
.index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
|
||||
}
|
||||
|
||||
static const struct enic_stat enic_per_rq_stats[] = {
|
||||
ENIC_PER_RQ_STAT(l4_rss_hash),
|
||||
ENIC_PER_RQ_STAT(l3_rss_hash),
|
||||
ENIC_PER_RQ_STAT(csum_unnecessary_encap),
|
||||
ENIC_PER_RQ_STAT(vlan_stripped),
|
||||
ENIC_PER_RQ_STAT(napi_complete),
|
||||
ENIC_PER_RQ_STAT(napi_repoll),
|
||||
ENIC_PER_RQ_STAT(no_skb),
|
||||
ENIC_PER_RQ_STAT(desc_skip),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
|
||||
|
||||
static const struct enic_stat enic_per_wq_stats[] = {
|
||||
ENIC_PER_WQ_STAT(encap_tso),
|
||||
ENIC_PER_WQ_STAT(encap_csum),
|
||||
ENIC_PER_WQ_STAT(add_vlan),
|
||||
ENIC_PER_WQ_STAT(cq_work),
|
||||
ENIC_PER_WQ_STAT(cq_bytes),
|
||||
ENIC_PER_WQ_STAT(null_pkt),
|
||||
ENIC_PER_WQ_STAT(skb_linear_fail),
|
||||
ENIC_PER_WQ_STAT(desc_full_awake),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats)
|
||||
static const struct enic_stat enic_tx_stats[] = {
|
||||
ENIC_TX_STAT(tx_frames_ok),
|
||||
ENIC_TX_STAT(tx_unicast_frames_ok),
|
||||
@ -46,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = {
|
||||
ENIC_TX_STAT(tx_tso),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats)
|
||||
|
||||
static const struct enic_stat enic_rx_stats[] = {
|
||||
ENIC_RX_STAT(rx_frames_ok),
|
||||
ENIC_RX_STAT(rx_frames_total),
|
||||
@ -70,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = {
|
||||
ENIC_RX_STAT(rx_frames_to_max),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats)
|
||||
|
||||
static const struct enic_stat enic_gen_stats[] = {
|
||||
ENIC_GEN_STAT(dma_map_error),
|
||||
};
|
||||
|
||||
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
|
||||
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
|
||||
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
|
||||
#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats)
|
||||
|
||||
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
|
||||
{
|
||||
@ -141,22 +178,38 @@ static void enic_get_drvinfo(struct net_device *netdev,
|
||||
static void enic_get_strings(struct net_device *netdev, u32 stringset,
|
||||
u8 *data)
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
unsigned int i;
|
||||
unsigned int j;
|
||||
|
||||
switch (stringset) {
|
||||
case ETH_SS_STATS:
|
||||
for (i = 0; i < enic_n_tx_stats; i++) {
|
||||
for (i = 0; i < NUM_ENIC_TX_STATS; i++) {
|
||||
memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
for (i = 0; i < enic_n_rx_stats; i++) {
|
||||
for (i = 0; i < NUM_ENIC_RX_STATS; i++) {
|
||||
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
for (i = 0; i < enic_n_gen_stats; i++) {
|
||||
for (i = 0; i < NUM_ENIC_GEN_STATS; i++) {
|
||||
memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
for (i = 0; i < enic->rq_count; i++) {
|
||||
for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
|
||||
snprintf(data, ETH_GSTRING_LEN,
|
||||
enic_per_rq_stats[j].name, i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < enic->wq_count; i++) {
|
||||
for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
|
||||
snprintf(data, ETH_GSTRING_LEN,
|
||||
enic_per_wq_stats[j].name, i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -169,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
struct vnic_enet_config *c = &enic->config;
|
||||
|
||||
ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
|
||||
ring->rx_max_pending = c->max_rq_ring;
|
||||
ring->rx_pending = c->rq_desc_count;
|
||||
ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
|
||||
ring->tx_max_pending = c->max_wq_ring;
|
||||
ring->tx_pending = c->wq_desc_count;
|
||||
}
|
||||
|
||||
@ -199,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
|
||||
}
|
||||
rx_pending = c->rq_desc_count;
|
||||
tx_pending = c->wq_desc_count;
|
||||
if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
|
||||
if (ring->rx_pending > c->max_rq_ring ||
|
||||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
|
||||
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
|
||||
ring->rx_pending, ENIC_MIN_RQ_DESCS,
|
||||
ENIC_MAX_RQ_DESCS);
|
||||
c->max_rq_ring);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
|
||||
if (ring->tx_pending > c->max_wq_ring ||
|
||||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
|
||||
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
|
||||
ring->tx_pending, ENIC_MIN_WQ_DESCS,
|
||||
ENIC_MAX_WQ_DESCS);
|
||||
c->max_wq_ring);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (running)
|
||||
@ -242,9 +295,19 @@ err_out:
|
||||
|
||||
static int enic_get_sset_count(struct net_device *netdev, int sset)
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
unsigned int n_per_rq_stats;
|
||||
unsigned int n_per_wq_stats;
|
||||
unsigned int n_stats;
|
||||
|
||||
switch (sset) {
|
||||
case ETH_SS_STATS:
|
||||
return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
|
||||
n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
|
||||
n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
|
||||
n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
|
||||
NUM_ENIC_GEN_STATS +
|
||||
n_per_rq_stats + n_per_wq_stats;
|
||||
return n_stats;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
@ -256,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
struct vnic_stats *vstats;
|
||||
unsigned int i;
|
||||
unsigned int j;
|
||||
int err;
|
||||
|
||||
err = enic_dev_stats_dump(enic, &vstats);
|
||||
@ -266,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
|
||||
if (err == -ENOMEM)
|
||||
return;
|
||||
|
||||
for (i = 0; i < enic_n_tx_stats; i++)
|
||||
for (i = 0; i < NUM_ENIC_TX_STATS; i++)
|
||||
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
|
||||
for (i = 0; i < enic_n_rx_stats; i++)
|
||||
for (i = 0; i < NUM_ENIC_RX_STATS; i++)
|
||||
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
|
||||
for (i = 0; i < enic_n_gen_stats; i++)
|
||||
for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
|
||||
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
|
||||
for (i = 0; i < enic->rq_count; i++) {
|
||||
struct enic_rq_stats *rqstats = &enic->rq[i].stats;
|
||||
int index;
|
||||
|
||||
for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
|
||||
index = enic_per_rq_stats[j].index;
|
||||
*(data++) = ((u64 *)rqstats)[index];
|
||||
}
|
||||
}
|
||||
for (i = 0; i < enic->wq_count; i++) {
|
||||
struct enic_wq_stats *wqstats = &enic->wq[i].stats;
|
||||
int index;
|
||||
|
||||
for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
|
||||
index = enic_per_wq_stats[j].index;
|
||||
*(data++) = ((u64 *)wqstats)[index];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static u32 enic_get_msglevel(struct net_device *netdev)
|
||||
@ -526,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int enic_get_tunable(struct net_device *dev,
|
||||
const struct ethtool_tunable *tuna, void *data)
|
||||
{
|
||||
struct enic *enic = netdev_priv(dev);
|
||||
int ret = 0;
|
||||
|
||||
switch (tuna->id) {
|
||||
case ETHTOOL_RX_COPYBREAK:
|
||||
*(u32 *)data = enic->rx_copybreak;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int enic_set_tunable(struct net_device *dev,
|
||||
const struct ethtool_tunable *tuna,
|
||||
const void *data)
|
||||
{
|
||||
struct enic *enic = netdev_priv(dev);
|
||||
int ret = 0;
|
||||
|
||||
switch (tuna->id) {
|
||||
case ETHTOOL_RX_COPYBREAK:
|
||||
enic->rx_copybreak = *(u32 *)data;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
|
||||
{
|
||||
return ENIC_RSS_LEN;
|
||||
@ -606,6 +651,28 @@ static int enic_get_ts_info(struct net_device *netdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void enic_get_channels(struct net_device *netdev,
|
||||
struct ethtool_channels *channels)
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
|
||||
switch (vnic_dev_get_intr_mode(enic->vdev)) {
|
||||
case VNIC_DEV_INTR_MODE_MSIX:
|
||||
channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
|
||||
channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
|
||||
channels->rx_count = enic->rq_count;
|
||||
channels->tx_count = enic->wq_count;
|
||||
break;
|
||||
case VNIC_DEV_INTR_MODE_MSI:
|
||||
case VNIC_DEV_INTR_MODE_INTX:
|
||||
channels->max_combined = 1;
|
||||
channels->combined_count = 1;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static const struct ethtool_ops enic_ethtool_ops = {
|
||||
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
|
||||
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
|
||||
@ -623,13 +690,12 @@ static const struct ethtool_ops enic_ethtool_ops = {
|
||||
.get_coalesce = enic_get_coalesce,
|
||||
.set_coalesce = enic_set_coalesce,
|
||||
.get_rxnfc = enic_get_rxnfc,
|
||||
.get_tunable = enic_get_tunable,
|
||||
.set_tunable = enic_set_tunable,
|
||||
.get_rxfh_key_size = enic_get_rxfh_key_size,
|
||||
.get_rxfh = enic_get_rxfh,
|
||||
.set_rxfh = enic_set_rxfh,
|
||||
.get_link_ksettings = enic_get_ksettings,
|
||||
.get_ts_info = enic_get_ts_info,
|
||||
.get_channels = enic_get_channels,
|
||||
};
|
||||
|
||||
void enic_set_ethtool_ops(struct net_device *netdev)
|
||||
|
File diff suppressed because it is too large
@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
|
||||
GET_CONFIG(intr_timer_usec);
|
||||
GET_CONFIG(loop_tag);
|
||||
GET_CONFIG(num_arfs);
|
||||
GET_CONFIG(max_rq_ring);
|
||||
GET_CONFIG(max_wq_ring);
|
||||
GET_CONFIG(max_cq_ring);
|
||||
|
||||
if (!c->max_wq_ring)
|
||||
c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
|
||||
if (!c->max_rq_ring)
|
||||
c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
|
||||
if (!c->max_cq_ring)
|
||||
c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
|
||||
|
||||
c->wq_desc_count =
|
||||
min_t(u32, ENIC_MAX_WQ_DESCS,
|
||||
max_t(u32, ENIC_MIN_WQ_DESCS,
|
||||
c->wq_desc_count));
|
||||
min_t(u32, c->max_wq_ring,
|
||||
max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
|
||||
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
|
||||
|
||||
c->rq_desc_count =
|
||||
min_t(u32, ENIC_MAX_RQ_DESCS,
|
||||
max_t(u32, ENIC_MIN_RQ_DESCS,
|
||||
c->rq_desc_count));
|
||||
min_t(u32, c->max_rq_ring,
|
||||
max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
|
||||
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
|
||||
|
||||
if (c->mtu == 0)
|
||||
c->mtu = 1500;
|
||||
c->mtu = min_t(u16, ENIC_MAX_MTU,
|
||||
max_t(u16, ENIC_MIN_MTU,
|
||||
c->mtu));
|
||||
c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
|
||||
|
||||
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
|
||||
vnic_dev_get_intr_coal_timer_max(enic->vdev));
|
||||
|
||||
dev_info(enic_get_dev(enic),
|
||||
"vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
|
||||
enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
|
||||
"vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
|
||||
enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
|
||||
c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
|
||||
|
||||
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
|
||||
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
|
||||
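enic_get_vnic_config() above now clamps the descriptor counts against the firmware-reported max_wq_ring/max_rq_ring (falling back to the old 4096 defaults when the device reports zero) and still rounds the result down to a multiple of 32. A small sketch of that clamping arithmetic (hypothetical helper, values picked only for illustration):

#include <stdio.h>
#include <stdint.h>

/* Clamp a requested descriptor count to [min, max], then round down to a
 * multiple of 32, mirroring the "& 0xffffffe0" in the driver. */
static uint32_t clamp_desc_count(uint32_t req, uint32_t min, uint32_t max)
{
	uint32_t v = req < min ? min : req;

	if (v > max)
		v = max;
	return v & 0xffffffe0;
}

int main(void)
{
	printf("%u\n", clamp_desc_count(1000, 64, 4096));    /* -> 992 */
	printf("%u\n", clamp_desc_count(20000, 64, 16384));  /* -> 16384 */
	return 0;
}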
@ -176,9 +183,9 @@ void enic_free_vnic_resources(struct enic *enic)
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < enic->wq_count; i++)
|
||||
vnic_wq_free(&enic->wq[i]);
|
||||
vnic_wq_free(&enic->wq[i].vwq);
|
||||
for (i = 0; i < enic->rq_count; i++)
|
||||
vnic_rq_free(&enic->rq[i]);
|
||||
vnic_rq_free(&enic->rq[i].vrq);
|
||||
for (i = 0; i < enic->cq_count; i++)
|
||||
vnic_cq_free(&enic->cq[i]);
|
||||
for (i = 0; i < enic->intr_count; i++)
|
||||
@ -187,16 +194,21 @@ void enic_free_vnic_resources(struct enic *enic)
|
||||
|
||||
void enic_get_res_counts(struct enic *enic)
|
||||
{
|
||||
enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
|
||||
enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
|
||||
enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
|
||||
enic->intr_count = vnic_dev_get_res_count(enic->vdev,
|
||||
RES_TYPE_INTR_CTRL);
|
||||
enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
|
||||
enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
|
||||
enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
|
||||
enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
|
||||
RES_TYPE_INTR_CTRL);
|
||||
|
||||
enic->wq_count = enic->wq_avail;
|
||||
enic->rq_count = enic->rq_avail;
|
||||
enic->cq_count = enic->cq_avail;
|
||||
enic->intr_count = enic->intr_avail;
|
||||
|
||||
dev_info(enic_get_dev(enic),
|
||||
"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
|
||||
enic->wq_count, enic->rq_count,
|
||||
enic->cq_count, enic->intr_count);
|
||||
enic->wq_avail, enic->rq_avail,
|
||||
enic->cq_avail, enic->intr_avail);
|
||||
}
|
||||
|
||||
void enic_init_vnic_resources(struct enic *enic)
|
||||
@ -221,9 +233,12 @@ void enic_init_vnic_resources(struct enic *enic)
|
||||
|
||||
switch (intr_mode) {
|
||||
case VNIC_DEV_INTR_MODE_INTX:
|
||||
error_interrupt_enable = 1;
|
||||
error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
|
||||
break;
|
||||
case VNIC_DEV_INTR_MODE_MSIX:
|
||||
error_interrupt_enable = 1;
|
||||
error_interrupt_offset = enic->intr_count - 2;
|
||||
error_interrupt_offset = enic_msix_err_intr(enic);
|
||||
break;
|
||||
default:
|
||||
error_interrupt_enable = 0;
|
||||
@ -233,7 +248,7 @@ void enic_init_vnic_resources(struct enic *enic)
|
||||
|
||||
for (i = 0; i < enic->rq_count; i++) {
|
||||
cq_index = i;
|
||||
vnic_rq_init(&enic->rq[i],
|
||||
vnic_rq_init(&enic->rq[i].vrq,
|
||||
cq_index,
|
||||
error_interrupt_enable,
|
||||
error_interrupt_offset);
|
||||
@ -241,7 +256,7 @@ void enic_init_vnic_resources(struct enic *enic)
|
||||
|
||||
for (i = 0; i < enic->wq_count; i++) {
|
||||
cq_index = enic->rq_count + i;
|
||||
vnic_wq_init(&enic->wq[i],
|
||||
vnic_wq_init(&enic->wq[i].vwq,
|
||||
cq_index,
|
||||
error_interrupt_enable,
|
||||
error_interrupt_offset);
|
||||
@ -249,15 +264,15 @@ void enic_init_vnic_resources(struct enic *enic)
|
||||
|
||||
/* Init CQ resources
|
||||
*
|
||||
* CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
|
||||
* CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
|
||||
* All CQs point to INTR[0] for INTx, MSI
|
||||
* CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
|
||||
*/
|
||||
|
||||
for (i = 0; i < enic->cq_count; i++) {
|
||||
|
||||
switch (intr_mode) {
|
||||
case VNIC_DEV_INTR_MODE_MSIX:
|
||||
interrupt_offset = i;
|
||||
interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
|
||||
break;
|
||||
default:
|
||||
interrupt_offset = 0;
|
||||
@ -304,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
|
||||
int enic_alloc_vnic_resources(struct enic *enic)
|
||||
{
|
||||
enum vnic_dev_intr_mode intr_mode;
|
||||
int rq_cq_desc_size;
|
||||
unsigned int i;
|
||||
int err;
|
||||
|
||||
@ -318,11 +334,29 @@ int enic_alloc_vnic_resources(struct enic *enic)
|
||||
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
|
||||
"unknown");
|
||||
|
||||
switch (enic->ext_cq) {
|
||||
case ENIC_RQ_CQ_ENTRY_SIZE_16:
|
||||
rq_cq_desc_size = 16;
|
||||
break;
|
||||
case ENIC_RQ_CQ_ENTRY_SIZE_32:
|
||||
rq_cq_desc_size = 32;
|
||||
break;
|
||||
case ENIC_RQ_CQ_ENTRY_SIZE_64:
|
||||
rq_cq_desc_size = 64;
|
||||
break;
|
||||
default:
|
||||
dev_err(enic_get_dev(enic),
|
||||
"Unable to determine rq cq desc size: %d",
|
||||
enic->ext_cq);
|
||||
err = -ENODEV;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* Allocate queue resources
|
||||
*/
|
||||
|
||||
for (i = 0; i < enic->wq_count; i++) {
|
||||
err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
|
||||
err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
|
||||
enic->config.wq_desc_count,
|
||||
sizeof(struct wq_enet_desc));
|
||||
if (err)
|
||||
@ -330,7 +364,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
|
||||
}
|
||||
|
||||
for (i = 0; i < enic->rq_count; i++) {
|
||||
err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
|
||||
err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
|
||||
enic->config.rq_desc_count,
|
||||
sizeof(struct rq_enet_desc));
|
||||
if (err)
|
||||
@ -340,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
|
||||
for (i = 0; i < enic->cq_count; i++) {
|
||||
if (i < enic->rq_count)
|
||||
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
|
||||
enic->config.rq_desc_count,
|
||||
sizeof(struct cq_enet_rq_desc));
|
||||
enic->config.rq_desc_count,
|
||||
rq_cq_desc_size);
|
||||
else
|
||||
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
|
||||
enic->config.wq_desc_count,
|
||||
@ -372,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
|
||||
|
||||
err_out_cleanup:
|
||||
enic_free_vnic_resources(enic);
|
||||
|
||||
err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
|
||||
* that command
|
||||
*/
|
||||
void enic_ext_cq(struct enic *enic)
|
||||
{
|
||||
u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
|
||||
int wait = 1000;
|
||||
int ret;
|
||||
|
||||
spin_lock_bh(&enic->devcmd_lock);
|
||||
ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
|
||||
if (ret || a0) {
|
||||
dev_info(&enic->pdev->dev,
|
||||
"CMD_CQ_ENTRY_SIZE_SET not supported.");
|
||||
enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
|
||||
goto out;
|
||||
}
|
||||
a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
|
||||
enic->ext_cq = fls(a1) - 1;
|
||||
a0 = VNIC_RQ_ALL;
|
||||
a1 = enic->ext_cq;
|
||||
ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
|
||||
if (ret) {
|
||||
dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
|
||||
enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
|
||||
}
|
||||
out:
|
||||
spin_unlock_bh(&enic->devcmd_lock);
|
||||
dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
|
||||
16 << enic->ext_cq);
|
||||
}
|
||||
|
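enic_ext_cq() above asks the firmware which CQ entry sizes it supports, masks the answer with VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT and picks the largest supported size with fls(). A standalone sketch of that selection step (stand-in fls helper and made-up capability values, not the driver's code):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_like(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* Hypothetical capability bitmap: bits 0..2 = 16/32/64-byte entries supported. */
	uint32_t caps = (1u << 0) | (1u << 1) | (1u << 2);
	uint32_t all  = (1u << 0) | (1u << 1) | (1u << 2);  /* VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT analogue */
	uint32_t a1 = caps & all;
	int ext_cq = fls_like(a1) - 1;   /* index of the largest supported entry size */

	printf("ext_cq index = %d, entry size = %d bytes\n", ext_cq, 16 << ext_cq);
	return 0;
}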
@ -12,10 +12,13 @@
|
||||
#include "vnic_wq.h"
|
||||
#include "vnic_rq.h"
|
||||
|
||||
#define ENIC_MIN_WQ_DESCS 64
|
||||
#define ENIC_MAX_WQ_DESCS 4096
|
||||
#define ENIC_MIN_RQ_DESCS 64
|
||||
#define ENIC_MAX_RQ_DESCS 4096
|
||||
#define ENIC_MIN_WQ_DESCS 64
|
||||
#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
|
||||
#define ENIC_MAX_WQ_DESCS 16384
|
||||
#define ENIC_MIN_RQ_DESCS 64
|
||||
#define ENIC_MAX_RQ_DESCS 16384
|
||||
#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
|
||||
#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
|
||||
|
||||
#define ENIC_MIN_MTU ETH_MIN_MTU
|
||||
#define ENIC_MAX_MTU 9000
|
||||
|
drivers/net/ethernet/cisco/enic/enic_rq.c (new file, 436 lines)
@ -0,0 +1,436 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
// Copyright 2024 Cisco Systems, Inc. All rights reserved.
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <net/busy_poll.h>
|
||||
#include "enic.h"
|
||||
#include "enic_res.h"
|
||||
#include "enic_rq.h"
|
||||
#include "vnic_rq.h"
|
||||
#include "cq_enet_desc.h"
|
||||
|
||||
#define ENIC_LARGE_PKT_THRESHOLD 1000
|
||||
|
||||
static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
|
||||
u32 pkt_len)
|
||||
{
|
||||
if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
|
||||
pkt_size->large_pkt_bytes_cnt += pkt_len;
|
||||
else
|
||||
pkt_size->small_pkt_bytes_cnt += pkt_len;
|
||||
}
|
||||
|
||||
static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
|
||||
u8 *color, u16 *q_number, u16 *completed_index)
|
||||
{
|
||||
/* type_color is the last field for all cq structs */
|
||||
u8 type_color;
|
||||
|
||||
switch (cq_desc_size) {
|
||||
case VNIC_RQ_CQ_ENTRY_SIZE_16: {
|
||||
struct cq_enet_rq_desc *desc =
|
||||
(struct cq_enet_rq_desc *)cq_desc;
|
||||
type_color = desc->type_color;
|
||||
|
||||
/* Make sure color bit is read from desc *before* other fields
|
||||
* are read from desc. Hardware guarantees color bit is last
|
||||
* bit (byte) written. Adding the rmb() prevents the compiler
|
||||
* and/or CPU from reordering the reads which would potentially
|
||||
* result in reading stale values.
|
||||
*/
|
||||
rmb();
|
||||
|
||||
*q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
|
||||
CQ_DESC_Q_NUM_MASK;
|
||||
*completed_index = le16_to_cpu(desc->completed_index_flags) &
|
||||
CQ_DESC_COMP_NDX_MASK;
|
||||
break;
|
||||
}
|
||||
case VNIC_RQ_CQ_ENTRY_SIZE_32: {
|
||||
struct cq_enet_rq_desc_32 *desc =
|
||||
(struct cq_enet_rq_desc_32 *)cq_desc;
|
||||
type_color = desc->type_color;
|
||||
|
||||
/* Make sure color bit is read from desc *before* other fields
|
||||
* are read from desc. Hardware guarantees color bit is last
|
||||
* bit (byte) written. Adding the rmb() prevents the compiler
|
||||
* and/or CPU from reordering the reads which would potentially
|
||||
* result in reading stale values.
|
||||
*/
|
||||
rmb();
|
||||
|
||||
*q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
|
||||
CQ_DESC_Q_NUM_MASK;
|
||||
*completed_index = le16_to_cpu(desc->completed_index_flags) &
|
||||
CQ_DESC_COMP_NDX_MASK;
|
||||
*completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
|
||||
CQ_DESC_COMP_NDX_BITS;
|
||||
break;
|
||||
}
|
||||
case VNIC_RQ_CQ_ENTRY_SIZE_64: {
|
||||
struct cq_enet_rq_desc_64 *desc =
|
||||
(struct cq_enet_rq_desc_64 *)cq_desc;
|
||||
type_color = desc->type_color;
|
||||
|
||||
/* Make sure color bit is read from desc *before* other fields
|
||||
* are read from desc. Hardware guarantees color bit is last
|
||||
* bit (byte) written. Adding the rmb() prevents the compiler
|
||||
* and/or CPU from reordering the reads which would potentially
|
||||
* result in reading stale values.
|
||||
*/
|
||||
rmb();
|
||||
|
||||
*q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
|
||||
CQ_DESC_Q_NUM_MASK;
|
||||
*completed_index = le16_to_cpu(desc->completed_index_flags) &
|
||||
CQ_DESC_COMP_NDX_MASK;
|
||||
*completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
|
||||
CQ_DESC_COMP_NDX_BITS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
|
||||
*type = type_color & CQ_DESC_TYPE_MASK;
|
||||
}
|
||||
|
||||
static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
|
||||
u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
|
||||
u8 vlan_stripped, u8 csum_not_calc,
|
||||
u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
|
||||
u16 vlan_tci, struct sk_buff *skb)
|
||||
{
|
||||
struct enic *enic = vnic_dev_priv(vrq->vdev);
|
||||
struct net_device *netdev = enic->netdev;
|
||||
struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
|
||||
bool outer_csum_ok = true, encap = false;
|
||||
|
||||
if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
|
||||
switch (rss_type) {
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
|
||||
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
|
||||
rqstats->l4_rss_hash++;
|
||||
break;
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
|
||||
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
|
||||
rqstats->l3_rss_hash++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (enic->vxlan.vxlan_udp_port_number) {
|
||||
switch (enic->vxlan.patch_level) {
|
||||
case 0:
|
||||
if (fcoe) {
|
||||
encap = true;
|
||||
outer_csum_ok = fcoe_fc_crc_ok;
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
if (type == 7 && (rss_hash & BIT(0))) {
|
||||
encap = true;
|
||||
outer_csum_ok = (rss_hash & BIT(1)) &&
|
||||
(rss_hash & BIT(2));
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Hardware does not provide the whole packet checksum. It only
* provides a pseudo checksum: the hw validates the packet
* checksum but does not give us the checksum value, so use
* CHECKSUM_UNNECESSARY.
*
* In case of an encap pkt, tcp_udp_csum_ok/ipv4_csum_ok is the
* inner csum_ok. outer_csum_ok is set by hw when the outer udp
* csum is correct or is zero.
*/
|
||||
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
|
||||
tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
skb->csum_level = encap;
|
||||
if (encap)
|
||||
rqstats->csum_unnecessary_encap++;
|
||||
else
|
||||
rqstats->csum_unnecessary++;
|
||||
}
|
||||
|
||||
if (vlan_stripped) {
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
|
||||
rqstats->vlan_stripped++;
|
||||
}
|
||||
}
|
||||
|
||||
/*
* The cq_enet_rq_desc accessors in this section use only the first 15 bytes
* of the cq descriptor, which are identical for all types (16, 32 and
* 64 byte) of cqs.
*/
|
||||
static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
|
||||
u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
|
||||
u8 *csum_not_calc, u32 *rss_hash,
|
||||
u16 *bytes_written, u8 *packet_error,
|
||||
u8 *vlan_stripped, u16 *vlan_tci,
|
||||
u16 *checksum, u8 *fcoe_sof,
|
||||
u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
|
||||
u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
|
||||
u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
|
||||
u8 *ipv4_fragment, u8 *fcs_ok)
|
||||
{
|
||||
u16 completed_index_flags;
|
||||
u16 q_number_rss_type_flags;
|
||||
u16 bytes_written_flags;
|
||||
|
||||
completed_index_flags = le16_to_cpu(desc->completed_index_flags);
|
||||
q_number_rss_type_flags =
|
||||
le16_to_cpu(desc->q_number_rss_type_flags);
|
||||
bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
|
||||
|
||||
*ingress_port = (completed_index_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
|
||||
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
|
||||
1 : 0;
|
||||
*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
|
||||
1 : 0;
|
||||
*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
|
||||
1 : 0;
|
||||
|
||||
*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
|
||||
CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
|
||||
*csum_not_calc = (q_number_rss_type_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
|
||||
|
||||
*rss_hash = le32_to_cpu(desc->rss_hash);
|
||||
|
||||
*bytes_written = bytes_written_flags &
|
||||
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
|
||||
*packet_error = (bytes_written_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
|
||||
*vlan_stripped = (bytes_written_flags &
|
||||
CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
|
||||
|
||||
/*
|
||||
* Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
|
||||
*/
|
||||
*vlan_tci = le16_to_cpu(desc->vlan);
|
||||
|
||||
if (*fcoe) {
|
||||
*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
|
||||
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
|
||||
*fcoe_fc_crc_ok = (desc->flags &
|
||||
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
|
||||
*fcoe_enc_error = (desc->flags &
|
||||
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
|
||||
*fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
|
||||
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
|
||||
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
|
||||
*checksum = 0;
|
||||
} else {
|
||||
*fcoe_sof = 0;
|
||||
*fcoe_fc_crc_ok = 0;
|
||||
*fcoe_enc_error = 0;
|
||||
*fcoe_eof = 0;
|
||||
*checksum = le16_to_cpu(desc->checksum_fcoe);
|
||||
}
|
||||
|
||||
*tcp_udp_csum_ok =
|
||||
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
|
||||
*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
|
||||
*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
|
||||
*ipv4_csum_ok =
|
||||
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
|
||||
*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
|
||||
*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
|
||||
*ipv4_fragment =
|
||||
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
|
||||
*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
|
||||
}
|
||||
|
||||
static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
|
||||
u16 bytes_written)
|
||||
{
|
||||
struct enic *enic = vnic_dev_priv(vrq->vdev);
|
||||
struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
|
||||
|
||||
if (packet_error) {
|
||||
if (!fcs_ok) {
|
||||
if (bytes_written > 0)
|
||||
rqstats->bad_fcs++;
|
||||
else if (bytes_written == 0)
|
||||
rqstats->pkt_truncated++;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int enic_rq_alloc_buf(struct vnic_rq *rq)
|
||||
{
|
||||
struct enic *enic = vnic_dev_priv(rq->vdev);
|
||||
struct net_device *netdev = enic->netdev;
|
||||
struct enic_rq *erq = &enic->rq[rq->index];
|
||||
struct enic_rq_stats *rqstats = &erq->stats;
|
||||
unsigned int offset = 0;
|
||||
unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
|
||||
unsigned int os_buf_index = 0;
|
||||
dma_addr_t dma_addr;
|
||||
struct vnic_rq_buf *buf = rq->to_use;
|
||||
struct page *page;
|
||||
unsigned int truesize = len;
|
||||
|
||||
if (buf->os_buf) {
|
||||
enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
|
||||
buf->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
|
||||
if (unlikely(!page)) {
|
||||
rqstats->pp_alloc_fail++;
|
||||
return -ENOMEM;
|
||||
}
|
||||
buf->offset = offset;
|
||||
buf->truesize = truesize;
|
||||
dma_addr = page_pool_get_dma_addr(page) + offset;
|
||||
enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
|
||||
{
|
||||
struct enic *enic = vnic_dev_priv(rq->vdev);
|
||||
struct enic_rq *erq = &enic->rq[rq->index];
|
||||
|
||||
if (!buf->os_buf)
|
||||
return;
|
||||
|
||||
page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
|
||||
buf->os_buf = NULL;
|
||||
}
|
||||
|
||||
static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
|
||||
struct vnic_rq_buf *buf, void *cq_desc,
|
||||
u8 type, u16 q_number, u16 completed_index)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
|
||||
struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
|
||||
struct napi_struct *napi;
|
||||
|
||||
u8 eop, sop, ingress_port, vlan_stripped;
|
||||
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
|
||||
u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
|
||||
u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
|
||||
u8 packet_error;
|
||||
u16 bytes_written, vlan_tci, checksum;
|
||||
u32 rss_hash;
|
||||
|
||||
rqstats->packets++;
|
||||
|
||||
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
|
||||
&fcoe, &eop, &sop, &rss_type, &csum_not_calc,
|
||||
&rss_hash, &bytes_written, &packet_error,
|
||||
&vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
|
||||
&fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
|
||||
&tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
|
||||
&ipv4, &ipv4_fragment, &fcs_ok);
|
||||
|
||||
if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
|
||||
return;
|
||||
|
||||
if (eop && bytes_written > 0) {
|
||||
/* Good receive
|
||||
*/
|
||||
rqstats->bytes += bytes_written;
|
||||
napi = &enic->napi[rq->index];
|
||||
skb = napi_get_frags(napi);
|
||||
if (unlikely(!skb)) {
|
||||
net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
|
||||
enic->netdev->name, rq->index,
|
||||
completed_index);
|
||||
rqstats->no_skb++;
|
||||
return;
|
||||
}
|
||||
|
||||
prefetch(skb->data - NET_IP_ALIGN);
|
||||
|
||||
dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
|
||||
bytes_written, DMA_FROM_DEVICE);
|
||||
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
|
||||
(struct page *)buf->os_buf, buf->offset,
|
||||
bytes_written, buf->truesize);
|
||||
skb_record_rx_queue(skb, q_number);
|
||||
enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
|
||||
fcoe_fc_crc_ok, vlan_stripped,
|
||||
csum_not_calc, tcp_udp_csum_ok, ipv6,
|
||||
ipv4_csum_ok, vlan_tci, skb);
|
||||
skb_mark_for_recycle(skb);
|
||||
napi_gro_frags(napi);
|
||||
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
|
||||
enic_intr_update_pkt_size(&cq->pkt_size_counter,
|
||||
bytes_written);
|
||||
buf->os_buf = NULL;
|
||||
buf->dma_addr = 0;
|
||||
buf = buf->next;
|
||||
} else {
|
||||
/* Buffer overflow
|
||||
*/
|
||||
rqstats->pkt_truncated++;
|
||||
}
|
||||
}
|
||||
|
||||
static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
|
||||
u16 q_number, u16 completed_index)
|
||||
{
|
||||
struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
|
||||
struct vnic_rq *vrq = &enic->rq[q_number].vrq;
|
||||
struct vnic_rq_buf *vrq_buf = vrq->to_clean;
|
||||
int skipped;
|
||||
|
||||
while (1) {
|
||||
skipped = (vrq_buf->index != completed_index);
|
||||
if (!skipped)
|
||||
enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
|
||||
q_number, completed_index);
|
||||
else
|
||||
rqstats->desc_skip++;
|
||||
|
||||
vrq->ring.desc_avail++;
|
||||
vrq->to_clean = vrq_buf->next;
|
||||
vrq_buf = vrq_buf->next;
|
||||
if (!skipped)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
|
||||
unsigned int work_to_do)
|
||||
{
|
||||
struct vnic_cq *cq = &enic->cq[cq_index];
|
||||
void *cq_desc = vnic_cq_to_clean(cq);
|
||||
u16 q_number, completed_index;
|
||||
unsigned int work_done = 0;
|
||||
u8 type, color;
|
||||
|
||||
enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
|
||||
&completed_index);
|
||||
|
||||
while (color != cq->last_color) {
|
||||
enic_rq_service(enic, cq_desc, type, q_number, completed_index);
|
||||
vnic_cq_inc_to_clean(cq);
|
||||
|
||||
if (++work_done >= work_to_do)
|
||||
break;
|
||||
|
||||
cq_desc = vnic_cq_to_clean(cq);
|
||||
enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
|
||||
&q_number, &completed_index);
|
||||
}
|
||||
|
||||
return work_done;
|
||||
}
|
drivers/net/ethernet/cisco/enic/enic_rq.h (new file, 8 lines)
@ -0,0 +1,8 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only
|
||||
* Copyright 2024 Cisco Systems, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
|
||||
unsigned int work_to_do);
|
||||
int enic_rq_alloc_buf(struct vnic_rq *rq);
|
||||
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
|
drivers/net/ethernet/cisco/enic/enic_wq.c (new file, 117 lines)
@ -0,0 +1,117 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
// Copyright 2025 Cisco Systems, Inc. All rights reserved.
|
||||
|
||||
#include <net/netdev_queues.h>
|
||||
#include "enic_res.h"
|
||||
#include "enic.h"
|
||||
#include "enic_wq.h"
|
||||
|
||||
#define ENET_CQ_DESC_COMP_NDX_BITS 14
|
||||
#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
|
||||
|
||||
static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
|
||||
u8 *type, u8 *color, u16 *q_number,
|
||||
u16 *completed_index)
|
||||
{
|
||||
const struct cq_desc *desc = desc_arg;
|
||||
const u8 type_color = desc->type_color;
|
||||
|
||||
*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
|
||||
|
||||
/*
|
||||
* Make sure color bit is read from desc *before* other fields
|
||||
* are read from desc. Hardware guarantees color bit is last
|
||||
* bit (byte) written. Adding the rmb() prevents the compiler
|
||||
* and/or CPU from reordering the reads which would potentially
|
||||
* result in reading stale values.
|
||||
*/
|
||||
rmb();
|
||||
|
||||
*type = type_color & CQ_DESC_TYPE_MASK;
|
||||
*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
|
||||
|
||||
if (ext_wq)
|
||||
*completed_index = le16_to_cpu(desc->completed_index) &
|
||||
ENET_CQ_DESC_COMP_NDX_MASK;
|
||||
else
|
||||
*completed_index = le16_to_cpu(desc->completed_index) &
|
||||
CQ_DESC_COMP_NDX_MASK;
|
||||
}
|
||||
|
||||
void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
|
||||
{
|
||||
struct enic *enic = vnic_dev_priv(wq->vdev);
|
||||
|
||||
if (buf->sop)
|
||||
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
|
||||
DMA_TO_DEVICE);
|
||||
else
|
||||
dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
if (buf->os_buf)
|
||||
dev_kfree_skb_any(buf->os_buf);
|
||||
}
|
||||
|
||||
static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
|
||||
struct vnic_wq_buf *buf, void *opaque)
|
||||
{
|
||||
struct enic *enic = vnic_dev_priv(wq->vdev);
|
||||
|
||||
enic->wq[wq->index].stats.cq_work++;
|
||||
enic->wq[wq->index].stats.cq_bytes += buf->len;
|
||||
enic_free_wq_buf(wq, buf);
|
||||
}
|
||||
|
||||
static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
|
||||
u8 type, u16 q_number, u16 completed_index)
|
||||
{
|
||||
struct enic *enic = vnic_dev_priv(vdev);
|
||||
|
||||
spin_lock(&enic->wq[q_number].lock);
|
||||
|
||||
vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
|
||||
completed_index, enic_wq_free_buf, NULL);
|
||||
|
||||
if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
|
||||
&& vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
|
||||
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
|
||||
netif_wake_subqueue(enic->netdev, q_number);
|
||||
enic->wq[q_number].stats.wake++;
|
||||
}
|
||||
|
||||
spin_unlock(&enic->wq[q_number].lock);
|
||||
}
|
||||
|
||||
unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
|
||||
unsigned int work_to_do)
|
||||
{
|
||||
struct vnic_cq *cq = &enic->cq[cq_index];
|
||||
u16 q_number, completed_index;
|
||||
unsigned int work_done = 0;
|
||||
struct cq_desc *cq_desc;
|
||||
u8 type, color;
|
||||
bool ext_wq;
|
||||
|
||||
ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
|
||||
|
||||
cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
|
||||
enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
|
||||
&q_number, &completed_index);
|
||||
|
||||
while (color != cq->last_color) {
|
||||
enic_wq_service(cq->vdev, cq_desc, type, q_number,
|
||||
completed_index);
|
||||
|
||||
vnic_cq_inc_to_clean(cq);
|
||||
|
||||
if (++work_done >= work_to_do)
|
||||
break;
|
||||
|
||||
cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
|
||||
enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
|
||||
&q_number, &completed_index);
|
||||
}
|
||||
|
||||
return work_done;
|
||||
}
|
7
drivers/net/ethernet/cisco/enic/enic_wq.h
Normal file
@ -0,0 +1,7 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only
|
||||
* Copyright 2025 Cisco Systems, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
|
||||
unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
|
||||
unsigned int work_to_do);
|
@ -56,45 +56,18 @@ struct vnic_cq {
|
||||
ktime_t prev_ts;
|
||||
};
|
||||
|
||||
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
|
||||
unsigned int work_to_do,
|
||||
int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
|
||||
u8 type, u16 q_number, u16 completed_index, void *opaque),
|
||||
void *opaque)
|
||||
static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
|
||||
{
|
||||
struct cq_desc *cq_desc;
|
||||
unsigned int work_done = 0;
|
||||
u16 q_number, completed_index;
|
||||
u8 type, color;
|
||||
return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
|
||||
}
|
||||
|
||||
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
|
||||
cq->ring.desc_size * cq->to_clean);
|
||||
cq_desc_dec(cq_desc, &type, &color,
|
||||
&q_number, &completed_index);
|
||||
|
||||
while (color != cq->last_color) {
|
||||
|
||||
if ((*q_service)(cq->vdev, cq_desc, type,
|
||||
q_number, completed_index, opaque))
|
||||
break;
|
||||
|
||||
cq->to_clean++;
|
||||
if (cq->to_clean == cq->ring.desc_count) {
|
||||
cq->to_clean = 0;
|
||||
cq->last_color = cq->last_color ? 0 : 1;
|
||||
}
|
||||
|
||||
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
|
||||
cq->ring.desc_size * cq->to_clean);
|
||||
cq_desc_dec(cq_desc, &type, &color,
|
||||
&q_number, &completed_index);
|
||||
|
||||
work_done++;
|
||||
if (work_done >= work_to_do)
|
||||
break;
|
||||
static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
|
||||
{
|
||||
cq->to_clean++;
|
||||
if (cq->to_clean == cq->ring.desc_count) {
|
||||
cq->to_clean = 0;
|
||||
cq->last_color = cq->last_color ? 0 : 1;
|
||||
}
|
||||
|
||||
return work_done;
|
||||
}
|
||||
|
||||
void vnic_cq_free(struct vnic_cq *cq);
|
||||
|
@ -146,23 +146,19 @@ EXPORT_SYMBOL(vnic_dev_get_res);
|
||||
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
|
||||
unsigned int desc_count, unsigned int desc_size)
|
||||
{
|
||||
/* The base address of the desc rings must be 512 byte aligned.
|
||||
* Descriptor count is aligned to groups of 32 descriptors. A
|
||||
* count of 0 means the maximum 4096 descriptors. Descriptor
|
||||
* size is aligned to 16 bytes.
|
||||
*/
|
||||
|
||||
unsigned int count_align = 32;
|
||||
unsigned int desc_align = 16;
|
||||
|
||||
ring->base_align = 512;
|
||||
/* Descriptor ring base address alignment in bytes */
|
||||
ring->base_align = VNIC_DESC_BASE_ALIGN;
|
||||
|
||||
/* A count of 0 means the maximum descriptors */
|
||||
if (desc_count == 0)
|
||||
desc_count = 4096;
|
||||
desc_count = VNIC_DESC_MAX_COUNT;
|
||||
|
||||
ring->desc_count = ALIGN(desc_count, count_align);
|
||||
/* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */
|
||||
ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN);
|
||||
|
||||
ring->desc_size = ALIGN(desc_size, desc_align);
|
||||
/* Descriptor size alignment in bytes */
|
||||
ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN);
|
||||
|
||||
ring->size = ring->desc_count * ring->desc_size;
|
||||
ring->size_unaligned = ring->size + ring->base_align;
|
||||
|
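
The alignment rules described in the comment above boil down to a few lines of arithmetic: round the descriptor count up to a multiple of 32 (with 0 meaning the 4096 maximum), round the descriptor size up to 16 bytes, and over-allocate by 512 bytes so the base address can later be aligned. A standalone sketch with the same constants is below; ALIGN here is the usual power-of-two round-up and toy_ring is an illustrative stand-in for vnic_dev_ring, not driver code.

/* Standalone sketch of the vnic descriptor-ring sizing rules. */
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))	/* a must be a power of 2 */

#define VNIC_DESC_SIZE_ALIGN	16
#define VNIC_DESC_COUNT_ALIGN	32
#define VNIC_DESC_BASE_ALIGN	512
#define VNIC_DESC_MAX_COUNT	4096

struct toy_ring {
	unsigned int desc_count;
	unsigned int desc_size;
	unsigned int size;
	unsigned int size_unaligned;
	unsigned int base_align;
};

static void toy_ring_size(struct toy_ring *ring,
			  unsigned int desc_count, unsigned int desc_size)
{
	ring->base_align = VNIC_DESC_BASE_ALIGN;

	if (desc_count == 0)				/* 0 means "maximum" */
		desc_count = VNIC_DESC_MAX_COUNT;

	ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN);
	ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN);

	ring->size = ring->desc_count * ring->desc_size;
	/* slack so the base address can be rounded up to 512 bytes later */
	ring->size_unaligned = ring->size + ring->base_align;
}

int main(void)
{
	struct toy_ring ring;

	/* e.g. 100 requested descriptors of 30 bytes -> 128 descriptors of 32 bytes */
	toy_ring_size(&ring, 100, 30);
	printf("count=%u desc_size=%u total=%u (alloc %u unaligned)\n",
	       ring.desc_count, ring.desc_size, ring.size, ring.size_unaligned);
	return 0;
}
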
@ -31,6 +31,11 @@ static inline void writeq(u64 val, void __iomem *reg)
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#define VNIC_DESC_SIZE_ALIGN 16
|
||||
#define VNIC_DESC_COUNT_ALIGN 32
|
||||
#define VNIC_DESC_BASE_ALIGN 512
|
||||
#define VNIC_DESC_MAX_COUNT 4096
|
||||
|
||||
enum vnic_dev_intr_mode {
|
||||
VNIC_DEV_INTR_MODE_UNKNOWN,
|
||||
VNIC_DEV_INTR_MODE_INTX,
|
||||
|
@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
|
||||
* in: (u16) a2 = unsigned short int port information
|
||||
*/
|
||||
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
|
||||
|
||||
/*
|
||||
* Set extended CQ field in MREGS of RQ (or all RQs)
|
||||
* for given vNIC
|
||||
* in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
|
||||
* (u32) a1 = CQ entry size
|
||||
* VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
|
||||
* VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
|
||||
* VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
|
||||
*
|
||||
* Capability query:
|
||||
* out: (u32) a0 = errno, 0:valid cmd
|
||||
* (u32) a1 = value consisting of supported entries
|
||||
* bit 0: 16 bytes
|
||||
* bit 1: 32 bytes
|
||||
* bit 2: 64 bytes
|
||||
*/
|
||||
CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
|
||||
|
||||
};
|
||||
|
||||
/* CMD_ENABLE2 flags */
|
||||
|
@ -21,6 +21,11 @@ struct vnic_enet_config {
|
||||
u16 loop_tag;
|
||||
u16 vf_rq_count;
|
||||
u16 num_arfs;
|
||||
u8 reserved[66];
|
||||
u32 max_rq_ring; // MAX RQ ring size
|
||||
u32 max_wq_ring; // MAX WQ ring size
|
||||
u32 max_cq_ring; // MAX CQ ring size
|
||||
u32 rdma_rsvd_lkey; // Reserved (privileged) LKey
|
||||
};
|
||||
|
||||
#define VENETF_TSO 0x1 /* TSO enabled */
|
||||
|
@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
|
||||
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
|
||||
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
|
||||
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
|
||||
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
|
||||
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
|
||||
|
||||
struct vnic_rq_buf {
|
||||
struct vnic_rq_buf *next;
|
||||
@ -61,6 +61,8 @@ struct vnic_rq_buf {
|
||||
unsigned int index;
|
||||
void *desc;
|
||||
uint64_t wr_id;
|
||||
unsigned int offset;
|
||||
unsigned int truesize;
|
||||
};
|
||||
|
||||
enum enic_poll_state {
|
||||
|
@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
|
||||
|
||||
tlv->type = htons(type);
|
||||
tlv->length = htons(length);
|
||||
memcpy(tlv->value, value, length);
|
||||
unsafe_memcpy(tlv->value, value, length,
|
||||
/* Flexible array of flexible arrays */);
|
||||
|
||||
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
|
||||
vp->length = htonl(ntohl(vp->length) +
|
||||
|
@ -62,7 +62,7 @@ struct vnic_wq_buf {
|
||||
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
|
||||
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
|
||||
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
|
||||
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
|
||||
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
|
||||
|
||||
struct vnic_wq {
|
||||
unsigned int index;
|
||||
|
@ -2,11 +2,13 @@
|
||||
obj-$(CONFIG_FCOE_FNIC) += fnic.o
|
||||
|
||||
fnic-y := \
|
||||
fip.o\
|
||||
fnic_attrs.o \
|
||||
fnic_isr.o \
|
||||
fnic_main.o \
|
||||
fnic_res.o \
|
||||
fnic_fcs.o \
|
||||
fdls_disc.o \
|
||||
fnic_scsi.o \
|
||||
fnic_trace.o \
|
||||
fnic_debugfs.o \
|
||||
@ -15,4 +17,5 @@ fnic-y := \
|
||||
vnic_intr.o \
|
||||
vnic_rq.o \
|
||||
vnic_wq_copy.o \
|
||||
vnic_wq.o
|
||||
vnic_wq.o \
|
||||
fnic_pci_subsys_devid.o
|
||||
|
5091
drivers/scsi/fnic/fdls_disc.c
Normal file
File diff suppressed because it is too large
253
drivers/scsi/fnic/fdls_fc.h
Normal file
@ -0,0 +1,253 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _FDLS_FC_H_
|
||||
#define _FDLS_FC_H_
|
||||
|
||||
/* This file contains the declarations for FC fabric services
|
||||
* and target discovery
|
||||
*
|
||||
* Request and Response for
|
||||
* 1. FLOGI
|
||||
* 2. PLOGI to Fabric Controller
|
||||
* 3. GPN_ID, GPN_FT
|
||||
* 4. RSCN
|
||||
* 5. PLOGI to Target
|
||||
* 6. PRLI to Target
|
||||
*/
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/fc/fc_els.h>
|
||||
#include <uapi/scsi/fc/fc_fs.h>
|
||||
#include <uapi/scsi/fc/fc_ns.h>
|
||||
#include <uapi/scsi/fc/fc_gs.h>
|
||||
#include <uapi/linux/if_ether.h>
|
||||
#include <scsi/fc/fc_ms.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <scsi/fc/fc_encaps.h>
|
||||
#include <scsi/fc/fc_fcoe.h>
|
||||
|
||||
#define FDLS_MIN_FRAMES (32)
|
||||
#define FDLS_MIN_FRAME_ELEM (4)
|
||||
#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002
|
||||
#define FNIC_FCP_SP_TARGET 0x00000010
|
||||
#define FNIC_FCP_SP_INITIATOR 0x00000020
|
||||
#define FNIC_FCP_SP_CONF_CMPL 0x00000080
|
||||
#define FNIC_FCP_SP_RETRY 0x00000100
|
||||
|
||||
#define FNIC_FC_CONCUR_SEQS (0xFF)
|
||||
#define FNIC_FC_RO_INFO (0x1F)
|
||||
|
||||
/* Little Endian */
|
||||
#define FNIC_UNASSIGNED_OXID (0xffff)
|
||||
#define FNIC_UNASSIGNED_RXID (0xffff)
|
||||
#define FNIC_ELS_REQ_FCTL (0x000029)
|
||||
#define FNIC_ELS_REP_FCTL (0x000099)
|
||||
|
||||
#define FNIC_FCP_RSP_FCTL (0x000099)
|
||||
#define FNIC_REQ_ABTS_FCTL (0x000009)
|
||||
|
||||
#define FNIC_FC_PH_VER_HI (0x20)
|
||||
#define FNIC_FC_PH_VER_LO (0x20)
|
||||
#define FNIC_FC_PH_VER (0x2020)
|
||||
#define FNIC_FC_B2B_CREDIT (0x0A)
|
||||
#define FNIC_FC_B2B_RDF_SZ (0x0800)
|
||||
|
||||
#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data)
|
||||
#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov)
|
||||
#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov)
|
||||
#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features))
|
||||
#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn)
|
||||
#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn)
|
||||
|
||||
#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \
|
||||
(FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size))
|
||||
#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \
|
||||
(FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov))
|
||||
#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \
|
||||
(FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov))
|
||||
|
||||
#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3)
|
||||
#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3)
|
||||
#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid))
|
||||
#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid))
|
||||
|
||||
#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl)
|
||||
#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type)
|
||||
#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \
|
||||
put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl))
|
||||
|
||||
#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr)
|
||||
#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr)
|
||||
#define FNIC_STD_SET_PORT_ID(__req, __portid) \
|
||||
memcpy(__req.fr_fid.fp_fid, __portid, 3)
|
||||
#define FNIC_STD_SET_PORT_NAME(_req, _pName) \
|
||||
(put_unaligned_be64(_pName, &_req.fr_wwn))
|
||||
|
||||
#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id))
|
||||
#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id))
|
||||
#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id)
|
||||
#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id)
|
||||
#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type)
|
||||
#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl)
|
||||
#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl)
|
||||
|
||||
#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd))
|
||||
|
||||
#define FNIC_FCOE_MAX_FRAME_SZ (2048)
|
||||
#define FNIC_FCOE_MIN_FRAME_SZ (280)
|
||||
#define FNIC_FC_MAX_PAYLOAD_LEN (2048)
|
||||
#define FNIC_MIN_DATA_FIELD_SIZE (256)
|
||||
|
||||
#define FNIC_FC_EDTOV_NSEC (0x400)
|
||||
#define FNIC_NSEC_TO_MSEC (0x1000000)
|
||||
#define FCP_PRLI_FUNC_TARGET (0x0010)
|
||||
|
||||
#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21)
|
||||
#define FNIC_FC_F_CTL_LAST_END_SEQ (0x98)
|
||||
#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99)
|
||||
#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29)
|
||||
#define FNIC_FC_R_CTL_FC4_SCTL (0x03)
|
||||
#define FNIC_FC_CS_CTL (0x00)
|
||||
|
||||
#define FNIC_FC_FRAME_UNSOLICITED(_fchdr) \
|
||||
(_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ)
|
||||
#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \
|
||||
(_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA)
|
||||
#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \
|
||||
(_fchdr->fh_r_ctl == FC_RCTL_ELS_REP)
|
||||
#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \
|
||||
(_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ)
|
||||
#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \
|
||||
(_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT)
|
||||
#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \
|
||||
(_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT)
|
||||
#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \
|
||||
(_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL)
|
||||
#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS)
|
||||
#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS)
|
||||
#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT)
|
||||
#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL)
|
||||
|
||||
#define FNIC_FC_C3_RDF (0xfff)
|
||||
#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \
|
||||
(min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \
|
||||
(_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF)))
|
||||
#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \
|
||||
(min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \
|
||||
(uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff)))
|
||||
|
||||
/* FLOGI/PLOGI struct */
|
||||
struct fc_std_flogi {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_flogi els;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_els_acc_rsp {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_ls_acc acc;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_els_rjt_rsp {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_ls_rjt rej;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_els_adisc {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_adisc els;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_rls_acc {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_rls_resp els;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_abts_ba_acc {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ba_acc acc;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_abts_ba_rjt {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ba_rjt rjt;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_els_prli {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_prli els_prli;
|
||||
struct fc_els_spp sp;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_rpn_id {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ct_hdr fc_std_ct_hdr;
|
||||
struct fc_ns_rn_id rpn_id;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_fdmi_rhba {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ct_hdr fc_std_ct_hdr;
|
||||
struct fc_fdmi_rhba rhba;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_fdmi_rpa {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ct_hdr fc_std_ct_hdr;
|
||||
struct fc_fdmi_rpa rpa;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_rft_id {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ct_hdr fc_std_ct_hdr;
|
||||
struct fc_ns_rft_id rft_id;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_rff_id {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ct_hdr fc_std_ct_hdr;
|
||||
struct fc_ns_rff_id rff_id;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_gpn_ft {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_ct_hdr fc_std_ct_hdr;
|
||||
struct fc_ns_gid_ft gpn_ft;
|
||||
} __packed;
|
||||
|
||||
/* Accept CT_IU for GPN_FT */
|
||||
struct fc_gpn_ft_rsp_iu {
|
||||
uint8_t ctrl;
|
||||
uint8_t fcid[3];
|
||||
uint32_t rsvd;
|
||||
__be64 wwpn;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_rls {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_rls els;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_scr {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_scr scr;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_rscn {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_rscn els;
|
||||
} __packed;
|
||||
|
||||
struct fc_std_logo {
|
||||
struct fc_frame_header fchdr;
|
||||
struct fc_els_logo els;
|
||||
} __packed;
|
||||
|
||||
#define FNIC_ETH_FCOE_HDRS_OFFSET \
|
||||
(sizeof(struct ethhdr) + sizeof(struct fcoe_hdr))
|
||||
|
||||
#endif /* _FDLS_FC_H */
|
1005
drivers/scsi/fnic/fip.c
Normal file
File diff suppressed because it is too large
159
drivers/scsi/fnic/fip.h
Normal file
@ -0,0 +1,159 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
|
||||
*/
|
||||
#ifndef _FIP_H_
|
||||
#define _FIP_H_
|
||||
|
||||
#include "fdls_fc.h"
|
||||
#include "fnic_fdls.h"
|
||||
#include <scsi/fc/fc_fip.h>
|
||||
|
||||
/* Drop the cast from the standard definition */
|
||||
#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02}
|
||||
#define FCOE_MAX_SIZE 0x082E
|
||||
|
||||
#define FCOE_CTLR_FIPVLAN_TOV (3*1000)
|
||||
#define FCOE_CTLR_FCS_TOV (3*1000)
|
||||
#define FCOE_CTLR_MAX_SOL (5*1000)
|
||||
|
||||
#define FIP_DISC_SOL_LEN (6)
|
||||
#define FIP_VLAN_REQ_LEN (2)
|
||||
#define FIP_ENODE_KA_LEN (2)
|
||||
#define FIP_VN_KA_LEN (7)
|
||||
#define FIP_FLOGI_LEN (38)
|
||||
|
||||
enum fdls_vlan_state {
|
||||
FIP_VLAN_AVAIL,
|
||||
FIP_VLAN_SENT
|
||||
};
|
||||
|
||||
enum fdls_fip_state {
|
||||
FDLS_FIP_INIT,
|
||||
FDLS_FIP_VLAN_DISCOVERY_STARTED,
|
||||
FDLS_FIP_FCF_DISCOVERY_STARTED,
|
||||
FDLS_FIP_FLOGI_STARTED,
|
||||
FDLS_FIP_FLOGI_COMPLETE,
|
||||
};
|
||||
|
||||
/*
|
||||
* VLAN entry.
|
||||
*/
|
||||
struct fcoe_vlan {
|
||||
struct list_head list;
|
||||
uint16_t vid; /* vlan ID */
|
||||
uint16_t sol_count; /* no. of sols sent */
|
||||
uint16_t state; /* state */
|
||||
};
|
||||
|
||||
struct fip_vlan_req {
|
||||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct fip_mac_desc mac_desc;
|
||||
} __packed;
|
||||
|
||||
struct fip_vlan_notif {
|
||||
struct fip_header fip;
|
||||
struct fip_vlan_desc vlans_desc[];
|
||||
} __packed;
|
||||
|
||||
struct fip_vn_port_ka {
|
||||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct fip_mac_desc mac_desc;
|
||||
struct fip_vn_desc vn_port_desc;
|
||||
} __packed;
|
||||
|
||||
struct fip_enode_ka {
|
||||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct fip_mac_desc mac_desc;
|
||||
} __packed;
|
||||
|
||||
struct fip_cvl {
|
||||
struct fip_header fip;
|
||||
struct fip_mac_desc fcf_mac_desc;
|
||||
struct fip_wwn_desc name_desc;
|
||||
struct fip_vn_desc vn_ports_desc[];
|
||||
} __packed;
|
||||
|
||||
struct fip_flogi_desc {
|
||||
struct fip_desc fd_desc;
|
||||
uint16_t rsvd;
|
||||
struct fc_std_flogi flogi;
|
||||
} __packed;
|
||||
|
||||
struct fip_flogi_rsp_desc {
|
||||
struct fip_desc fd_desc;
|
||||
uint16_t rsvd;
|
||||
struct fc_std_flogi flogi;
|
||||
} __packed;
|
||||
|
||||
struct fip_flogi {
|
||||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct fip_flogi_desc flogi_desc;
|
||||
struct fip_mac_desc mac_desc;
|
||||
} __packed;
|
||||
|
||||
struct fip_flogi_rsp {
|
||||
struct fip_header fip;
|
||||
struct fip_flogi_rsp_desc rsp_desc;
|
||||
struct fip_mac_desc mac_desc;
|
||||
} __packed;
|
||||
|
||||
struct fip_discovery {
|
||||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct fip_mac_desc mac_desc;
|
||||
struct fip_wwn_desc name_desc;
|
||||
struct fip_size_desc fcoe_desc;
|
||||
} __packed;
|
||||
|
||||
struct fip_disc_adv {
|
||||
struct fip_header fip;
|
||||
struct fip_pri_desc prio_desc;
|
||||
struct fip_mac_desc mac_desc;
|
||||
struct fip_wwn_desc name_desc;
|
||||
struct fip_fab_desc fabric_desc;
|
||||
struct fip_fka_desc fka_adv_desc;
|
||||
} __packed;
|
||||
|
||||
void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph);
|
||||
void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph);
|
||||
void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph);
|
||||
void fnic_work_on_fip_timer(struct work_struct *work);
|
||||
void fnic_work_on_fcs_ka_timer(struct work_struct *work);
|
||||
void fnic_fcoe_send_vlan_req(struct fnic *fnic);
|
||||
void fnic_fcoe_start_fcf_discovery(struct fnic *fnic);
|
||||
void fnic_fcoe_start_flogi(struct fnic *fnic);
|
||||
void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph);
|
||||
void fnic_vlan_discovery_timeout(struct fnic *fnic);
|
||||
|
||||
extern struct workqueue_struct *fnic_fip_queue;
|
||||
|
||||
#ifdef FNIC_DEBUG
|
||||
static inline void
|
||||
fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
|
||||
int len, char *pfx)
|
||||
{
|
||||
struct fip_header *fiph = (struct fip_header *)(eth + 1);
|
||||
u16 op = be16_to_cpu(fiph->fip_op);
|
||||
u8 sub = fiph->fip_subcode;
|
||||
|
||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
|
||||
"FIP %s packet contents: op: 0x%x sub: 0x%x (len = %d)",
|
||||
pfx, op, sub, len);
|
||||
|
||||
fnic_debug_dump(fnic, (uint8_t *)eth, len);
|
||||
}
|
||||
|
||||
#else /* FNIC_DEBUG */
|
||||
|
||||
static inline void
|
||||
fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
|
||||
int len, char *pfx) {}
|
||||
#endif /* FNIC_DEBUG */
|
||||
|
||||
#endif /* _FIP_H_ */
|
@ -22,8 +22,10 @@
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <scsi/libfc.h>
|
||||
#include <scsi/libfcoe.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_transport.h>
|
||||
#include <scsi/scsi_transport_fc.h>
|
||||
#include <scsi/fc_frame.h>
|
||||
#include "fnic_io.h"
|
||||
#include "fnic_res.h"
|
||||
#include "fnic_trace.h"
|
||||
@ -36,21 +38,23 @@
|
||||
#include "vnic_intr.h"
|
||||
#include "vnic_stats.h"
|
||||
#include "vnic_scsi.h"
|
||||
#include "fnic_fdls.h"
|
||||
|
||||
#define DRV_NAME "fnic"
|
||||
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
|
||||
#define DRV_VERSION "1.6.0.53"
|
||||
#define DRV_VERSION "1.8.0.2"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DFX DRV_NAME "%d: "
|
||||
|
||||
#define FABRIC_LOGO_MAX_RETRY 3
|
||||
#define DESC_CLEAN_LOW_WATERMARK 8
|
||||
#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
|
||||
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
|
||||
#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
|
||||
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
|
||||
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
|
||||
#define FNIC_DFLT_QUEUE_DEPTH 256
|
||||
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
|
||||
#define LUN0_DELAY_TIME 9
|
||||
|
||||
/*
|
||||
* Tag bits used for special requests.
|
||||
@ -88,16 +92,100 @@
|
||||
#define FNIC_DEV_RST_TERM_DONE BIT(20)
|
||||
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
|
||||
|
||||
#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */
|
||||
#define FNIC_FCOE_MAX_CMD_LEN 16
|
||||
/* Retry supported by rport (returned by PRLI service parameters) */
|
||||
#define FNIC_FC_RP_FLAGS_RETRY 0x1
|
||||
|
||||
/* Cisco vendor id */
|
||||
#define PCI_VENDOR_ID_CISCO 0x1137
|
||||
#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */
|
||||
|
||||
/* sereno pcie switch */
|
||||
#define PCI_DEVICE_ID_CISCO_SERENO 0x004e
|
||||
#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */
|
||||
#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */
|
||||
#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */
|
||||
|
||||
/* Sereno */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */
|
||||
|
||||
/* Cruz */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */
|
||||
/* Cruz MountTian SIOC */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b
|
||||
#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */
|
||||
/* Cruz MountTian2 SIOC */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157
|
||||
#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */
|
||||
|
||||
/* Bodega */
|
||||
/* VIC 1457 PCIe mLOM */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */
|
||||
/* VIC 1487 PCIe mLOM */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */
|
||||
/* VIC 1440 Mezz mLOM */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */
|
||||
|
||||
/* Beverly */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */
|
||||
#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */
|
||||
|
||||
struct fnic_pcie_device {
|
||||
u32 device;
|
||||
u8 *desc;
|
||||
u32 subsystem_device;
|
||||
u8 *subsys_desc;
|
||||
};
|
||||
|
||||
/*
|
||||
* Usage of the scsi_cmnd scratchpad.
|
||||
* fnic private data per SCSI command.
|
||||
* These fields are locked by the hashed io_req_lock.
|
||||
*/
|
||||
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
|
||||
#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
|
||||
#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
|
||||
#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
|
||||
#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
|
||||
#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
|
||||
struct fnic_cmd_priv {
|
||||
struct fnic_io_req *io_req;
|
||||
enum fnic_ioreq_state state;
|
||||
u32 flags;
|
||||
u16 abts_status;
|
||||
u16 lr_status;
|
||||
};
|
||||
|
||||
static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd)
|
||||
{
|
||||
return scsi_cmd_priv(cmd);
|
||||
}
|
||||
|
||||
static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct fnic_cmd_priv *fcmd = fnic_priv(cmd);
|
||||
|
||||
return ((u64)fcmd->flags << 32) | fcmd->state;
|
||||
}
|
||||
|
||||
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
|
||||
|
||||
@ -108,7 +196,7 @@
|
||||
#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
|
||||
|
||||
#define FNIC_MAX_FCP_TARGET 256
|
||||
|
||||
#define FNIC_PCI_OFFSET 2
|
||||
/**
|
||||
* state_flags to identify host state along with fnic's state
|
||||
**/
|
||||
@ -127,8 +215,38 @@
|
||||
#define fnic_clear_state_flags(fnicp, st_flags) \
|
||||
__fnic_set_state_flags(fnicp, st_flags, 1)
|
||||
|
||||
enum reset_states {
|
||||
NOT_IN_PROGRESS = 0,
|
||||
IN_PROGRESS,
|
||||
RESET_ERROR
|
||||
};
|
||||
|
||||
enum rscn_type {
|
||||
NOT_PC_RSCN = 0,
|
||||
PC_RSCN
|
||||
};
|
||||
|
||||
enum pc_rscn_handling_status {
|
||||
PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0,
|
||||
PC_RSCN_HANDLING_IN_PROGRESS
|
||||
};
|
||||
|
||||
enum pc_rscn_handling_feature {
|
||||
PC_RSCN_HANDLING_FEATURE_OFF = 0,
|
||||
PC_RSCN_HANDLING_FEATURE_ON
|
||||
};
|
||||
|
||||
extern unsigned int fnic_fdmi_support;
|
||||
extern unsigned int fnic_log_level;
|
||||
extern unsigned int io_completions;
|
||||
extern struct workqueue_struct *fnic_event_queue;
|
||||
|
||||
extern unsigned int pc_rscn_handling_feature_flag;
|
||||
extern spinlock_t reset_fnic_list_lock;
|
||||
extern struct list_head reset_fnic_list;
|
||||
extern struct workqueue_struct *reset_fnic_work_queue;
|
||||
extern struct work_struct reset_fnic_work;
|
||||
|
||||
|
||||
#define FNIC_MAIN_LOGGING 0x01
|
||||
#define FNIC_FCS_LOGGING 0x02
|
||||
@ -143,31 +261,54 @@ do { \
|
||||
} while (0); \
|
||||
} while (0)
|
||||
|
||||
#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
|
||||
#define FNIC_MAIN_DBG(kern_level, host, fnic_num, fmt, args...) \
|
||||
FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
|
||||
shost_printk(kern_level, host, fmt, ##args);)
|
||||
shost_printk(kern_level, host, \
|
||||
"fnic<%d>: %s: %d: " fmt, fnic_num,\
|
||||
__func__, __LINE__, ##args);)
|
||||
|
||||
#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
|
||||
#define FNIC_FCS_DBG(kern_level, host, fnic_num, fmt, args...) \
|
||||
FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
|
||||
shost_printk(kern_level, host, fmt, ##args);)
|
||||
shost_printk(kern_level, host, \
|
||||
"fnic<%d>: %s: %d: " fmt, fnic_num,\
|
||||
__func__, __LINE__, ##args);)
|
||||
|
||||
#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
|
||||
#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \
|
||||
FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
|
||||
shost_printk(kern_level, host, \
|
||||
"fnic<%d>: %s: %d: " fmt, fnic_num,\
|
||||
__func__, __LINE__, ##args);)
|
||||
|
||||
#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \
|
||||
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
|
||||
shost_printk(kern_level, host, fmt, ##args);)
|
||||
shost_printk(kern_level, host, \
|
||||
"fnic<%d>: %s: %d: " fmt, fnic_num,\
|
||||
__func__, __LINE__, ##args);)
|
||||
|
||||
#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
|
||||
#define FNIC_ISR_DBG(kern_level, host, fnic_num, fmt, args...) \
|
||||
FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
|
||||
shost_printk(kern_level, host, fmt, ##args);)
|
||||
shost_printk(kern_level, host, \
|
||||
"fnic<%d>: %s: %d: " fmt, fnic_num,\
|
||||
__func__, __LINE__, ##args);)
|
||||
|
||||
#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \
|
||||
shost_printk(kern_level, host, fmt, ##args)
|
||||
|
||||
#define FNIC_WQ_COPY_MAX 64
|
||||
#define FNIC_WQ_MAX 1
|
||||
#define FNIC_RQ_MAX 1
|
||||
#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
|
||||
#define FNIC_DFLT_IO_COMPLETIONS 256
|
||||
|
||||
#define FNIC_MQ_CQ_INDEX 2
|
||||
|
||||
extern const char *fnic_state_str[];
|
||||
|
||||
enum fnic_intx_intr_index {
|
||||
FNIC_INTX_WQ_RQ_COPYWQ,
|
||||
FNIC_INTX_ERR,
|
||||
FNIC_INTX_DUMMY,
|
||||
FNIC_INTX_NOTIFY,
|
||||
FNIC_INTX_ERR,
|
||||
FNIC_INTX_INTR_MAX,
|
||||
};
|
||||
|
||||
@ -175,7 +316,7 @@ enum fnic_msix_intr_index {
|
||||
FNIC_MSIX_RQ,
|
||||
FNIC_MSIX_WQ,
|
||||
FNIC_MSIX_WQ_COPY,
|
||||
FNIC_MSIX_ERR_NOTIFY,
|
||||
FNIC_MSIX_ERR_NOTIFY = FNIC_MSIX_WQ_COPY + FNIC_WQ_COPY_MAX,
|
||||
FNIC_MSIX_INTR_MAX,
|
||||
};
|
||||
|
||||
@ -184,6 +325,7 @@ struct fnic_msix_entry {
|
||||
char devname[IFNAMSIZ + 11];
|
||||
irqreturn_t (*isr)(int, void *);
|
||||
void *devid;
|
||||
int irq_num;
|
||||
};
|
||||
|
||||
enum fnic_state {
|
||||
@ -193,30 +335,47 @@ enum fnic_state {
|
||||
FNIC_IN_ETH_TRANS_FC_MODE,
|
||||
};
|
||||
|
||||
#define FNIC_WQ_COPY_MAX 1
|
||||
#define FNIC_WQ_MAX 1
|
||||
#define FNIC_RQ_MAX 1
|
||||
#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
|
||||
#define FNIC_DFLT_IO_COMPLETIONS 256
|
||||
|
||||
struct mempool;
|
||||
|
||||
enum fnic_role_e {
|
||||
FNIC_ROLE_FCP_INITIATOR = 0,
|
||||
};
|
||||
|
||||
enum fnic_evt {
|
||||
FNIC_EVT_START_VLAN_DISC = 1,
|
||||
FNIC_EVT_START_FCF_DISC = 2,
|
||||
FNIC_EVT_MAX,
|
||||
};
|
||||
|
||||
struct fnic_frame_list {
|
||||
/*
|
||||
* Link to frame lists
|
||||
*/
|
||||
struct list_head links;
|
||||
void *fp;
|
||||
int frame_len;
|
||||
int rx_ethhdr_stripped;
|
||||
};
|
||||
|
||||
struct fnic_event {
|
||||
struct list_head list;
|
||||
struct fnic *fnic;
|
||||
enum fnic_evt event;
|
||||
};
|
||||
|
||||
struct fnic_cpy_wq {
|
||||
unsigned long hw_lock_flags;
|
||||
u16 active_ioreq_count;
|
||||
u16 ioreq_table_size;
|
||||
____cacheline_aligned struct fnic_io_req **io_req_table;
|
||||
};
|
||||
|
||||
/* Per-instance private data structure */
|
||||
struct fnic {
|
||||
struct fc_lport *lport;
|
||||
struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
|
||||
int fnic_num;
|
||||
enum fnic_role_e role;
|
||||
struct fnic_iport_s iport;
|
||||
struct Scsi_Host *host;
|
||||
struct vnic_dev_bar bar0;
|
||||
|
||||
struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
|
||||
@ -235,6 +394,10 @@ struct fnic {
|
||||
unsigned int wq_count;
|
||||
unsigned int cq_count;
|
||||
|
||||
struct completion reset_completion_wait;
|
||||
struct mutex sgreset_mutex;
|
||||
spinlock_t sgreset_lock; /* lock for sgreset */
|
||||
struct scsi_cmnd *sgreset_sc;
|
||||
struct dentry *fnic_stats_debugfs_host;
|
||||
struct dentry *fnic_stats_debugfs_file;
|
||||
struct dentry *fnic_reset_debugfs_file;
|
||||
@ -245,25 +408,27 @@ struct fnic {
|
||||
u32 vlan_hw_insert:1; /* let hw insert the tag */
|
||||
u32 in_remove:1; /* fnic device in removal */
|
||||
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
|
||||
u32 link_events:1; /* set when we get any link event*/
|
||||
|
||||
struct completion *remove_wait; /* device remove thread blocks */
|
||||
|
||||
struct completion *fw_reset_done;
|
||||
u32 reset_in_progress;
|
||||
atomic_t in_flight; /* io counter */
|
||||
bool internal_reset_inprogress;
|
||||
u32 _reserved; /* fill hole */
|
||||
unsigned long state_flags; /* protected by host lock */
|
||||
enum fnic_state state;
|
||||
spinlock_t fnic_lock;
|
||||
unsigned long lock_flags;
|
||||
|
||||
u16 vlan_id; /* VLAN tag including priority */
|
||||
u8 data_src_addr[ETH_ALEN];
|
||||
u64 fcp_input_bytes; /* internal statistic */
|
||||
u64 fcp_output_bytes; /* internal statistic */
|
||||
u32 link_down_cnt;
|
||||
u32 soft_reset_count;
|
||||
int link_status;
|
||||
|
||||
struct list_head list;
|
||||
struct list_head links;
|
||||
struct pci_dev *pdev;
|
||||
struct vnic_fc_config config;
|
||||
struct vnic_dev *vdev;
|
||||
@ -278,27 +443,40 @@ struct fnic {
|
||||
struct fnic_host_tag *tags;
|
||||
mempool_t *io_req_pool;
|
||||
mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
|
||||
spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
|
||||
|
||||
unsigned int copy_wq_base;
|
||||
struct work_struct link_work;
|
||||
struct work_struct frame_work;
|
||||
struct sk_buff_head frame_queue;
|
||||
struct sk_buff_head tx_queue;
|
||||
struct work_struct flush_work;
|
||||
struct list_head frame_queue;
|
||||
struct list_head tx_queue;
|
||||
mempool_t *frame_pool;
|
||||
mempool_t *frame_elem_pool;
|
||||
struct work_struct tport_work;
|
||||
struct list_head tport_event_list;
|
||||
|
||||
char subsys_desc[14];
|
||||
int subsys_desc_len;
|
||||
int pc_rscn_handling_status;
|
||||
|
||||
/*** FIP related data members -- start ***/
|
||||
void (*set_vlan)(struct fnic *, u16 vlan);
|
||||
struct work_struct fip_frame_work;
|
||||
struct sk_buff_head fip_frame_queue;
|
||||
struct work_struct fip_timer_work;
|
||||
struct list_head fip_frame_queue;
|
||||
struct timer_list fip_timer;
|
||||
struct list_head vlans;
|
||||
spinlock_t vlans_lock;
|
||||
|
||||
struct work_struct event_work;
|
||||
struct list_head evlist;
|
||||
struct timer_list retry_fip_timer;
|
||||
struct timer_list fcs_ka_timer;
|
||||
struct timer_list enode_ka_timer;
|
||||
struct timer_list vn_ka_timer;
|
||||
struct list_head vlan_list;
|
||||
/*** FIP related data members -- end ***/
|
||||
|
||||
/* copy work queue cache line section */
|
||||
____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
|
||||
____cacheline_aligned struct vnic_wq_copy hw_copy_wq[FNIC_WQ_COPY_MAX];
|
||||
____cacheline_aligned struct fnic_cpy_wq sw_copy_wq[FNIC_WQ_COPY_MAX];
|
||||
|
||||
/* completion queue cache line section */
|
||||
____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
|
||||
|
||||
@ -315,44 +493,40 @@ struct fnic {
|
||||
____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
|
||||
};
|
||||
|
||||
static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
|
||||
{
|
||||
return container_of(fip, struct fnic, ctlr);
|
||||
}
|
||||
|
||||
extern struct workqueue_struct *fnic_event_queue;
|
||||
extern struct workqueue_struct *fnic_fip_queue;
|
||||
extern struct device_attribute *fnic_attrs[];
|
||||
|
||||
void fnic_clear_intr_mode(struct fnic *fnic);
|
||||
int fnic_set_intr_mode(struct fnic *fnic);
|
||||
int fnic_set_intr_mode_msix(struct fnic *fnic);
|
||||
void fnic_free_intr(struct fnic *fnic);
|
||||
int fnic_request_intr(struct fnic *fnic);
|
||||
|
||||
int fnic_send(struct fc_lport *, struct fc_frame *);
|
||||
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
|
||||
void fnic_handle_frame(struct work_struct *work);
|
||||
void fnic_tport_event_handler(struct work_struct *work);
|
||||
void fnic_handle_link(struct work_struct *work);
|
||||
void fnic_handle_event(struct work_struct *work);
|
||||
void fdls_reclaim_oxid_handler(struct work_struct *work);
|
||||
void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid);
|
||||
void fdls_schedule_oxid_free_retry_work(struct work_struct *work);
|
||||
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
|
||||
int fnic_alloc_rq_frame(struct vnic_rq *rq);
|
||||
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
|
||||
void fnic_flush_tx(struct fnic *);
|
||||
void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
|
||||
void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
|
||||
void fnic_update_mac(struct fc_lport *, u8 *new);
|
||||
void fnic_flush_tx(struct work_struct *work);
|
||||
void fnic_update_mac_locked(struct fnic *, u8 *new);
|
||||
|
||||
int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
|
||||
int fnic_abort_cmd(struct scsi_cmnd *);
|
||||
int fnic_device_reset(struct scsi_cmnd *);
|
||||
int fnic_host_reset(struct scsi_cmnd *);
|
||||
int fnic_reset(struct Scsi_Host *);
|
||||
void fnic_scsi_cleanup(struct fc_lport *);
|
||||
void fnic_scsi_abort_io(struct fc_lport *);
|
||||
void fnic_empty_scsi_cleanup(struct fc_lport *);
|
||||
void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
|
||||
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
|
||||
int fnic_eh_host_reset_handler(struct scsi_cmnd *sc);
|
||||
int fnic_host_reset(struct Scsi_Host *shost);
|
||||
void fnic_reset(struct Scsi_Host *shost);
|
||||
int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
|
||||
void fnic_get_host_port_state(struct Scsi_Host *shost);
|
||||
void fnic_scsi_fcpio_reset(struct fnic *fnic);
|
||||
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
|
||||
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
|
||||
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
|
||||
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
|
||||
@ -360,17 +534,18 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
|
||||
int fnic_fw_reset_handler(struct fnic *fnic);
|
||||
void fnic_terminate_rport_io(struct fc_rport *);
|
||||
const char *fnic_state_to_str(unsigned int state);
|
||||
|
||||
void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
|
||||
void fnic_log_q_error(struct fnic *fnic);
|
||||
void fnic_handle_link_event(struct fnic *fnic);
|
||||
|
||||
int fnic_stats_debugfs_init(struct fnic *fnic);
|
||||
void fnic_stats_debugfs_remove(struct fnic *fnic);
|
||||
int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
|
||||
|
||||
void fnic_handle_fip_frame(struct work_struct *work);
|
||||
void fnic_reset_work_handler(struct work_struct *work);
|
||||
void fnic_handle_fip_event(struct fnic *fnic);
|
||||
void fnic_fcoe_reset_vlans(struct fnic *fnic);
|
||||
void fnic_fcoe_evlist_free(struct fnic *fnic);
|
||||
extern void fnic_handle_fip_timer(struct fnic *fnic);
|
||||
extern void fnic_handle_fip_timer(struct timer_list *t);
|
||||
|
||||
static inline int
|
||||
fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
|
||||
@ -379,4 +554,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
|
||||
}
|
||||
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
|
||||
void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
|
||||
void fnic_free_txq(struct list_head *head);
|
||||
int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
|
||||
char **subsys_desc);
|
||||
void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
|
||||
void fnic_delete_fcp_tports(struct fnic *fnic);
|
||||
void fnic_flush_tport_event_list(struct fnic *fnic);
|
||||
int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
|
||||
unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid);
|
||||
unsigned int fnic_count_all_ioreqs(struct fnic *fnic);
|
||||
unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,
|
||||
struct scsi_device *device);
|
||||
unsigned int fnic_count_lun_ioreqs(struct fnic *fnic,
|
||||
struct scsi_device *device);
|
||||
void fnic_scsi_unload(struct fnic *fnic);
|
||||
void fnic_scsi_unload_cleanup(struct fnic *fnic);
|
||||
int fnic_get_debug_info(struct stats_debug_info *info,
|
||||
struct fnic *fnic);
|
||||
|
||||
struct fnic_scsi_iter_data {
|
||||
struct fnic *fnic;
|
||||
void *data1;
|
||||
void *data2;
|
||||
bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
|
||||
void *data1, void *data2);
|
||||
};
|
||||
|
||||
static inline bool
|
||||
fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data)
|
||||
{
|
||||
struct fnic_scsi_iter_data *iter = iter_data;
|
||||
|
||||
return iter->fn(iter->fnic, sc, iter->data1, iter->data2);
|
||||
}
|
||||
|
||||
static inline void
|
||||
fnic_scsi_io_iter(struct fnic *fnic,
|
||||
bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
|
||||
void *data1, void *data2),
|
||||
void *data1, void *data2)
|
||||
{
|
||||
struct fnic_scsi_iter_data iter_data = {
|
||||
.fn = fn,
|
||||
.fnic = fnic,
|
||||
.data1 = data1,
|
||||
.data2 = data2,
|
||||
};
|
||||
scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data);
|
||||
}
|
||||
|
||||
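
fnic_scsi_io_iter above is a small adapter: scsi_host_busy_iter() hands its callback only one opaque pointer, so the extra context (the fnic plus two data pointers) is packed into a stack structure and unpacked again in fnic_io_iter_handler. The standalone sketch below shows the same wrapping pattern with toy types; all names are illustrative, not driver code.

/* Standalone sketch of wrapping a single-cookie iterator with extra context. */
#include <stdio.h>
#include <stdbool.h>

struct toy_cmd { int tag; };

/* A library-style iterator that only forwards one opaque pointer. */
static void toy_busy_iter(bool (*fn)(struct toy_cmd *cmd, void *priv), void *priv)
{
	struct toy_cmd cmds[3] = { { 1 }, { 2 }, { 3 } };
	int i;

	for (i = 0; i < 3; i++)
		if (!fn(&cmds[i], priv))
			break;
}

/* Extra context the caller wants delivered to its own callback. */
struct toy_iter_data {
	void *data1;
	void *data2;
	bool (*fn)(struct toy_cmd *cmd, void *data1, void *data2);
};

static bool toy_iter_handler(struct toy_cmd *cmd, void *priv)
{
	struct toy_iter_data *iter = priv;

	return iter->fn(cmd, iter->data1, iter->data2);
}

static bool print_cmd(struct toy_cmd *cmd, void *data1, void *data2)
{
	printf("%s cmd tag %d (%s)\n", (char *)data1, cmd->tag, (char *)data2);
	return true;	/* keep iterating */
}

int main(void)
{
	struct toy_iter_data iter = {
		.fn = print_cmd,
		.data1 = "visiting",
		.data2 = "toy context",
	};

	toy_busy_iter(toy_iter_handler, &iter);
	return 0;
}
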
#ifdef FNIC_DEBUG
|
||||
static inline void
|
||||
fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < len; i = i+8) {
|
||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
|
||||
"%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8,
|
||||
u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3],
|
||||
u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
|
||||
int len, char *pfx)
|
||||
{
|
||||
uint32_t s_id, d_id;
|
||||
|
||||
s_id = ntoh24(fchdr->fh_s_id);
|
||||
d_id = ntoh24(fchdr->fh_d_id);
|
||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
|
||||
"%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n",
|
||||
pfx, s_id, d_id, fchdr->fh_type,
|
||||
FNIC_STD_GET_OX_ID(fchdr), len);
|
||||
|
||||
fnic_debug_dump(fnic, (uint8_t *)fchdr, len);
|
||||
|
||||
}
|
||||
#else /* FNIC_DEBUG */
|
||||
static inline void
|
||||
fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {}
|
||||
static inline void
|
||||
fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
|
||||
uint32_t len, char *pfx) {}
|
||||
#endif /* FNIC_DEBUG */
|
||||
#endif /* _FNIC_H_ */
|
||||
|
@ -23,25 +23,28 @@
|
||||
static ssize_t fnic_show_state(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct fc_lport *lp = shost_priv(class_to_shost(dev));
|
||||
struct fnic *fnic = lport_priv(lp);
|
||||
struct fnic *fnic =
|
||||
*((struct fnic **) shost_priv(class_to_shost(dev)));
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
|
||||
return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
|
||||
}
|
||||
|
||||
static ssize_t fnic_show_drv_version(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
|
||||
return sysfs_emit(buf, "%s\n", DRV_VERSION);
|
||||
}
|
||||
|
||||
static ssize_t fnic_show_link_state(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct fc_lport *lp = shost_priv(class_to_shost(dev));
|
||||
struct fnic *fnic =
|
||||
*((struct fnic **) shost_priv(class_to_shost(dev)));
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
|
||||
? "Link Up" : "Link Down");
|
||||
return sysfs_emit(buf, "%s\n",
|
||||
((fnic->iport.state != FNIC_IPORT_STATE_INIT) &&
|
||||
(fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ?
|
||||
"Link Up" : "Link Down");
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
|
||||
|
@ -21,6 +21,9 @@
|
||||
#include <linux/vmalloc.h>
|
||||
#include "fnic.h"
|
||||
|
||||
extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer,
|
||||
struct fnic *fnic);
|
||||
|
||||
static struct dentry *fnic_trace_debugfs_root;
|
||||
static struct dentry *fnic_trace_debugfs_file;
|
||||
static struct dentry *fnic_trace_enable;
|
||||
@ -66,9 +69,10 @@ int fnic_debugfs_init(void)
|
||||
fc_trc_flag->fnic_trace = 2;
|
||||
fc_trc_flag->fc_trace = 3;
|
||||
fc_trc_flag->fc_clear = 4;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -86,8 +90,7 @@ void fnic_debugfs_terminate(void)
|
||||
debugfs_remove(fnic_trace_debugfs_root);
|
||||
fnic_trace_debugfs_root = NULL;
|
||||
|
||||
if (fc_trc_flag)
|
||||
vfree(fc_trc_flag);
|
||||
vfree(fc_trc_flag);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -216,25 +219,21 @@ static int fnic_trace_debugfs_open(struct inode *inode,
|
||||
return -ENOMEM;
|
||||
|
||||
if (*rdata_ptr == fc_trc_flag->fnic_trace) {
|
||||
fnic_dbg_prt->buffer = vmalloc(array3_size(3, trace_max_pages,
|
||||
fnic_dbg_prt->buffer = vzalloc(array3_size(3, trace_max_pages,
|
||||
PAGE_SIZE));
|
||||
if (!fnic_dbg_prt->buffer) {
|
||||
kfree(fnic_dbg_prt);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset((void *)fnic_dbg_prt->buffer, 0,
|
||||
3 * (trace_max_pages * PAGE_SIZE));
|
||||
fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
|
||||
} else {
|
||||
fnic_dbg_prt->buffer =
|
||||
vmalloc(array3_size(3, fnic_fc_trace_max_pages,
|
||||
vzalloc(array3_size(3, fnic_fc_trace_max_pages,
|
||||
PAGE_SIZE));
|
||||
if (!fnic_dbg_prt->buffer) {
|
||||
kfree(fnic_dbg_prt);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset((void *)fnic_dbg_prt->buffer, 0,
|
||||
3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
|
||||
fnic_dbg_prt->buffer_len =
|
||||
fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
|
||||
}
|
||||
@ -611,6 +610,7 @@ static int fnic_stats_debugfs_open(struct inode *inode,
|
||||
debug->buf_size = buf_size;
|
||||
memset((void *)debug->debug_buffer, 0, buf_size);
|
||||
debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
|
||||
debug->buffer_len += fnic_get_debug_info(debug, fnic);
|
||||
|
||||
file->private_data = debug;
|
||||
|
||||
@ -691,26 +691,25 @@ static const struct file_operations fnic_reset_debugfs_fops = {
|
||||
* It will create file stats and reset_stats under statistics/host# directory
|
||||
* to log per fnic stats.
|
||||
*/
|
||||
void fnic_stats_debugfs_init(struct fnic *fnic)
|
||||
int fnic_stats_debugfs_init(struct fnic *fnic)
|
||||
{
|
||||
char name[16];
|
||||
|
||||
snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
|
||||
snprintf(name, sizeof(name), "host%d", fnic->host->host_no);
|
||||
|
||||
fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
|
||||
fnic_stats_debugfs_root);
|
||||
|
||||
fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic->fnic_stats_debugfs_host,
|
||||
fnic,
|
||||
&fnic_stats_debugfs_fops);
|
||||
|
||||
fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
|
||||
S_IFREG|S_IRUGO|S_IWUSR,
|
||||
fnic->fnic_stats_debugfs_host,
|
||||
fnic,
|
||||
&fnic_reset_debugfs_fops);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
File diff suppressed because it is too large
435
drivers/scsi/fnic/fnic_fdls.h
Normal file
@ -0,0 +1,435 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _FNIC_FDLS_H_
|
||||
#define _FNIC_FDLS_H_
|
||||
|
||||
#include "fnic_stats.h"
|
||||
#include "fdls_fc.h"
|
||||
|
||||
/* FDLS - Fabric discovery and login services
|
||||
* -> VLAN discovery
|
||||
* -> retry every retry delay seconds until it succeeds.
|
||||
* <- List of VLANs
|
||||
*
|
||||
* -> Solicitation
|
||||
* <- Solicitation response (Advertisement)
|
||||
*
|
||||
* -> FCF selection & FLOGI ( FLOGI timeout - 2 * E_D_TOV)
|
||||
* <- FLOGI response
|
||||
*
|
||||
* -> FCF keep alive
|
||||
* <- FCF keep alive
|
||||
*
|
||||
* -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV)
|
||||
* -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
|
||||
* <- PLOGI response
|
||||
* -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg
|
||||
*
|
||||
* -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV)
|
||||
* -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
|
||||
* <- SCR response
|
||||
* -> Retry SCR - Number of retries 2
|
||||
*
|
||||
* -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV)
|
||||
* -> Retry on BUSY until it succeeds
|
||||
* -> 2 retries on timeout
|
||||
*
|
||||
* -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV)
|
||||
* -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
|
||||
* -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2 )
|
||||
* -> Ignore if both retries fail.
|
||||
*
|
||||
* Session establishment with targets
|
||||
* For each PWWN
|
||||
* -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV)
|
||||
* -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
|
||||
* <- PLOGI response
|
||||
* -> Retry PLOGI. Num retries using vnic.cfg
|
||||
*
|
||||
* -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV)
|
||||
* -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
|
||||
* <- PRLI response
|
||||
* -> Retry PRLI. Num retries using vnic.cfg
|
||||
*
|
||||
*/
|
||||
|
||||
#define FDLS_RETRY_COUNT 2
|
||||
|
||||
/*
|
||||
* OXID encoding:
|
||||
* bits 0-8: oxid idx - allocated from pool
|
||||
* bits 9-13: oxid frame code from fnic_oxid_frame_type_e
|
||||
* bits 14-15: all zeros
|
||||
*/
|
||||
#define FNIC_OXID_POOL_SZ (512) /* always power of 2 */
|
||||
#define FNIC_OXID_ENCODE(idx, frame_type) (frame_type | idx)
|
||||
#define FNIC_FRAME_MASK 0xFE00
|
||||
#define FNIC_FRAME_TYPE(oxid) (oxid & FNIC_FRAME_MASK)
|
||||
#define FNIC_OXID_IDX(oxid) ((oxid) & (FNIC_OXID_POOL_SZ - 1))
|
||||
|
||||
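
The encoding above packs a pool index into the low 9 bits of the OXID and a frame-type code into bits 9-13, so FNIC_OXID_ENCODE is a plain OR and the two masks pull the fields back apart. A standalone round-trip sketch using the same constants follows; FNIC_FRAME_TYPE_FABRIC_FLOGI is one of the frame codes defined later in this header, and the example index is arbitrary.

/* Standalone sketch of the fnic OXID encode/decode round trip. */
#include <stdio.h>
#include <stdint.h>

#define FNIC_OXID_POOL_SZ	512				/* always power of 2 */
#define FNIC_OXID_ENCODE(idx, frame_type)	((frame_type) | (idx))
#define FNIC_FRAME_MASK		0xFE00
#define FNIC_FRAME_TYPE(oxid)	((oxid) & FNIC_FRAME_MASK)
#define FNIC_OXID_IDX(oxid)	((oxid) & (FNIC_OXID_POOL_SZ - 1))

#define FNIC_FRAME_TYPE_FABRIC_FLOGI	0x1000		/* one of the frame codes */

int main(void)
{
	uint16_t idx = 0x013;					/* index from the pool */
	uint16_t oxid = FNIC_OXID_ENCODE(idx, FNIC_FRAME_TYPE_FABRIC_FLOGI);

	printf("oxid=0x%04x -> frame type 0x%04x, pool idx 0x%03x\n",
	       oxid, FNIC_FRAME_TYPE(oxid), FNIC_OXID_IDX(oxid));
	/* prints: oxid=0x1013 -> frame type 0x1000, pool idx 0x013 */
	return 0;
}
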
#define OXID_RECLAIM_TOV(iport) (2 * iport->r_a_tov) /* in milliseconds */
|
||||
|
||||
#define FNIC_FDLS_FABRIC_ABORT_ISSUED 0x1
|
||||
#define FNIC_FDLS_FPMA_LEARNT 0x2
|
||||
|
||||
/* tport flags */
|
||||
#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST 0x1
|
||||
#define FNIC_FDLS_TGT_ABORT_ISSUED 0x2
|
||||
#define FNIC_FDLS_TPORT_SEND_ADISC 0x4
|
||||
#define FNIC_FDLS_RETRY_FRAME 0x8
|
||||
#define FNIC_FDLS_TPORT_BUSY 0x10
|
||||
#define FNIC_FDLS_TPORT_TERMINATING 0x20
|
||||
#define FNIC_FDLS_TPORT_DELETED 0x40
|
||||
#define FNIC_FDLS_SCSI_REGISTERED 0x200
|
||||
|
||||
/* Retry supported by rport(returned by prli service parameters) */
|
||||
#define FDLS_FC_RP_FLAGS_RETRY 0x1
|
||||
|
||||
#define fdls_set_state(_fdls_fabric, _state) ((_fdls_fabric)->state = _state)
|
||||
#define fdls_get_state(_fdls_fabric) ((_fdls_fabric)->state)
|
||||
|
||||
#define FNIC_FDMI_ACTIVE 0x8
|
||||
#define FNIC_FIRST_LINK_UP 0x2
|
||||
|
||||
#define fdls_set_tport_state(_tport, _state) (_tport->state = _state)
|
||||
#define fdls_get_tport_state(_tport) (_tport->state)
|
||||
|
||||
#define FNIC_PORTSPEED_10GBIT 1
|
||||
#define FNIC_FRAME_HT_ROOM (2148)
|
||||
#define FNIC_FCOE_FRAME_MAXSZ (2112)
|
||||
|
||||
|
||||
#define FNIC_FRAME_TYPE_FABRIC_FLOGI 0x1000
|
||||
#define FNIC_FRAME_TYPE_FABRIC_PLOGI 0x1200
|
||||
#define FNIC_FRAME_TYPE_FABRIC_RPN 0x1400
|
||||
#define FNIC_FRAME_TYPE_FABRIC_RFT 0x1600
|
||||
#define FNIC_FRAME_TYPE_FABRIC_RFF 0x1800
|
||||
#define FNIC_FRAME_TYPE_FABRIC_SCR 0x1A00
|
||||
#define FNIC_FRAME_TYPE_FABRIC_GPN_FT 0x1C00
|
||||
#define FNIC_FRAME_TYPE_FABRIC_LOGO 0x1E00
|
||||
#define FNIC_FRAME_TYPE_FDMI_PLOGI 0x2000
|
||||
#define FNIC_FRAME_TYPE_FDMI_RHBA 0x2200
|
||||
#define FNIC_FRAME_TYPE_FDMI_RPA 0x2400
|
||||
#define FNIC_FRAME_TYPE_TGT_PLOGI 0x2600
|
||||
#define FNIC_FRAME_TYPE_TGT_PRLI 0x2800
|
||||
#define FNIC_FRAME_TYPE_TGT_ADISC 0x2A00
|
||||
#define FNIC_FRAME_TYPE_TGT_LOGO 0x2C00
|
||||
|
||||
struct fnic_fip_fcf_s {
|
||||
uint16_t vlan_id;
|
||||
uint8_t fcf_mac[6];
|
||||
uint8_t fcf_priority;
|
||||
uint32_t fka_adv_period;
|
||||
uint8_t ka_disabled;
|
||||
};
|
||||
|
||||
enum fnic_fdls_state_e {
|
||||
FDLS_STATE_INIT = 0,
|
||||
FDLS_STATE_LINKDOWN,
|
||||
FDLS_STATE_FABRIC_LOGO,
|
||||
FDLS_STATE_FLOGO_DONE,
|
||||
FDLS_STATE_FABRIC_FLOGI,
|
||||
FDLS_STATE_FABRIC_PLOGI,
|
||||
FDLS_STATE_RPN_ID,
|
||||
FDLS_STATE_REGISTER_FC4_TYPES,
|
||||
FDLS_STATE_REGISTER_FC4_FEATURES,
|
||||
FDLS_STATE_SCR,
|
||||
FDLS_STATE_GPN_FT,
|
||||
FDLS_STATE_TGT_DISCOVERY,
|
||||
FDLS_STATE_RSCN_GPN_FT,
|
||||
FDLS_STATE_SEND_GPNFT
|
||||
};
|
||||
|
||||
struct fnic_fdls_fabric_s {
|
||||
enum fnic_fdls_state_e state;
|
||||
uint32_t flags;
|
||||
struct list_head tport_list; /* List of discovered tports */
|
||||
struct timer_list retry_timer;
|
||||
int del_timer_inprogress;
|
||||
int del_fdmi_timer_inprogress;
|
||||
int retry_counter;
|
||||
int timer_pending;
|
||||
int fdmi_retry;
|
||||
struct timer_list fdmi_timer;
|
||||
int fdmi_pending;
|
||||
};
|
||||
|
||||
struct fnic_fdls_fip_s {
|
||||
uint32_t state;
|
||||
uint32_t flogi_retry;
|
||||
};
|
||||
|
||||
/* Message to tport_event_handler */
|
||||
enum fnic_tgt_msg_id {
|
||||
TGT_EV_NONE = 0,
|
||||
TGT_EV_RPORT_ADD,
|
||||
TGT_EV_RPORT_DEL,
|
||||
TGT_EV_TPORT_DELETE,
|
||||
TGT_EV_REMOVE
|
||||
};
|
||||
|
||||
struct fnic_tport_event_s {
|
||||
struct list_head links;
|
||||
enum fnic_tgt_msg_id event;
|
||||
void *arg1;
|
||||
};
|
||||
|
||||
enum fdls_tgt_state_e {
|
||||
FDLS_TGT_STATE_INIT = 0,
|
||||
FDLS_TGT_STATE_PLOGI,
|
||||
FDLS_TGT_STATE_PRLI,
|
||||
FDLS_TGT_STATE_READY,
|
||||
FDLS_TGT_STATE_LOGO_RECEIVED,
|
||||
FDLS_TGT_STATE_ADISC,
|
||||
FDL_TGT_STATE_PLOGO,
|
||||
FDLS_TGT_STATE_OFFLINING,
|
||||
FDLS_TGT_STATE_OFFLINE
|
||||
};
|
||||
|
||||
struct fnic_tport_s {
|
||||
struct list_head links; /* To link the tports */
|
||||
enum fdls_tgt_state_e state;
|
||||
uint32_t flags;
|
||||
uint32_t fcid;
|
||||
uint64_t wwpn;
|
||||
uint64_t wwnn;
|
||||
uint16_t active_oxid;
|
||||
uint16_t tgt_flags;
|
||||
atomic_t in_flight; /* io counter */
|
||||
uint16_t max_payload_size;
|
||||
uint16_t r_a_tov;
|
||||
uint16_t e_d_tov;
|
||||
uint16_t lun0_delay;
|
||||
int max_concur_seqs;
|
||||
uint32_t fcp_csp;
|
||||
struct timer_list retry_timer;
|
||||
int del_timer_inprogress;
|
||||
int retry_counter;
|
||||
int timer_pending;
|
||||
unsigned int num_pending_cmds;
|
||||
int nexus_restart_count;
|
||||
int exch_reset_in_progress;
|
||||
void *iport;
|
||||
struct work_struct tport_del_work;
|
||||
struct completion *tport_del_done;
|
||||
struct fc_rport *rport;
|
||||
char str_wwpn[20];
|
||||
char str_wwnn[20];
|
||||
};
|
||||
|
||||
/* OXID pool related structures */
|
||||
struct reclaim_entry_s {
|
||||
struct list_head links;
|
||||
/* oxid that needs to be freed after 2*r_a_tov */
|
||||
uint16_t oxid_idx;
|
||||
/* expiry time in jiffies; used to compute the remaining wait time */
|
||||
unsigned long expires;
|
||||
unsigned long *bitmap;
|
||||
};
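/*
 * Illustrative timing (hypothetical value): with iport->r_a_tov = 10000 ms,
 * OXID_RECLAIM_TOV() evaluates to 20000 ms, so a reclaimed oxid index waits
 * roughly 20 seconds on the reclaim list before it is freed for reuse.
 */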
|
||||
|
||||
/* used for allocating oxids for fabric and fdmi requests */
|
||||
struct fnic_oxid_pool_s {
|
||||
DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ);
|
||||
int sz; /* size of the pool or block */
|
||||
int next_idx; /* used for cycling through the oxid pool */
|
||||
|
||||
/* retry schedule free */
|
||||
DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ);
|
||||
struct delayed_work schedule_oxid_free_retry;
|
||||
|
||||
/* List of oxids that need to be freed and reclaimed.
|
||||
* This list is shared by all the oxid pools
|
||||
*/
|
||||
struct list_head oxid_reclaim_list;
|
||||
/* Work associated with reclaim list */
|
||||
struct delayed_work oxid_reclaim_work;
|
||||
};
|
||||
|
||||
/* iport */
|
||||
enum fnic_iport_state_e {
|
||||
FNIC_IPORT_STATE_INIT = 0,
|
||||
FNIC_IPORT_STATE_LINK_WAIT,
|
||||
FNIC_IPORT_STATE_FIP,
|
||||
FNIC_IPORT_STATE_FABRIC_DISC,
|
||||
FNIC_IPORT_STATE_READY
|
||||
};
|
||||
|
||||
struct fnic_iport_s {
|
||||
enum fnic_iport_state_e state;
|
||||
struct fnic *fnic;
|
||||
uint64_t boot_time;
|
||||
uint32_t flags;
|
||||
int usefip;
|
||||
uint8_t hwmac[6]; /* HW MAC Addr */
|
||||
uint8_t fpma[6]; /* Fabric Provided MA */
|
||||
uint8_t fcfmac[6]; /* MAC addr of Fabric */
|
||||
uint16_t vlan_id;
|
||||
uint32_t fcid;
|
||||
|
||||
/* oxid pool */
|
||||
struct fnic_oxid_pool_s oxid_pool;
|
||||
|
||||
/*
|
||||
* Fabric requests are serialized; only one request is outstanding at a time.
|
||||
* Track the oxid of the active request so an abort can be sent if needed.
|
||||
*/
|
||||
uint16_t active_oxid_fabric_req;
|
||||
/* fdmi only */
|
||||
uint16_t active_oxid_fdmi_plogi;
|
||||
uint16_t active_oxid_fdmi_rhba;
|
||||
uint16_t active_oxid_fdmi_rpa;
|
||||
|
||||
struct fnic_fip_fcf_s selected_fcf;
|
||||
struct fnic_fdls_fip_s fip;
|
||||
struct fnic_fdls_fabric_s fabric;
|
||||
struct list_head tport_list;
|
||||
struct list_head tport_list_pending_del;
|
||||
/* list of tports for which we are yet to send PLOGO */
|
||||
struct list_head inprocess_tport_list;
|
||||
struct list_head deleted_tport_list;
|
||||
struct work_struct tport_event_work;
|
||||
uint32_t e_d_tov; /* msec */
|
||||
uint32_t r_a_tov; /* msec */
|
||||
uint32_t link_supported_speeds;
|
||||
uint32_t max_flogi_retries;
|
||||
uint32_t max_plogi_retries;
|
||||
uint32_t plogi_timeout;
|
||||
uint32_t service_params;
|
||||
uint64_t wwpn;
|
||||
uint64_t wwnn;
|
||||
uint16_t max_payload_size;
|
||||
spinlock_t deleted_tport_lst_lock;
|
||||
struct completion *flogi_reg_done;
|
||||
struct fnic_iport_stats iport_stats;
|
||||
char str_wwpn[20];
|
||||
char str_wwnn[20];
|
||||
};
|
||||
|
||||
struct rport_dd_data_s {
|
||||
struct fnic_tport_s *tport;
|
||||
struct fnic_iport_s *iport;
|
||||
};
|
||||
|
||||
enum fnic_recv_frame_type_e {
|
||||
FNIC_FABRIC_FLOGI_RSP = 1,
|
||||
FNIC_FABRIC_PLOGI_RSP,
|
||||
FNIC_FABRIC_RPN_RSP,
|
||||
FNIC_FABRIC_RFT_RSP,
|
||||
FNIC_FABRIC_RFF_RSP,
|
||||
FNIC_FABRIC_SCR_RSP,
|
||||
FNIC_FABRIC_GPN_FT_RSP,
|
||||
FNIC_FABRIC_BLS_ABTS_RSP,
|
||||
FNIC_FDMI_PLOGI_RSP,
|
||||
FNIC_FDMI_REG_HBA_RSP,
|
||||
FNIC_FDMI_RPA_RSP,
|
||||
FNIC_FDMI_BLS_ABTS_RSP,
|
||||
FNIC_FABRIC_LOGO_RSP,
|
||||
|
||||
/* responses to target requests */
|
||||
FNIC_TPORT_PLOGI_RSP,
|
||||
FNIC_TPORT_PRLI_RSP,
|
||||
FNIC_TPORT_ADISC_RSP,
|
||||
FNIC_TPORT_BLS_ABTS_RSP,
|
||||
FNIC_TPORT_LOGO_RSP,
|
||||
|
||||
/* unsolicited requests */
|
||||
FNIC_BLS_ABTS_REQ,
|
||||
FNIC_ELS_PLOGI_REQ,
|
||||
FNIC_ELS_RSCN_REQ,
|
||||
FNIC_ELS_LOGO_REQ,
|
||||
FNIC_ELS_ECHO_REQ,
|
||||
FNIC_ELS_ADISC,
|
||||
FNIC_ELS_RLS,
|
||||
FNIC_ELS_RRQ,
|
||||
FNIC_ELS_UNSUPPORTED_REQ,
|
||||
};
|
||||
|
||||
enum fnic_port_speeds {
|
||||
DCEM_PORTSPEED_NONE = 0,
|
||||
DCEM_PORTSPEED_1G = 1000,
|
||||
DCEM_PORTSPEED_2G = 2000,
|
||||
DCEM_PORTSPEED_4G = 4000,
|
||||
DCEM_PORTSPEED_8G = 8000,
|
||||
DCEM_PORTSPEED_10G = 10000,
|
||||
DCEM_PORTSPEED_16G = 16000,
|
||||
DCEM_PORTSPEED_20G = 20000,
|
||||
DCEM_PORTSPEED_25G = 25000,
|
||||
DCEM_PORTSPEED_32G = 32000,
|
||||
DCEM_PORTSPEED_40G = 40000,
|
||||
DCEM_PORTSPEED_4x10G = 41000,
|
||||
DCEM_PORTSPEED_50G = 50000,
|
||||
DCEM_PORTSPEED_64G = 64000,
|
||||
DCEM_PORTSPEED_100G = 100000,
|
||||
DCEM_PORTSPEED_128G = 128000,
|
||||
};
|
||||
|
||||
/* Function Declarations */
|
||||
/* fdls_disc.c */
|
||||
void fnic_fdls_disc_init(struct fnic_iport_s *iport);
|
||||
void fnic_fdls_disc_start(struct fnic_iport_s *iport);
|
||||
void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
|
||||
int len, int fchdr_offset);
|
||||
void fnic_fdls_link_down(struct fnic_iport_s *iport);
|
||||
int fdls_init_frame_pool(struct fnic_iport_s *iport);
|
||||
uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport);
|
||||
uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
|
||||
uint16_t *active_oxid);
|
||||
void fdls_free_oxid(struct fnic_iport_s *iport,
|
||||
uint16_t oxid, uint16_t *active_oxid);
|
||||
void fdls_tgt_logout(struct fnic_iport_s *iport,
|
||||
struct fnic_tport_s *tport);
|
||||
void fnic_del_fabric_timer_sync(struct fnic *fnic);
|
||||
void fnic_del_tport_timer_sync(struct fnic *fnic,
|
||||
struct fnic_tport_s *tport);
|
||||
void fdls_send_fabric_logo(struct fnic_iport_s *iport);
|
||||
int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
|
||||
struct fc_frame_header *fchdr);
|
||||
void fdls_send_tport_abts(struct fnic_iport_s *iport,
|
||||
struct fnic_tport_s *tport);
|
||||
bool fdls_delete_tport(struct fnic_iport_s *iport,
|
||||
struct fnic_tport_s *tport);
|
||||
void fdls_fdmi_timer_callback(struct timer_list *t);
|
||||
void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport);
|
||||
|
||||
/* fnic_fcs.c */
|
||||
void fnic_fdls_init(struct fnic *fnic, int usefip);
|
||||
void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
|
||||
int frame_size);
|
||||
void fnic_fcoe_send_vlan_req(struct fnic *fnic);
|
||||
int fnic_send_fip_frame(struct fnic_iport_s *iport,
|
||||
void *frame, int frame_size);
|
||||
void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
|
||||
uint8_t *fcid);
|
||||
void fnic_fdls_add_tport(struct fnic_iport_s *iport,
|
||||
struct fnic_tport_s *tport, unsigned long flags);
|
||||
void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
|
||||
struct fnic_tport_s *tport,
|
||||
unsigned long flags);
|
||||
|
||||
/* fip.c */
|
||||
void fnic_fcoe_send_vlan_req(struct fnic *fnic);
|
||||
void fnic_common_fip_cleanup(struct fnic *fnic);
|
||||
int fdls_fip_recv_frame(struct fnic *fnic, void *frame);
|
||||
void fnic_handle_fcs_ka_timer(struct timer_list *t);
|
||||
void fnic_handle_enode_ka_timer(struct timer_list *t);
|
||||
void fnic_handle_vn_ka_timer(struct timer_list *t);
|
||||
void fnic_handle_fip_timer(struct timer_list *t);
|
||||
extern void fdls_fabric_timer_callback(struct timer_list *t);
|
||||
|
||||
/* fnic_scsi.c */
|
||||
void fnic_scsi_fcpio_reset(struct fnic *fnic);
|
||||
extern void fdls_fabric_timer_callback(struct timer_list *t);
|
||||
void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
|
||||
int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
|
||||
void *fp);
|
||||
struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
|
||||
uint32_t fcid);
|
||||
struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
|
||||
uint64_t wwpn);
|
||||
|
||||
#endif /* _FNIC_FDLS_H_ */
|
@ -1,60 +0,0 @@
|
||||
/*
|
||||
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you may redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; version 2 of the License.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _FNIC_FIP_H_
|
||||
#define _FNIC_FIP_H_
|
||||
|
||||
|
||||
#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
|
||||
#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
|
||||
#define FCOE_CTLR_MAX_SOL 8
|
||||
|
||||
#define FINC_MAX_FLOGI_REJECTS 8
|
||||
|
||||
struct vlan {
|
||||
__be16 vid;
|
||||
__be16 type;
|
||||
};
|
||||
|
||||
/*
|
||||
* VLAN entry.
|
||||
*/
|
||||
struct fcoe_vlan {
|
||||
struct list_head list;
|
||||
u16 vid; /* vlan ID */
|
||||
u16 sol_count; /* no. of sols sent */
|
||||
u16 state; /* state */
|
||||
};
|
||||
|
||||
enum fip_vlan_state {
|
||||
FIP_VLAN_AVAIL = 0, /* don't do anything */
|
||||
FIP_VLAN_SENT = 1, /* sent */
|
||||
FIP_VLAN_USED = 2, /* succeed */
|
||||
FIP_VLAN_FAILED = 3, /* failed to response */
|
||||
};
|
||||
|
||||
struct fip_vlan {
|
||||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct {
|
||||
struct fip_mac_desc mac;
|
||||
struct fip_wwn_desc wwnn;
|
||||
} desc;
|
||||
};
|
||||
|
||||
#endif /* __FINC_FIP_H_ */
|
@ -19,6 +19,7 @@
|
||||
#define _FNIC_IO_H_
|
||||
|
||||
#include <scsi/fc/fc_fcp.h>
|
||||
#include "fnic_fdls.h"
|
||||
|
||||
#define FNIC_DFLT_SG_DESC_CNT 32
|
||||
#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
|
||||
@ -53,6 +54,8 @@ enum fnic_ioreq_state {
|
||||
};
|
||||
|
||||
struct fnic_io_req {
|
||||
struct fnic_iport_s *iport;
|
||||
struct fnic_tport_s *tport;
|
||||
struct host_sg_desc *sgl_list; /* sgl list */
|
||||
void *sgl_list_alloc; /* sgl list address used for free */
|
||||
dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
|
||||
@ -64,16 +67,7 @@ struct fnic_io_req {
|
||||
unsigned long start_time; /* in jiffies */
|
||||
struct completion *abts_done; /* completion for abts */
|
||||
struct completion *dr_done; /* completion for device reset */
|
||||
};
|
||||
|
||||
enum fnic_port_speeds {
|
||||
DCEM_PORTSPEED_NONE = 0,
|
||||
DCEM_PORTSPEED_1G = 1000,
|
||||
DCEM_PORTSPEED_10G = 10000,
|
||||
DCEM_PORTSPEED_20G = 20000,
|
||||
DCEM_PORTSPEED_25G = 25000,
|
||||
DCEM_PORTSPEED_40G = 40000,
|
||||
DCEM_PORTSPEED_4x10G = 41000,
|
||||
DCEM_PORTSPEED_100G = 100000,
|
||||
unsigned int tag;
|
||||
struct scsi_cmnd *sc; /* midlayer's cmd pointer */
|
||||
};
|
||||
#endif /* _FNIC_IO_H_ */
|
||||
|
@ -19,7 +19,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <scsi/libfc.h>
|
||||
#include <scsi/scsi_transport_fc.h>
|
||||
#include <scsi/fc_frame.h>
|
||||
#include "vnic_dev.h"
|
||||
#include "vnic_intr.h"
|
||||
@ -50,8 +50,13 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
|
||||
fnic_log_q_error(fnic);
|
||||
}
|
||||
|
||||
if (pba & (1 << FNIC_INTX_DUMMY)) {
|
||||
atomic64_inc(&fnic->fnic_stats.misc_stats.intx_dummy);
|
||||
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_DUMMY]);
|
||||
}
|
||||
|
||||
if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
|
||||
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
|
||||
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX);
|
||||
work_done += fnic_wq_cmpl_handler(fnic, -1);
|
||||
work_done += fnic_rq_cmpl_handler(fnic, -1);
|
||||
|
||||
@ -72,7 +77,7 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
|
||||
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
|
||||
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
|
||||
|
||||
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
|
||||
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX);
|
||||
work_done += fnic_wq_cmpl_handler(fnic, -1);
|
||||
work_done += fnic_rq_cmpl_handler(fnic, -1);
|
||||
|
||||
@ -121,12 +126,22 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
|
||||
{
|
||||
struct fnic *fnic = data;
|
||||
unsigned long wq_copy_work_done = 0;
|
||||
int i;
|
||||
|
||||
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
|
||||
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
|
||||
|
||||
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions);
|
||||
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
|
||||
i = irq - fnic->msix[0].irq_num;
|
||||
if (i >= fnic->wq_copy_count + fnic->copy_wq_base ||
|
||||
i < 0 || fnic->msix[i].irq_num != irq) {
|
||||
for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base ; i++) {
|
||||
if (fnic->msix[i].irq_num == irq)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions, i);
|
||||
vnic_intr_return_credits(&fnic->intr[i],
|
||||
wq_copy_work_done,
|
||||
1 /* unmask intr */,
|
||||
1 /* reset intr timer */);
|
||||
@ -140,7 +155,7 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
|
||||
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
|
||||
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
|
||||
|
||||
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
|
||||
vnic_intr_return_all_credits(&fnic->intr[fnic->err_intr_offset]);
|
||||
fnic_log_q_error(fnic);
|
||||
fnic_handle_link_event(fnic);
|
||||
|
||||
@ -198,26 +213,30 @@ int fnic_request_intr(struct fnic *fnic)
|
||||
fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
|
||||
fnic->msix[FNIC_MSIX_WQ].devid = fnic;
|
||||
|
||||
sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
|
||||
"%.11s-scsi-wq", fnic->name);
|
||||
fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
|
||||
fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
|
||||
for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base; i++) {
|
||||
sprintf(fnic->msix[i].devname,
|
||||
"%.11s-scsi-wq-%d", fnic->name, i-FNIC_MSIX_WQ_COPY);
|
||||
fnic->msix[i].isr = fnic_isr_msix_wq_copy;
|
||||
fnic->msix[i].devid = fnic;
|
||||
}
|
||||
|
||||
sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
|
||||
sprintf(fnic->msix[fnic->err_intr_offset].devname,
|
||||
"%.11s-err-notify", fnic->name);
|
||||
fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
|
||||
fnic->msix[fnic->err_intr_offset].isr =
|
||||
fnic_isr_msix_err_notify;
|
||||
fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
|
||||
fnic->msix[fnic->err_intr_offset].devid = fnic;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
|
||||
err = request_irq(pci_irq_vector(fnic->pdev, i),
|
||||
fnic->msix[i].isr, 0,
|
||||
fnic->msix[i].devname,
|
||||
fnic->msix[i].devid);
|
||||
for (i = 0; i < fnic->intr_count; i++) {
|
||||
fnic->msix[i].irq_num = pci_irq_vector(fnic->pdev, i);
|
||||
|
||||
err = request_irq(fnic->msix[i].irq_num,
|
||||
fnic->msix[i].isr, 0,
|
||||
fnic->msix[i].devname,
|
||||
fnic->msix[i].devid);
|
||||
if (err) {
|
||||
shost_printk(KERN_ERR, fnic->lport->host,
|
||||
"MSIX: request_irq"
|
||||
" failed %d\n", err);
|
||||
FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
|
||||
"request_irq failed with error: %d\n",
|
||||
err);
|
||||
fnic_free_intr(fnic);
|
||||
break;
|
||||
}
|
||||
@ -232,44 +251,99 @@ int fnic_request_intr(struct fnic *fnic)
|
||||
return err;
|
||||
}
|
||||
|
||||
int fnic_set_intr_mode(struct fnic *fnic)
|
||||
int fnic_set_intr_mode_msix(struct fnic *fnic)
|
||||
{
|
||||
unsigned int n = ARRAY_SIZE(fnic->rq);
|
||||
unsigned int m = ARRAY_SIZE(fnic->wq);
|
||||
unsigned int o = ARRAY_SIZE(fnic->wq_copy);
|
||||
unsigned int o = ARRAY_SIZE(fnic->hw_copy_wq);
|
||||
unsigned int min_irqs = n + m + 1 + 1; /*rq, raw wq, wq, err*/
|
||||
|
||||
/*
|
||||
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
|
||||
* (last INTR is used for WQ/RQ errors and notification area)
|
||||
*/
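/*
 * Illustrative sizing (hypothetical values): with n = 1 RQ, m = 1 raw WQ and
 * o = 64 copy WQs, the driver would request 1 + 1 + 64 + 1 = 67 MSI-X
 * vectors and expect 1 + 1 + 64 = 66 CQs, the last vector being reserved
 * for error/notify handling.
 */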
|
||||
FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"rq-array size: %d wq-array size: %d copy-wq array size: %d\n",
|
||||
n, m, o);
|
||||
FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n",
|
||||
fnic->rq_count, fnic->raw_wq_count,
|
||||
fnic->wq_copy_count, fnic->cq_count);
|
||||
|
||||
if (fnic->rq_count <= n && fnic->raw_wq_count <= m &&
|
||||
fnic->wq_copy_count <= o) {
|
||||
int vec_count = 0;
|
||||
int vecs = fnic->rq_count + fnic->raw_wq_count + fnic->wq_copy_count + 1;
|
||||
|
||||
vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs,
|
||||
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
|
||||
FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"allocated %d MSI-X vectors\n",
|
||||
vec_count);
|
||||
|
||||
if (vec_count > 0) {
|
||||
if (vec_count < vecs) {
|
||||
FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
|
||||
"interrupts number mismatch: vec_count: %d vecs: %d\n",
|
||||
vec_count, vecs);
|
||||
if (vec_count < min_irqs) {
|
||||
FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
|
||||
"no interrupts for copy wq\n");
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
fnic->rq_count = n;
|
||||
fnic->raw_wq_count = m;
|
||||
fnic->copy_wq_base = fnic->rq_count + fnic->raw_wq_count;
|
||||
fnic->wq_copy_count = vec_count - n - m - 1;
|
||||
fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count;
|
||||
if (fnic->cq_count != vec_count - 1) {
|
||||
FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
|
||||
"CQ count: %d does not match MSI-X vector count: %d\n",
|
||||
fnic->cq_count, vec_count);
|
||||
fnic->cq_count = vec_count - 1;
|
||||
}
|
||||
fnic->intr_count = vec_count;
|
||||
fnic->err_intr_offset = fnic->rq_count + fnic->wq_count;
|
||||
|
||||
FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"rq_count: %d raw_wq_count: %d copy_wq_base: %d\n",
|
||||
fnic->rq_count,
|
||||
fnic->raw_wq_count, fnic->copy_wq_base);
|
||||
|
||||
FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"wq_copy_count: %d wq_count: %d cq_count: %d\n",
|
||||
fnic->wq_copy_count,
|
||||
fnic->wq_count, fnic->cq_count);
|
||||
|
||||
FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"intr_count: %d err_intr_offset: %u",
|
||||
fnic->intr_count,
|
||||
fnic->err_intr_offset);
|
||||
|
||||
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX);
|
||||
FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
|
||||
"fnic using MSI-X\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
int fnic_set_intr_mode(struct fnic *fnic)
|
||||
{
|
||||
int ret_status = 0;
|
||||
|
||||
/*
|
||||
* Set interrupt mode (INTx, MSI, MSI-X) depending
|
||||
* on system capabilities.
|
||||
*
|
||||
* Try MSI-X first
|
||||
*
|
||||
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
|
||||
* (last INTR is used for WQ/RQ errors and notification area)
|
||||
*/
|
||||
if (fnic->rq_count >= n &&
|
||||
fnic->raw_wq_count >= m &&
|
||||
fnic->wq_copy_count >= o &&
|
||||
fnic->cq_count >= n + m + o) {
|
||||
int vecs = n + m + o + 1;
|
||||
|
||||
if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
|
||||
PCI_IRQ_MSIX) == vecs) {
|
||||
fnic->rq_count = n;
|
||||
fnic->raw_wq_count = m;
|
||||
fnic->wq_copy_count = o;
|
||||
fnic->wq_count = m + o;
|
||||
fnic->cq_count = n + m + o;
|
||||
fnic->intr_count = vecs;
|
||||
fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
|
||||
|
||||
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
"Using MSI-X Interrupts\n");
|
||||
vnic_dev_set_intr_mode(fnic->vdev,
|
||||
VNIC_DEV_INTR_MODE_MSIX);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
ret_status = fnic_set_intr_mode_msix(fnic);
|
||||
if (ret_status == 0)
|
||||
return ret_status;
|
||||
|
||||
/*
|
||||
* Next try MSI
|
||||
@ -289,7 +363,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
|
||||
fnic->intr_count = 1;
|
||||
fnic->err_intr_offset = 0;
|
||||
|
||||
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
|
||||
"Using MSI Interrupts\n");
|
||||
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
|
||||
|
||||
@ -315,7 +389,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
|
||||
fnic->cq_count = 3;
|
||||
fnic->intr_count = 3;
|
||||
|
||||
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
|
||||
"Using Legacy Interrupts\n");
|
||||
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
|
||||
|
||||
|
File diff suppressed because it is too large
131
drivers/scsi/fnic/fnic_pci_subsys_devid.c
Normal file
@ -0,0 +1,131 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/mempool.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include "fnic.h"
|
||||
|
||||
static struct fnic_pcie_device fnic_pcie_device_table[] = {
|
||||
{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA,
|
||||
"VIC 1280"},
|
||||
{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI,
|
||||
"VIC 1240"},
|
||||
{PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
|
||||
PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"},
|
||||
{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE,
|
||||
"VIC 1285"},
|
||||
{PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
|
||||
PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"},
|
||||
{PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
|
||||
PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"},
|
||||
{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE,
|
||||
"VIC 1227T"},
|
||||
|
||||
{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA,
|
||||
"VIC 1340"},
|
||||
{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW,
|
||||
"VIC 1380"},
|
||||
{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN,
|
||||
"C3260-SIOC"},
|
||||
{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE,
|
||||
"VIC 1385"},
|
||||
{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2,
|
||||
"C3260-SIOC"},
|
||||
{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT,
|
||||
"VIC 1387"},
|
||||
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY,
|
||||
"VIC 1457"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
|
||||
PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
|
||||
PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE,
|
||||
"VIC 1485"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
|
||||
PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
|
||||
PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA,
|
||||
"VIC 1495"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT,
|
||||
"VIC 1497"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE,
|
||||
"VIC 1467"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON,
|
||||
"VIC 1477"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
|
||||
PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"},
|
||||
{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
|
||||
PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"},
|
||||
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN,
|
||||
"VIC 15420"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
|
||||
PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW,
|
||||
"VIC 15411"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
|
||||
PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU,
|
||||
"VIC 15238"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA,
|
||||
"VIC 15422"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
|
||||
PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
|
||||
PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
|
||||
PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH,
|
||||
"VIC 15230"},
|
||||
{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA,
|
||||
"VIC 15427"},
|
||||
|
||||
{0,}
|
||||
};
|
||||
|
||||
int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
|
||||
char **subsys_desc)
|
||||
{
|
||||
unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC;
|
||||
int max = ARRAY_SIZE(fnic_pcie_device_table);
|
||||
struct fnic_pcie_device *t = fnic_pcie_device_table;
|
||||
int index = 0;
|
||||
|
||||
if (pdev->device != device)
|
||||
return 1;
|
||||
|
||||
while (t->device != 0) {
|
||||
if (memcmp
|
||||
((char *) &pdev->subsystem_device,
|
||||
(char *) &t->subsystem_device, sizeof(short)) == 0)
|
||||
break;
|
||||
t++;
|
||||
index++;
|
||||
}
|
||||
|
||||
if (index >= max - 1) {
|
||||
*desc = NULL;
|
||||
*subsys_desc = NULL;
|
||||
return 1;
|
||||
}
|
||||
|
||||
*desc = fnic_pcie_device_table[index].desc;
|
||||
*subsys_desc = fnic_pcie_device_table[index].subsys_desc;
|
||||
return 0;
|
||||
}
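/*
 * Behavior sketch for fnic_get_desc_by_devid() above (illustrative, based on
 * the table in this file): a VIC whose PCI subsystem device id matches
 * PCI_SUBDEVICE_ID_CISCO_BRISBANE resolves to *desc = "Bodega" and
 * *subsys_desc = "VIC 1467" with a return value of 0; an unrecognized
 * subsystem id leaves both pointers NULL and returns 1.
 */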
|
@ -42,9 +42,7 @@ int fnic_get_vnic_config(struct fnic *fnic)
|
||||
offsetof(struct vnic_fc_config, m), \
|
||||
sizeof(c->m), &c->m); \
|
||||
if (err) { \
|
||||
shost_printk(KERN_ERR, fnic->lport->host, \
|
||||
"Error getting %s, %d\n", #m, \
|
||||
err); \
|
||||
dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \
|
||||
return err; \
|
||||
} \
|
||||
} while (0);
|
||||
@ -69,6 +67,13 @@ int fnic_get_vnic_config(struct fnic *fnic)
|
||||
GET_CONFIG(port_down_timeout);
|
||||
GET_CONFIG(port_down_io_retries);
|
||||
GET_CONFIG(luns_per_tgt);
|
||||
GET_CONFIG(intr_mode);
|
||||
GET_CONFIG(wq_copy_count);
|
||||
|
||||
if ((c->flags & (VFCF_FC_INITIATOR)) == 0) {
|
||||
dev_info(&fnic->pdev->dev, "vNIC role not defined (def role: FC Init)\n");
|
||||
c->flags |= VFCF_FC_INITIATOR;
|
||||
}
|
||||
|
||||
c->wq_enet_desc_count =
|
||||
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
|
||||
@ -143,36 +148,34 @@ int fnic_get_vnic_config(struct fnic *fnic)
|
||||
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
|
||||
c->intr_timer_type = c->intr_timer_type;
|
||||
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC MAC addr %pM "
|
||||
"wq/wq_copy/rq %d/%d/%d\n",
|
||||
fnic->ctlr.ctl_src_addr,
|
||||
/* for older firmware, GET_CONFIG will not return anything */
|
||||
if (c->wq_copy_count == 0)
|
||||
c->wq_copy_count = 1;
|
||||
|
||||
c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
|
||||
|
||||
dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n",
|
||||
fnic->data_src_addr,
|
||||
c->wq_enet_desc_count, c->wq_copy_desc_count,
|
||||
c->rq_desc_count);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC node wwn %llx port wwn %llx\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n",
|
||||
c->node_wwn, c->port_wwn);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC ed_tov %d ra_tov %d\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n",
|
||||
c->ed_tov, c->ra_tov);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC mtu %d intr timer %d\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n",
|
||||
c->maxdatafieldsize, c->intr_timer);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC flags 0x%x luns per tgt %d\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n",
|
||||
c->flags, c->luns_per_tgt);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC flogi_retries %d flogi timeout %d\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n",
|
||||
c->flogi_retries, c->flogi_timeout);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC plogi retries %d plogi timeout %d\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n",
|
||||
c->plogi_retries, c->plogi_timeout);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC io throttle count %d link dn timeout %d\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n",
|
||||
c->io_throttle_count, c->link_down_timeout);
|
||||
shost_printk(KERN_INFO, fnic->lport->host,
|
||||
"vNIC port dn io retries %d port dn timeout %d\n",
|
||||
dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n",
|
||||
c->port_down_io_retries, c->port_down_timeout);
|
||||
dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count);
|
||||
dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -199,12 +202,19 @@ int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
|
||||
void fnic_get_res_counts(struct fnic *fnic)
|
||||
{
|
||||
fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
|
||||
fnic->raw_wq_count = fnic->wq_count - 1;
|
||||
fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
|
||||
fnic->raw_wq_count = 1;
|
||||
fnic->wq_copy_count = fnic->config.wq_copy_count;
|
||||
fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
|
||||
fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
|
||||
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
|
||||
RES_TYPE_INTR_CTRL);
|
||||
|
||||
dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count);
|
||||
dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
|
||||
dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
|
||||
dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count);
|
||||
dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count);
|
||||
dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count);
|
||||
}
|
||||
|
||||
void fnic_free_vnic_resources(struct fnic *fnic)
|
||||
@ -215,7 +225,7 @@ void fnic_free_vnic_resources(struct fnic *fnic)
|
||||
vnic_wq_free(&fnic->wq[i]);
|
||||
|
||||
for (i = 0; i < fnic->wq_copy_count; i++)
|
||||
vnic_wq_copy_free(&fnic->wq_copy[i]);
|
||||
vnic_wq_copy_free(&fnic->hw_copy_wq[i]);
|
||||
|
||||
for (i = 0; i < fnic->rq_count; i++)
|
||||
vnic_rq_free(&fnic->rq[i]);
|
||||
@ -240,16 +250,19 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
|
||||
|
||||
intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
|
||||
|
||||
shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
|
||||
dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n",
|
||||
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
|
||||
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
|
||||
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
|
||||
"MSI-X" : "unknown");
|
||||
|
||||
shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
|
||||
"wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
|
||||
fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
|
||||
fnic->rq_count, fnic->cq_count, fnic->intr_count);
|
||||
dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d",
|
||||
fnic->wq_count, fnic->wq_copy_count,
|
||||
fnic->raw_wq_count, fnic->rq_count);
|
||||
|
||||
dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n",
|
||||
fnic->cq_count, fnic->intr_count,
|
||||
fnic->config.wq_copy_desc_count);
|
||||
|
||||
/* Allocate Raw WQ used for FCS frames */
|
||||
for (i = 0; i < fnic->raw_wq_count; i++) {
|
||||
@ -262,7 +275,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
|
||||
|
||||
/* Allocate Copy WQs used for SCSI IOs */
|
||||
for (i = 0; i < fnic->wq_copy_count; i++) {
|
||||
err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
|
||||
err = vnic_wq_copy_alloc(fnic->vdev, &fnic->hw_copy_wq[i],
|
||||
(fnic->raw_wq_count + i),
|
||||
fnic->config.wq_copy_desc_count,
|
||||
sizeof(struct fcpio_host_req));
|
||||
@ -322,8 +335,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
|
||||
RES_TYPE_INTR_PBA_LEGACY, 0);
|
||||
|
||||
if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
|
||||
shost_printk(KERN_ERR, fnic->lport->host,
|
||||
"Failed to hook legacy pba resource\n");
|
||||
dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n");
|
||||
err = -ENODEV;
|
||||
goto err_out_cleanup;
|
||||
}
|
||||
@ -369,7 +381,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
|
||||
}
|
||||
|
||||
for (i = 0; i < fnic->wq_copy_count; i++) {
|
||||
vnic_wq_copy_init(&fnic->wq_copy[i],
|
||||
vnic_wq_copy_init(&fnic->hw_copy_wq[i],
|
||||
0 /* cq_index 0 - always */,
|
||||
error_interrupt_enable,
|
||||
error_interrupt_offset);
|
||||
@ -426,8 +438,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
|
||||
/* init the stats memory by making the first call here */
|
||||
err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
|
||||
if (err) {
|
||||
shost_printk(KERN_ERR, fnic->lport->host,
|
||||
"vnic_dev_stats_dump failed - x%x\n", err);
|
||||
dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err);
|
||||
goto err_out_cleanup;
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -16,6 +16,8 @@
|
||||
*/
|
||||
#ifndef _FNIC_STATS_H_
|
||||
#define _FNIC_STATS_H_
|
||||
#define FNIC_MQ_MAX_QUEUES 64
|
||||
#include <scsi/scsi_transport_fc.h>
|
||||
|
||||
struct stats_timestamps {
|
||||
struct timespec64 last_reset_time;
|
||||
@ -40,6 +42,7 @@ struct io_path_stats {
|
||||
atomic64_t io_btw_10000_to_30000_msec;
|
||||
atomic64_t io_greater_than_30000_msec;
|
||||
atomic64_t current_max_io_time;
|
||||
atomic64_t ios[FNIC_MQ_MAX_QUEUES];
|
||||
};
|
||||
|
||||
struct abort_stats {
|
||||
@ -75,6 +78,7 @@ struct reset_stats {
|
||||
atomic64_t fw_resets;
|
||||
atomic64_t fw_reset_completions;
|
||||
atomic64_t fw_reset_failures;
|
||||
atomic64_t fw_reset_timeouts;
|
||||
atomic64_t fnic_resets;
|
||||
atomic64_t fnic_reset_completions;
|
||||
atomic64_t fnic_reset_failures;
|
||||
@ -114,9 +118,51 @@ struct misc_stats {
|
||||
atomic64_t no_icmnd_itmf_cmpls;
|
||||
atomic64_t check_condition;
|
||||
atomic64_t queue_fulls;
|
||||
atomic64_t rport_not_ready;
|
||||
atomic64_t tport_not_ready;
|
||||
atomic64_t iport_not_ready;
|
||||
atomic64_t frame_errors;
|
||||
atomic64_t current_port_speed;
|
||||
atomic64_t intx_dummy;
|
||||
atomic64_t port_speed_in_mbps;
|
||||
};
|
||||
|
||||
struct fnic_iport_stats {
|
||||
atomic64_t num_linkdn;
|
||||
atomic64_t num_linkup;
|
||||
atomic64_t link_failure_count;
|
||||
atomic64_t num_rscns;
|
||||
atomic64_t rscn_redisc;
|
||||
atomic64_t rscn_not_redisc;
|
||||
atomic64_t frame_err;
|
||||
atomic64_t num_rnid;
|
||||
atomic64_t fabric_flogi_sent;
|
||||
atomic64_t fabric_flogi_ls_accepts;
|
||||
atomic64_t fabric_flogi_ls_rejects;
|
||||
atomic64_t fabric_flogi_misc_rejects;
|
||||
atomic64_t fabric_plogi_sent;
|
||||
atomic64_t fabric_plogi_ls_accepts;
|
||||
atomic64_t fabric_plogi_ls_rejects;
|
||||
atomic64_t fabric_plogi_misc_rejects;
|
||||
atomic64_t fabric_scr_sent;
|
||||
atomic64_t fabric_scr_ls_accepts;
|
||||
atomic64_t fabric_scr_ls_rejects;
|
||||
atomic64_t fabric_scr_misc_rejects;
|
||||
atomic64_t fabric_logo_sent;
|
||||
atomic64_t tport_alive;
|
||||
atomic64_t tport_plogi_sent;
|
||||
atomic64_t tport_plogi_ls_accepts;
|
||||
atomic64_t tport_plogi_ls_rejects;
|
||||
atomic64_t tport_plogi_misc_rejects;
|
||||
atomic64_t tport_prli_sent;
|
||||
atomic64_t tport_prli_ls_accepts;
|
||||
atomic64_t tport_prli_ls_rejects;
|
||||
atomic64_t tport_prli_misc_rejects;
|
||||
atomic64_t tport_adisc_sent;
|
||||
atomic64_t tport_adisc_ls_accepts;
|
||||
atomic64_t tport_adisc_ls_rejects;
|
||||
atomic64_t tport_logo_sent;
|
||||
atomic64_t unsupported_frames_ls_rejects;
|
||||
atomic64_t unsupported_frames_dropped;
|
||||
};
|
||||
|
||||
struct fnic_stats {
|
||||
@ -127,6 +173,7 @@ struct fnic_stats {
|
||||
struct reset_stats reset_stats;
|
||||
struct fw_stats fw_stats;
|
||||
struct vlan_stats vlan_stats;
|
||||
struct fc_host_statistics host_stats;
|
||||
struct misc_stats misc_stats;
|
||||
};
|
||||
|
||||
@ -138,6 +185,5 @@ struct stats_debug_info {
|
||||
};
|
||||
|
||||
int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
|
||||
void fnic_stats_debugfs_init(struct fnic *);
|
||||
void fnic_stats_debugfs_remove(struct fnic *);
|
||||
const char *fnic_role_to_str(unsigned int role);
|
||||
#endif /* _FNIC_STATS_H_ */
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <scsi/scsi_transport_fc.h>
|
||||
#include "fnic_io.h"
|
||||
#include "fnic.h"
|
||||
|
||||
@ -43,6 +44,17 @@ int fnic_fc_tracing_enabled = 1;
|
||||
int fnic_fc_trace_cleared = 1;
|
||||
static DEFINE_SPINLOCK(fnic_fc_trace_lock);
|
||||
|
||||
static const char * const fnic_role_str[] = {
|
||||
[FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator",
|
||||
};
|
||||
|
||||
const char *fnic_role_to_str(unsigned int role)
|
||||
{
|
||||
if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role])
|
||||
return "Unknown";
|
||||
|
||||
return fnic_role_str[role];
|
||||
}
|
||||
|
||||
/*
|
||||
* fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
|
||||
@ -218,6 +230,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
|
||||
int len = 0;
|
||||
int buf_size = debug->buf_size;
|
||||
struct timespec64 val1, val2;
|
||||
int i = 0;
|
||||
|
||||
ktime_get_real_ts64(&val1);
|
||||
len = scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||
@ -280,6 +293,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
|
||||
(u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
|
||||
(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
|
||||
|
||||
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||
"------------------------------------------\n"
|
||||
"\t\tIO Queues and cumulative IOs\n"
|
||||
"------------------------------------------\n");
|
||||
|
||||
for (i = 0; i < FNIC_MQ_MAX_QUEUES; i++) {
|
||||
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||
"Q:%d -> %lld\n", i, (u64)atomic64_read(&stats->io_stats.ios[i]));
|
||||
}
|
||||
|
||||
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||
"\nCurrent Max IO time : %lld\n",
|
||||
(u64)atomic64_read(&stats->io_stats.current_max_io_time));
|
||||
@ -426,7 +449,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
|
||||
"Number of Check Conditions encountered: %lld\n"
|
||||
"Number of QUEUE Fulls: %lld\n"
|
||||
"Number of rport not ready: %lld\n"
|
||||
"Number of receive frame errors: %lld\n",
|
||||
"Number of receive frame errors: %lld\n"
|
||||
"Port speed (in Mbps): %lld\n",
|
||||
(u64)stats->misc_stats.last_isr_time,
|
||||
(s64)val1.tv_sec, val1.tv_nsec,
|
||||
(u64)stats->misc_stats.last_ack_time,
|
||||
@ -449,18 +473,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
|
||||
(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
|
||||
(u64)atomic64_read(&stats->misc_stats.check_condition),
|
||||
(u64)atomic64_read(&stats->misc_stats.queue_fulls),
|
||||
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
|
||||
(u64)atomic64_read(&stats->misc_stats.frame_errors));
|
||||
|
||||
len += scnprintf(debug->debug_buffer + len, buf_size - len,
|
||||
"Firmware reported port speed: %llu\n",
|
||||
(u64)atomic64_read(
|
||||
&stats->misc_stats.current_port_speed));
|
||||
(u64)atomic64_read(&stats->misc_stats.tport_not_ready),
|
||||
(u64)atomic64_read(&stats->misc_stats.frame_errors),
|
||||
(u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps));
|
||||
|
||||
return len;
|
||||
|
||||
}
|
||||
|
||||
int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic)
|
||||
{
|
||||
struct fnic_iport_s *iport = &fnic->iport;
|
||||
int buf_size = info->buf_size;
|
||||
int len = info->buffer_len;
|
||||
struct fnic_tport_s *tport, *next;
|
||||
unsigned long flags;
|
||||
|
||||
len += snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"------------------------------------------\n"
|
||||
"\t\t Debug Info\n"
|
||||
"------------------------------------------\n");
|
||||
len += snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"fnic Name:%s number:%d Role:%s State:%s\n",
|
||||
fnic->name, fnic->fnic_num,
|
||||
fnic_role_to_str(fnic->role),
|
||||
fnic_state_to_str(fnic->state));
|
||||
len +=
|
||||
snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n",
|
||||
iport->state, iport->flags, iport->vlan_id, iport->fcid);
|
||||
len +=
|
||||
snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"usefip:%d fip_state:%d fip_flogi_retry:%d\n",
|
||||
iport->usefip, iport->fip.state, iport->fip.flogi_retry);
|
||||
len +=
|
||||
snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"fpma %02x:%02x:%02x:%02x:%02x:%02x",
|
||||
iport->fpma[5], iport->fpma[4], iport->fpma[3],
|
||||
iport->fpma[2], iport->fpma[1], iport->fpma[0]);
|
||||
len +=
|
||||
snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3],
|
||||
iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]);
|
||||
len +=
|
||||
snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n",
|
||||
iport->fabric.state, iport->fabric.flags,
|
||||
iport->fabric.retry_counter, iport->e_d_tov,
|
||||
iport->r_a_tov);
|
||||
|
||||
spin_lock_irqsave(&fnic->fnic_lock, flags);
|
||||
list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
|
||||
len += snprintf(info->debug_buffer + len, buf_size - len,
|
||||
"tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n",
|
||||
tport->fcid, tport->state, tport->flags,
|
||||
atomic_read(&tport->in_flight),
|
||||
tport->retry_counter);
|
||||
}
|
||||
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
|
||||
return len;
|
||||
}
|
||||
|
||||
/*
|
||||
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility
|
||||
*
|
||||
@ -479,7 +553,7 @@ int fnic_trace_buf_init(void)
|
||||
fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
|
||||
FNIC_ENTRY_SIZE_BYTES;
|
||||
|
||||
fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
|
||||
fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE);
|
||||
if (!fnic_trace_buf_p) {
|
||||
printk(KERN_ERR PFX "Failed to allocate memory "
|
||||
"for fnic_trace_buf_p\n");
|
||||
@ -488,8 +562,7 @@ int fnic_trace_buf_init(void)
|
||||
}
|
||||
|
||||
fnic_trace_entries.page_offset =
|
||||
vmalloc(array_size(fnic_max_trace_entries,
|
||||
sizeof(unsigned long)));
|
||||
vcalloc(fnic_max_trace_entries, sizeof(unsigned long));
|
||||
if (!fnic_trace_entries.page_offset) {
|
||||
printk(KERN_ERR PFX "Failed to allocate memory for"
|
||||
" page_offset\n");
|
||||
@ -500,8 +573,6 @@ int fnic_trace_buf_init(void)
|
||||
err = -ENOMEM;
|
||||
goto err_fnic_trace_buf_init;
|
||||
}
|
||||
memset((void *)fnic_trace_entries.page_offset, 0,
|
||||
(fnic_max_trace_entries * sizeof(unsigned long)));
|
||||
fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
|
||||
fnic_buf_head = fnic_trace_buf_p;
|
||||
|
||||
@ -562,8 +633,7 @@ int fnic_fc_trace_init(void)
|
||||
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
|
||||
FC_TRC_SIZE_BYTES;
|
||||
fnic_fc_ctlr_trace_buf_p =
|
||||
(unsigned long)vmalloc(array_size(PAGE_SIZE,
|
||||
fnic_fc_trace_max_pages));
|
||||
(unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE);
|
||||
if (!fnic_fc_ctlr_trace_buf_p) {
|
||||
pr_err("fnic: Failed to allocate memory for "
|
||||
"FC Control Trace Buf\n");
|
||||
@ -571,13 +641,9 @@ int fnic_fc_trace_init(void)
|
||||
goto err_fnic_fc_ctlr_trace_buf_init;
|
||||
}
|
||||
|
||||
memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
|
||||
fnic_fc_trace_max_pages * PAGE_SIZE);
|
||||
|
||||
/* Allocate memory for page offset */
|
||||
fc_trace_entries.page_offset =
|
||||
vmalloc(array_size(fc_trace_max_entries,
|
||||
sizeof(unsigned long)));
|
||||
vcalloc(fc_trace_max_entries, sizeof(unsigned long));
|
||||
if (!fc_trace_entries.page_offset) {
|
||||
pr_err("fnic:Failed to allocate memory for page_offset\n");
|
||||
if (fnic_fc_ctlr_trace_buf_p) {
|
||||
@ -588,8 +654,6 @@ int fnic_fc_trace_init(void)
|
||||
err = -ENOMEM;
|
||||
goto err_fnic_fc_ctlr_trace_buf_init;
|
||||
}
|
||||
memset((void *)fc_trace_entries.page_offset, 0,
|
||||
(fc_trace_max_entries * sizeof(unsigned long)));
|
||||
|
||||
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
|
||||
fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
|
||||
@ -691,7 +755,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
|
||||
*/
|
||||
if (frame_type == FNIC_FC_RECV) {
|
||||
eth_fcoe_hdr_len = sizeof(struct ethhdr) +
|
||||
sizeof(struct fcoe_hdr);
|
||||
sizeof(struct fcoe_hdr);
|
||||
memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
|
||||
/* Copy the rest of data frame */
|
||||
memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
|
||||
|
@ -155,6 +155,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
|
||||
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
|
||||
}
|
||||
|
||||
pr_info("res_type_wq: %d res_type_rq: %d res_type_cq: %d res_type_intr_ctrl: %d\n",
|
||||
vdev->res[RES_TYPE_WQ].count, vdev->res[RES_TYPE_RQ].count,
|
||||
vdev->res[RES_TYPE_CQ].count, vdev->res[RES_TYPE_INTR_CTRL].count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -38,7 +38,7 @@
|
||||
#define VNIC_FNIC_RATOV_MAX 255000
|
||||
|
||||
#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
|
||||
#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
|
||||
#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2048
|
||||
|
||||
#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
|
||||
#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
|
||||
@ -67,7 +67,7 @@
|
||||
#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
|
||||
|
||||
#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
|
||||
#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
|
||||
#define VNIC_FNIC_LUNS_PER_TARGET_MAX 4096
|
||||
|
||||
/* Device-specific region: scsi configuration */
|
||||
struct vnic_fc_config {
|
||||
@ -91,10 +91,19 @@ struct vnic_fc_config {
|
||||
u16 ra_tov;
|
||||
u16 intr_timer;
|
||||
u8 intr_timer_type;
|
||||
u8 intr_mode;
|
||||
u8 lun_queue_depth;
|
||||
u8 io_timeout_retry;
|
||||
u16 wq_copy_count;
|
||||
};
|
||||
|
||||
#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
|
||||
#define VFCF_PERBI 0x2 /* persistent binding info available */
|
||||
#define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */
|
||||
|
||||
#define VFCF_FC_INITIATOR 0x20 /* FC Initiator Mode */
|
||||
#define VFCF_FC_TARGET 0x40 /* FC Target Mode */
|
||||
#define VFCF_FC_NVME_INITIATOR 0x80 /* FC-NVMe Initiator Mode */
|
||||
#define VFCF_FC_NVME_TARGET 0x100 /* FC-NVMe Target Mode */
|
||||
|
||||
#endif /* _VNIC_SCSI_H_ */
|
||||
|
@ -4157,7 +4157,17 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool dev_nit_active(struct net_device *dev);
|
||||
bool dev_nit_active_rcu(const struct net_device *dev);
|
||||
static inline bool dev_nit_active(const struct net_device *dev)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
rcu_read_lock();
|
||||
ret = dev_nit_active_rcu(dev);
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
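/*
 * Descriptive note: dev_nit_active_rcu() is meant for callers that already
 * run inside an RCU read-side section (such as the transmit path); the
 * dev_nit_active() wrapper above simply takes rcu_read_lock() around it for
 * callers outside such a section.
 */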
|
||||
|
||||
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
|
||||
|
||||
static inline void __dev_put(struct net_device *dev)
|
||||
|
@ -185,6 +185,9 @@ struct net {
|
||||
#if IS_ENABLED(CONFIG_SMC)
|
||||
struct netns_smc smc;
|
||||
#endif
|
||||
|
||||
RH_KABI_EXTEND(struct list_head ptype_all)
|
||||
RH_KABI_EXTEND(struct list_head ptype_specific)
|
||||
} __randomize_layout;
|
||||
|
||||
#include <linux/seq_file_net.h>
|
||||
|
@ -164,7 +164,6 @@
|
||||
|
||||
static DEFINE_SPINLOCK(ptype_lock);
|
||||
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
|
||||
struct list_head ptype_all __read_mostly; /* Taps */
|
||||
|
||||
static int netif_rx_internal(struct sk_buff *skb);
|
||||
static int call_netdevice_notifiers_extack(unsigned long val,
|
||||
@ -569,10 +568,18 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
|
||||
|
||||
static inline struct list_head *ptype_head(const struct packet_type *pt)
|
||||
{
|
||||
if (pt->type == htons(ETH_P_ALL))
|
||||
return pt->dev ? &pt->dev->ptype_all : &ptype_all;
|
||||
else
|
||||
return pt->dev ? &pt->dev->ptype_specific :
|
||||
if (pt->type == htons(ETH_P_ALL)) {
|
||||
if (!pt->af_packet_net && !pt->dev)
|
||||
return NULL;
|
||||
|
||||
return pt->dev ? &pt->dev->ptype_all :
|
||||
&pt->af_packet_net->ptype_all;
|
||||
}
|
||||
|
||||
if (pt->dev)
|
||||
return &pt->dev->ptype_specific;
|
||||
|
||||
return pt->af_packet_net ? &pt->af_packet_net->ptype_specific :
|
||||
&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
|
||||
}
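/*
 * Dispatch summary (descriptive note): an ETH_P_ALL tap bound to a device is
 * chained on dev->ptype_all, an unbound ETH_P_ALL tap must have an owning
 * netns and goes on that netns' ptype_all, and NULL is returned when it has
 * neither. Protocol-specific handlers prefer dev->ptype_specific, then the
 * owning netns' ptype_specific, and fall back to the global ptype_base hash
 * only when neither is set.
 */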
|
||||
|
||||
@ -593,6 +600,9 @@ void dev_add_pack(struct packet_type *pt)
|
||||
{
|
||||
struct list_head *head = ptype_head(pt);
|
||||
|
||||
if (WARN_ON_ONCE(!head))
return;

spin_lock(&ptype_lock);
list_add_rcu(&pt->list, head);
spin_unlock(&ptype_lock);
@ -617,6 +627,9 @@ void __dev_remove_pack(struct packet_type *pt)
struct list_head *head = ptype_head(pt);
struct packet_type *pt1;

if (!head)
return;

spin_lock(&ptype_lock);

list_for_each_entry(pt1, head, list) {
@ -2301,15 +2314,21 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
}

/**
* dev_nit_active - return true if any network interface taps are in use
* dev_nit_active_rcu - return true if any network interface taps are in use
*
* The caller must hold the RCU lock
*
* @dev: network device to check for the presence of taps
*/
bool dev_nit_active(struct net_device *dev)
bool dev_nit_active_rcu(const struct net_device *dev)
{
return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
/* Callers may hold either RCU or RCU BH lock */
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());

return !list_empty(&dev_net(dev)->ptype_all) ||
!list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
EXPORT_SYMBOL_GPL(dev_nit_active_rcu);

/*
* Support routine. Sends outgoing frames to any network
@ -2321,9 +2340,10 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
struct packet_type *ptype;
struct sk_buff *skb2 = NULL;
struct packet_type *pt_prev = NULL;
struct list_head *ptype_list = &ptype_all;
struct list_head *ptype_list;

rcu_read_lock();
ptype_list = &dev_net_rcu(dev)->ptype_all;
again:
list_for_each_entry_rcu(ptype, ptype_list, list) {
if (READ_ONCE(ptype->ignore_outgoing))
@ -2367,7 +2387,7 @@ again:
pt_prev = ptype;
}

if (ptype_list == &ptype_all) {
if (ptype_list != &dev->ptype_all) {
ptype_list = &dev->ptype_all;
goto again;
}
@ -3581,7 +3601,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
unsigned int len;
int rc;

if (dev_nit_active(dev))
if (dev_nit_active_rcu(dev))
dev_queue_xmit_nit(skb, dev);

len = skb->len;
@ -5445,7 +5465,8 @@ another_round:
if (pfmemalloc)
goto skip_taps;

list_for_each_entry_rcu(ptype, &ptype_all, list) {
list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all,
list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@ -5557,6 +5578,14 @@ check_vlan_id:
deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
&ptype_base[ntohs(type) &
PTYPE_HASH_MASK]);

/* orig_dev and skb->dev could belong to different netns;
* Even in such case we need to traverse only the list
* coming from skb->dev, as the ptype owner (packet socket)
* will use dev_net(skb->dev) to do namespace filtering.
*/
deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
&dev_net_rcu(skb->dev)->ptype_specific);
}

deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
@ -11921,7 +11950,6 @@ static int __init net_dev_init(void)
if (netdev_kobject_init())
goto out;

INIT_LIST_HEAD(&ptype_all);
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
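
The net/core/dev.c hunks above come from the "net: introduce per netns packet chains" change: the ETH_P_ALL taps move from the global ptype_all list to a list hanging off each struct net, dev_nit_active() becomes dev_nit_active_rcu() and expects the caller to already hold the RCU (or RCU BH) read lock, and dev_queue_xmit_nit() takes its list from dev_net_rcu(dev). The user-space sketch below only models why the per-netns split matters on the transmit fast path; struct tap, nit_active_global() and nit_active_per_netns() are illustrative stand-ins, not kernel interfaces.

/* Stand-alone model of the per-netns tap check; simplified types, not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct tap { const char *name; struct tap *next; };

struct net {                    /* models struct net */
	struct tap *ptype_all;  /* taps registered in this namespace */
};

struct net_device {             /* models struct net_device */
	struct net *nd_net;     /* owning namespace */
	struct tap *ptype_all;  /* taps bound to this single device */
};

static struct tap *global_ptype_all; /* old scheme: one list shared by every netns */

/* Old check: any tap anywhere forces the clone/deliver work on every device. */
static bool nit_active_global(const struct net_device *dev)
{
	return global_ptype_all || dev->ptype_all;
}

/* New check: only taps in the device's own namespace (or on the device) count. */
static bool nit_active_per_netns(const struct net_device *dev)
{
	return dev->nd_net->ptype_all || dev->ptype_all;
}

int main(void)
{
	struct tap sniffer = { .name = "tcpdump", .next = NULL };
	struct net ns_a = { .ptype_all = &sniffer }; /* capture runs here */
	struct net ns_b = { .ptype_all = NULL };     /* idle namespace */
	struct net_device eth_a = { .nd_net = &ns_a, .ptype_all = NULL };
	struct net_device eth_b = { .nd_net = &ns_b, .ptype_all = NULL };

	global_ptype_all = &sniffer; /* the old scheme saw this tap globally */

	printf("old model, eth_b pays tap cost: %d\n", nit_active_global(&eth_b));    /* 1 */
	printf("new model, eth_b pays tap cost: %d\n", nit_active_per_netns(&eth_b)); /* 0 */
	printf("new model, eth_a pays tap cost: %d\n", nit_active_per_netns(&eth_a)); /* 1 */
	return 0;
}

With the old global list, a packet capture in one namespace makes dev_queue_xmit_nit() run for every device on the host; with the per-netns list only devices in the capturing namespace pay that cost, and the WARN_ON_ONCE() in dev_nit_active_rcu() documents the RCU locking requirement at run time.
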
@ -175,12 +175,18 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
}
}

list_for_each_entry_rcu(pt, &ptype_all, list) {
list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) {
if (i == pos)
return pt;
++i;
}

list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_specific, list) {
if (i == pos)
return pt;
++i;
}

for (t = 0; t < PTYPE_HASH_SIZE; t++) {
list_for_each_entry_rcu(pt, &ptype_base[t], list) {
if (i == pos)
@ -200,6 +206,7 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
struct net_device *dev;
struct packet_type *pt;
struct list_head *nxt;
@ -223,14 +230,22 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
}

nxt = ptype_all.next;
goto ptype_all;
nxt = net->ptype_all.next;
goto net_ptype_all;
}

if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
if (nxt != &ptype_all)
if (pt->af_packet_net) {
net_ptype_all:
if (nxt != &net->ptype_all && nxt != &net->ptype_specific)
goto found;

if (nxt == &net->ptype_all) {
/* continue with ->ptype_specific if it's not empty */
nxt = net->ptype_specific.next;
if (nxt != &net->ptype_specific)
goto found;
}

hash = 0;
nxt = ptype_base[0].next;
} else
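
The net-procfs.c hunks change how /proc/net/ptype is walked: the iterator now visits the namespace's ptype_all list, then its ptype_specific list, and only then the global ptype_base hash, resuming from a flat position index across all of them. The sketch below models that flat-index walk with plain arrays; get_idx(), ns_ptype_all, ns_ptype_specific and the bucket contents are made-up stand-ins for the kernel's RCU lists.

/* Toy version of the ptype_get_idx() walk order: namespace ptype_all first,
 * then namespace ptype_specific, then the global hash buckets. */
#include <stddef.h>
#include <stdio.h>

#define HASH_SIZE 4

static const char *ns_ptype_all[]      = { "af_packet ETH_P_ALL" };
static const char *ns_ptype_specific[] = { "af_packet ETH_P_ARP" };
static const char *ptype_base[HASH_SIZE][2] = {
	{ "ipv4", "arp" }, { "ipv6", NULL }, { NULL, NULL }, { "lldp", NULL },
};

/* Return the pos-th visible entry, or NULL once pos runs past the end. */
static const char *get_idx(size_t pos)
{
	size_t i = 0, t, j;

	for (j = 0; j < sizeof(ns_ptype_all) / sizeof(ns_ptype_all[0]); j++)
		if (i++ == pos)
			return ns_ptype_all[j];
	for (j = 0; j < sizeof(ns_ptype_specific) / sizeof(ns_ptype_specific[0]); j++)
		if (i++ == pos)
			return ns_ptype_specific[j];
	for (t = 0; t < HASH_SIZE; t++)
		for (j = 0; j < 2 && ptype_base[t][j]; j++)
			if (i++ == pos)
				return ptype_base[t][j];
	return NULL;
}

int main(void)
{
	const char *e;

	for (size_t pos = 0; (e = get_idx(pos)) != NULL; pos++)
		printf("%zu: %s\n", pos, e);
	return 0;
}

This is roughly what ptype_get_idx() does over the real lists when a read of the seq_file (re)starts at an arbitrary offset.
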
@ -311,6 +311,9 @@ EXPORT_SYMBOL_GPL(get_net_ns_by_id);
static __net_init void preinit_net(struct net *net)
{
ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");

INIT_LIST_HEAD(&net->ptype_all);
INIT_LIST_HEAD(&net->ptype_specific);
}

/*
@ -803,8 +803,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
} else {
im->mca_crcount = idev->mc_qrv;
}
in6_dev_put(pmc->idev);
ip6_mc_clear_src(pmc);
in6_dev_put(pmc->idev);
kfree_rcu(pmc, rcu);
}
}
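
The net/ipv6/mcast.c hunk is the CVE-2025-38550 fix listed in the changelog below: mld_del_delrec() used to call in6_dev_put(pmc->idev) before ip6_mc_clear_src(pmc), so dropping the last idev reference could free state that the clear step still needed; the backport simply moves the put after the clear. The sketch below models the ordering rule with a hypothetical refcounted object; struct obj, obj_get(), obj_put() and clear_sources() are illustrative stand-ins, not kernel helpers.

/* Minimal model of the put-before-use pattern fixed in mld_del_delrec(). */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcnt;
	int sources;    /* state that clear_sources() still has to touch */
};

static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0) {
		printf("freeing obj\n");
		free(o);
	}
}

/* Needs the object to still be alive when it runs. */
static void clear_sources(struct obj *o)
{
	o->sources = 0;
	printf("cleared sources, refcnt=%d\n", o->refcnt);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	obj_get(o);    /* the reference the caller holds, like pmc->idev */

	/* Buggy order (what the old code did):
	 *   obj_put(o);        -- may free o here ...
	 *   clear_sources(o);  -- ... and then dereference freed memory
	 *
	 * Fixed order, as in the hunk above: use first, put last. */
	clear_sources(o);
	obj_put(o);
	return 0;
}

The rule is the same as in the kernel hunk: finish every access that may reach the refcounted object before dropping the reference that keeps it alive.
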
@ -1,7 +1,120 @@
* Mon Sep 08 2025 Patrick Talbert <ptalbert@redhat.com> [5.14.0-570.42.2.el9_6]
* Tue Sep 09 2025 Chao YE <cye@redhat.com> [5.14.0-570.44.1.el9_6]
- ipv6: mcast: Delay put pmc->idev in mld_del_delrec() (CKI Backport Bot) [RHEL-111149] {CVE-2025-38550}
- posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del() (CKI Backport Bot) [RHEL-112780] {CVE-2025-38352}
- powerpc/pseries/iommu: create DDW for devices with DMA mask less than 64-bits (CKI Backport Bot) [RHEL-113173]
Resolves: RHEL-112780, RHEL-113173
Resolves: RHEL-111149, RHEL-112780, RHEL-113173

* Sat Sep 06 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.43.1.el9_6]
- eth: bnxt: fix missing ring index trim on error path (CKI Backport Bot) [RHEL-104561] {CVE-2025-37873}
- book3s64/radix : Align section vmemmap start address to PAGE_SIZE (Mamatha Inamdar) [RHEL-109492]
- book3s64/radix: Fix compile errors when CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=n (Mamatha Inamdar) [RHEL-109492]
- net: introduce per netns packet chains (Paolo Abeni) [RHEL-89050]
- enic: fix incorrect MTU comparison in enic_change_mtu() (John Meneghini) [RHEL-108274]
- net/enic: Allow at least 8 RQs to always be used (John Meneghini) [RHEL-108274]
- enic: get max rq & wq entries supported by hw, 16K queues (John Meneghini) [RHEL-106604]
- enic: cleanup of enic wq request completion path (John Meneghini) [RHEL-106604]
- enic: added enic_wq.c and enic_wq.h (John Meneghini) [RHEL-106604]
- enic: remove unused function cq_enet_wq_desc_dec (John Meneghini) [RHEL-106604]
- enic: enable rq extended cq support (John Meneghini) [RHEL-106604]
- enic: enic rq extended cq defines (John Meneghini) [RHEL-106604]
- enic: enic rq code reorg (John Meneghini) [RHEL-106604]
- enic: Move function from header file to c file (John Meneghini) [RHEL-106604]
- enic: add dependency on Page Pool (John Meneghini) [RHEL-106604]
- enic: remove copybreak tunable (John Meneghini) [RHEL-106604]
- enic: Use the Page Pool API for RX (John Meneghini) [RHEL-106604]
- enic: Simplify RX handler function (John Meneghini) [RHEL-106604]
- enic: Move RX functions to their own file (John Meneghini) [RHEL-106604]
- enic: Fix typo in comment in table indexed by link speed (John Meneghini) [RHEL-106604]
- enic: Obtain the Link speed only after the link comes up (John Meneghini) [RHEL-106604]
- enic: Move RX coalescing set function (John Meneghini) [RHEL-106604]
- enic: Move kdump check into enic_adjust_resources() (John Meneghini) [RHEL-106604]
- enic: Move enic resource adjustments to separate function (John Meneghini) [RHEL-106604]
- enic: Adjust used MSI-X wq/rq/cq/interrupt resources in a more robust way (John Meneghini) [RHEL-106604]
- enic: Allocate arrays in enic struct based on VIC config (John Meneghini) [RHEL-106604]
- enic: Save resource counts we read from HW (John Meneghini) [RHEL-106604]
- enic: Make MSI-X I/O interrupts come after the other required ones (John Meneghini) [RHEL-106604]
- enic: Create enic_wq/rq structures to bundle per wq/rq data (John Meneghini) [RHEL-106604]
- enic: Report some per queue statistics in ethtool (John Meneghini) [RHEL-106604]
- enic: Report per queue statistics in netdev qstats (John Meneghini) [RHEL-106604]
- enic: Collect per queue statistics (John Meneghini) [RHEL-106604]
- enic: Use macro instead of static const variables for array sizes (John Meneghini) [RHEL-106604]
- enic: add ethtool get_channel support (John Meneghini) [RHEL-106604]
- enic: Validate length of nl attributes in enic_set_vf_port (John Meneghini) [RHEL-106604]
- enic: Replace hardcoded values for vnic descriptor by defines (John Meneghini) [RHEL-106604]
- enic: Avoid false positive under FORTIFY_SOURCE (John Meneghini) [RHEL-106604]
- scsi: fnic: Fix missing DMA mapping error in fnic_send_frame() (John Meneghini) [RHEL-106420]
- scsi: fnic: Set appropriate logging level for log message (John Meneghini) [RHEL-106420]
- scsi: fnic: Add and improve logs in FDMI and FDMI ABTS paths (John Meneghini) [RHEL-106420]
- scsi: fnic: Turn off FDMI ACTIVE flags on link down (John Meneghini) [RHEL-106420]
- scsi: fnic: Fix crash in fnic_wq_cmpl_handler when FDMI times out (John Meneghini) [RHEL-106420]
- scsi: fnic: Remove unnecessary NUL-terminations (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove redundant flush_workqueue() calls (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove unnecessary spinlock locking and unlocking (John Meneghini) [RHEL-106419]
- scsi: fnic: Replace fnic->lock_flags with local flags (John Meneghini) [RHEL-106419]
- scsi: fnic: Replace use of sizeof with standard usage (John Meneghini) [RHEL-106419]
- scsi: fnic: Fix indentation and remove unnecessary parenthesis (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove unnecessary debug print (John Meneghini) [RHEL-106419]
- scsi: fnic: Propagate SCSI error code from fnic_scsi_drv_init() (John Meneghini) [RHEL-106419]
- scsi: fnic: Test for memory allocation failure and return error code (John Meneghini) [RHEL-106419]
- scsi: fnic: Return appropriate error code from failure of scsi drv init (John Meneghini) [RHEL-106419]
- scsi: fnic: Return appropriate error code for mem alloc failure (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro (John Meneghini) [RHEL-106419]
- scsi: fnic: Fix use of uninitialized value in debug message (John Meneghini) [RHEL-106419]
- scsi: fnic: Delete incorrect debugfs error handling (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove unnecessary else to fix warning in FDLS FIP (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove extern definition from .c files (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove unnecessary else and unnecessary break in FDLS (John Meneghini) [RHEL-106419]
- scsi: fnic: Increment driver version (John Meneghini) [RHEL-106419]
- scsi: fnic: Add support to handle port channel RSCN (John Meneghini) [RHEL-106419]
- scsi: fnic: Code cleanup (John Meneghini) [RHEL-106419]
- scsi: fnic: Add stats and related functionality (John Meneghini) [RHEL-106419]
- scsi: fnic: Modify fnic interfaces to use FDLS (John Meneghini) [RHEL-106419]
- scsi: fnic: Modify IO path to use FDLS (John Meneghini) [RHEL-106419]
- scsi: fnic: Add functionality in fnic to support FDLS (John Meneghini) [RHEL-106419]
- scsi: fnic: Add and integrate support for FIP (John Meneghini) [RHEL-106419]
- scsi: fnic: Add and integrate support for FDMI (John Meneghini) [RHEL-106419]
- scsi: fnic: Add Cisco hardware model names (John Meneghini) [RHEL-106419]
- scsi: fnic: Add support for unsolicited requests and responses (John Meneghini) [RHEL-106419]
- scsi: fnic: Add support for target based solicited requests and responses (John Meneghini) [RHEL-106419]
- scsi: fnic: Add support for fabric based solicited requests and responses (John Meneghini) [RHEL-106419]
- scsi: fnic: Add headers and definitions for FDLS (John Meneghini) [RHEL-106419]
- scsi: fnic: Replace shost_printk() with dev_info()/dev_err() (John Meneghini) [RHEL-106419]
- scsi: fnic: Use vcalloc() instead of vmalloc() and memset(0) (John Meneghini) [RHEL-106419]
- scsi: fnic: Move flush_work initialization out of if block (John Meneghini) [RHEL-106419]
- scsi: fnic: Move fnic_fnic_flush_tx() to a work queue (John Meneghini) [RHEL-106419]
- scsi: fnic: Convert snprintf() to sysfs_emit() (John Meneghini) [RHEL-106419]
- scsi: fnic: Clean up some inconsistent indenting (John Meneghini) [RHEL-106419]
- scsi: fnic: unlock on error path in fnic_queuecommand() (John Meneghini) [RHEL-106419]
- scsi: fnic: Increment driver version (John Meneghini) [RHEL-106419]
- scsi: fnic: Improve logs and add support for multiqueue (MQ) (John Meneghini) [RHEL-106419]
- scsi: fnic: Add support for multiqueue (MQ) in fnic driver (John Meneghini) [RHEL-106419]
- scsi: fnic: Add support for multiqueue (MQ) in fnic_main.c (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove usage of host_lock (John Meneghini) [RHEL-106419]
- scsi: fnic: Define stats to track multiqueue (MQ) IOs (John Meneghini) [RHEL-106419]
- scsi: fnic: Modify ISRs to support multiqueue (MQ) (John Meneghini) [RHEL-106419]
- scsi: fnic: Refactor and redefine fnic.h for multiqueue (John Meneghini) [RHEL-106419]
- scsi: fnic: Get copy workqueue count and interrupt mode from config (John Meneghini) [RHEL-106419]
- scsi: fnic: Rename wq_copy to hw_copy_wq (John Meneghini) [RHEL-106419]
- scsi: fnic: Add and improve log messages (John Meneghini) [RHEL-106419]
- scsi: fnic: Add and use fnic number (John Meneghini) [RHEL-106419]
- scsi: fnic: Modify definitions to sync with VIC firmware (John Meneghini) [RHEL-106419]
- scsi: fnic: Return error if vmalloc() failed (John Meneghini) [RHEL-106419]
- scsi: fnic: Clean up some inconsistent indenting (John Meneghini) [RHEL-106419]
- scsi: fnic: Fix sg_reset success path (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove unused functions fnic_scsi_host_start/end_tag() (John Meneghini) [RHEL-106419]
- scsi: fnic: Replace sgreset tag with max_tag_id (John Meneghini) [RHEL-106419]
- scsi: fnic: Replace return codes in fnic_clean_pending_aborts() (John Meneghini) [RHEL-106419]
- scsi: fnic: Use vmalloc_array() and vcalloc() (John Meneghini) [RHEL-106419]
- scsi: fnic: Use vzalloc() (John Meneghini) [RHEL-106419]
- scsi: fnic: Refactor code in fnic probe to initialize SCSI layer (John Meneghini) [RHEL-106419]
- scsi: fnic: Replace DMA mask of 64 bits with 47 bits (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove unneeded flush_workqueue() (John Meneghini) [RHEL-106419]
- scsi: fnic: Remove redundant NULL check (John Meneghini) [RHEL-106419]
- scsi: fnic: Stop using the SCSI pointer (John Meneghini) [RHEL-106419]
- scsi: fnic: Fix a tracing statement (John Meneghini) [RHEL-106419]
- scsi: fnic: Call scsi_done() directly (John Meneghini) [RHEL-106419]
- Revert "driver core: Fix uevent_show() vs driver detach race" (Mark Langsdorf) [RHEL-85410]
Resolves: RHEL-104561, RHEL-106419, RHEL-106420, RHEL-106604, RHEL-108274, RHEL-109492, RHEL-85410, RHEL-89050

* Sat Aug 30 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.42.1.el9_6]
- powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory (Mamatha Inamdar) [RHEL-103015]