Import of kernel-6.12.0-55.40.1.el10_0
This commit is contained in:
parent 02b9685173
commit ecf94b4a2b
@@ -12,7 +12,7 @@ RHEL_MINOR = 0
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 55.39.1
+RHEL_RELEASE = 55.40.1
 #
 # RHEL_REBASE_NUM
@@ -270,6 +270,7 @@
 #define H_QUERY_INT_STATE 0x1E4
 #define H_POLL_PENDING 0x1D8
 #define H_ILLAN_ATTRIBUTES 0x244
+#define H_ADD_LOGICAL_LAN_BUFFERS 0x248
 #define H_MODIFY_HEA_QP 0x250
 #define H_QUERY_HEA_QP 0x254
 #define H_QUERY_HEA 0x258
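This new hcall number pairs with the multi-buffer RX replenish path added to ibmveth at the end of this commit. A hedged sketch of the call shape (the wrapper name and the zero-filled-slot behavior are assumptions inferred from that hunk):

/* Sketch only: batch up to 8 buffer descriptors per hypercall; slots
 * beyond "filled" stay zero (valid bit clear), which the hypervisor is
 * assumed to skip.  Falls back to the single-buffer hcall when only one
 * descriptor is ready. */
static long add_rx_buffers(unsigned long unit_address, u64 descs[8],
                           u32 filled)
{
    if (filled == 1)
        return h_add_logical_lan_buffer(unit_address, descs[0]);
    return h_add_logical_lan_buffers(unit_address, descs[0], descs[1],
                                     descs[2], descs[3], descs[4],
                                     descs[5], descs[6], descs[7]);
}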
@@ -6,6 +6,7 @@
  * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
  */
 
+#include <linux/security.h>
 #include <linux/slab.h>
 #include "hypfs.h"
 
@@ -66,23 +67,27 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
     long rc;
 
     mutex_lock(&df->lock);
-    if (df->unlocked_ioctl)
-        rc = df->unlocked_ioctl(file, cmd, arg);
-    else
-        rc = -ENOTTY;
+    rc = df->unlocked_ioctl(file, cmd, arg);
     mutex_unlock(&df->lock);
     return rc;
 }
 
-static const struct file_operations dbfs_ops = {
+static const struct file_operations dbfs_ops_ioctl = {
     .read = dbfs_read,
     .unlocked_ioctl = dbfs_ioctl,
 };
 
+static const struct file_operations dbfs_ops = {
+    .read = dbfs_read,
+};
+
 void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
 {
-    df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
-                                     &dbfs_ops);
+    const struct file_operations *fops = &dbfs_ops;
+
+    if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+        fops = &dbfs_ops_ioctl;
+    df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
     mutex_init(&df->lock);
 }
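The pattern generalizes to any debugfs interface that should stay readable under kernel lockdown: pick the fops once at creation time instead of checking lockdown on every ioctl. A minimal sketch, with hypothetical read-only and read-write fops:

/* Sketch: my_fops_ro/my_fops_rw are hypothetical placeholders. */
#include <linux/debugfs.h>
#include <linux/security.h>

static const struct file_operations my_fops_ro;  /* .read only */
static const struct file_operations my_fops_rw;  /* .read + .unlocked_ioctl */

static struct dentry *my_create_file(const char *name, struct dentry *parent,
                                     void *data)
{
    const struct file_operations *fops = &my_fops_ro;

    if (!security_locked_down(LOCKDOWN_DEBUGFS))
        fops = &my_fops_rw;
    return debugfs_create_file(name, 0400, parent, data, fops);
}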
@@ -106,6 +106,10 @@ static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev,
     struct zpci_dev *zdev = to_zpci(pdev);
     int rc;
 
+    /* The underlying device may have been disabled by the event */
+    if (!zdev_enabled(zdev))
+        return PCI_ERS_RESULT_NEED_RESET;
+
     pr_info("%s: Unblocking device access for examination\n", pci_name(pdev));
     rc = zpci_reset_load_store_blocked(zdev);
     if (rc) {
@@ -273,6 +277,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
     struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
     struct pci_dev *pdev = NULL;
     pci_ers_result_t ers_res;
+    u32 fh = 0;
+    int rc;
 
     zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
              ccdf->fid, ccdf->fh, ccdf->pec);
@@ -281,6 +287,15 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 
     if (zdev) {
         mutex_lock(&zdev->state_lock);
+        rc = clp_refresh_fh(zdev->fid, &fh);
+        if (rc)
+            goto no_pdev;
+        if (!fh || ccdf->fh != fh) {
+            /* Ignore events with stale handles */
+            zpci_dbg(3, "err fid:%x, fh:%x (stale %x)\n",
+                     ccdf->fid, fh, ccdf->fh);
+            goto no_pdev;
+        }
         zpci_update_fh(zdev, ccdf->fh);
         if (zdev->zbus->bus)
             pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
@@ -1980,6 +1980,9 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
         if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
             goto out_flush_all;
 
+        if (is_noncanonical_invlpg_address(entries[i], vcpu))
+            continue;
+
         /*
          * Lower 12 bits of 'address' encode the number of additional
          * pages to flush.
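For background, each Hyper-V flush entry carries a guest virtual address, and noncanonical addresses must be skipped rather than handed to the INVLPG machinery. A sketch of the classic 48-bit canonical test (the real is_noncanonical_invlpg_address() helper is the authoritative check and also accounts for LA57):

/* Sketch: bits 63..47 must all equal bit 47; sign-extending the low
 * 48 bits must reproduce the original value. */
static inline bool noncanonical_48(u64 va)
{
    return (u64)(((s64)va << 16) >> 16) != va;
}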
@@ -6,5 +6,6 @@
 config ENIC
     tristate "Cisco VIC Ethernet NIC Support"
     depends on PCI
+    select PAGE_POOL
     help
       This enables the support for the Cisco VIC Ethernet card.
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
     enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
-    enic_ethtool.o enic_api.o enic_clsf.o
+    enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
@@ -40,28 +40,7 @@ struct cq_desc {
 #define CQ_DESC_COMP_NDX_BITS 12
 #define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
 
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
-    u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
-    const struct cq_desc *desc = desc_arg;
-    const u8 type_color = desc->type_color;
-
-    *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
-    /*
-     * Make sure color bit is read from desc *before* other fields
-     * are read from desc. Hardware guarantees color bit is last
-     * bit (byte) written. Adding the rmb() prevents the compiler
-     * and/or CPU from reordering the reads which would potentially
-     * result in reading stale values.
-     */
-
-    rmb();
-
-    *type = type_color & CQ_DESC_TYPE_MASK;
-    *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
-    *completed_index = le16_to_cpu(desc->completed_index) &
-        CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
 
 #endif /* _CQ_DESC_H_ */
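A note on the color protocol these masks serve: the NIC writes each completion descriptor with a color bit it flips on every pass around the ring, and it writes that bit last. A consumer therefore owns a descriptor only when the color differs from its own last_color, and it must order the color read before the payload reads. A condensed sketch (process() and advance() are hypothetical stand-ins for the per-queue service functions):

/* Sketch of the consumer side of the color protocol. */
for (;;) {
    u8 color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) &
               CQ_DESC_COLOR_MASK;

    if (color == cq->last_color)
        break;          /* hardware has not finished this entry yet */
    rmb();              /* read color before the rest of the entry  */
    process(desc);      /* hypothetical per-queue handler           */
    desc = advance(cq); /* wraps the ring and flips cq->last_color  */
}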
@@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
     u8 type_color;
 };
 
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
-    u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
-    cq_desc_dec((struct cq_desc *)desc, type,
-        color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+                                       VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+                                       VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
 
 /* Completion queue descriptor: Ethernet receive queue, 16B */
 struct cq_enet_rq_desc {
@@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
     u8 type_color;
 };
 
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+    __le16 completed_index_flags;
+    __le16 q_number_rss_type_flags;
+    __le32 rss_hash;
+    __le16 bytes_written_flags;
+    __le16 vlan;
+    __le16 checksum_fcoe;
+    u8 flags;
+    u8 fetch_index_flags;
+    __le32 time_stamp;
+    __le16 time_stamp2;
+    __le16 pie_info;
+    __le32 pie_info2;
+    __le16 pie_info3;
+    u8 pie_info4;
+    u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+    __le16 completed_index_flags;
+    __le16 q_number_rss_type_flags;
+    __le32 rss_hash;
+    __le16 bytes_written_flags;
+    __le16 vlan;
+    __le16 checksum_fcoe;
+    u8 flags;
+    u8 fetch_index_flags;
+    __le32 time_stamp;
+    __le16 time_stamp2;
+    __le16 pie_info;
+    __le32 pie_info2;
+    __le16 pie_info3;
+    u8 pie_info4;
+    u8 reserved[32];
+    u8 type_color;
+};
+
 #define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
 #define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
 #define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
 #define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
 #define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
 
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
-    u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
-    u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
-    u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
-    u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
-    u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
-    u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
-    u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
-    u16 completed_index_flags;
-    u16 q_number_rss_type_flags;
-    u16 bytes_written_flags;
-
-    cq_desc_dec((struct cq_desc *)desc, type,
-        color, q_number, completed_index);
-
-    completed_index_flags = le16_to_cpu(desc->completed_index_flags);
-    q_number_rss_type_flags =
-        le16_to_cpu(desc->q_number_rss_type_flags);
-    bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
-    *ingress_port = (completed_index_flags &
-        CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
-    *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
-        1 : 0;
-    *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
-        1 : 0;
-    *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
-        1 : 0;
-
-    *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
-        CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-    *csum_not_calc = (q_number_rss_type_flags &
-        CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
-    *rss_hash = le32_to_cpu(desc->rss_hash);
-
-    *bytes_written = bytes_written_flags &
-        CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-    *packet_error = (bytes_written_flags &
-        CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
-    *vlan_stripped = (bytes_written_flags &
-        CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
-    /*
-     * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
-     */
-    *vlan_tci = le16_to_cpu(desc->vlan);
-
-    if (*fcoe) {
-        *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
-            CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
-        *fcoe_fc_crc_ok = (desc->flags &
-            CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
-        *fcoe_enc_error = (desc->flags &
-            CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
-        *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
-            CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
-            CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
-        *checksum = 0;
-    } else {
-        *fcoe_sof = 0;
-        *fcoe_fc_crc_ok = 0;
-        *fcoe_enc_error = 0;
-        *fcoe_eof = 0;
-        *checksum = le16_to_cpu(desc->checksum_fcoe);
-    }
-
-    *tcp_udp_csum_ok =
-        (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
-    *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
-    *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
-    *ipv4_csum_ok =
-        (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
-    *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
-    *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
-    *ipv4_fragment =
-        (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
-    *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
 #endif /* _CQ_ENET_DESC_H_ */
@@ -17,21 +17,28 @@
 #include "vnic_nic.h"
 #include "vnic_rss.h"
 #include <linux/irq.h>
+#include <net/page_pool/helpers.h>
 
 #define DRV_NAME "enic"
 #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
 
 #define ENIC_BARS_MAX 6
 
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_MAX 256
+#define ENIC_RQ_MAX 256
+#define ENIC_RQ_MIN_DEFAULT 8
 
 #define ENIC_WQ_NAPI_BUDGET 256
 
 #define ENIC_AIC_LARGE_PKT_DIFF 3
 
+enum ext_cq {
+    ENIC_RQ_CQ_ENTRY_SIZE_16,
+    ENIC_RQ_CQ_ENTRY_SIZE_32,
+    ENIC_RQ_CQ_ENTRY_SIZE_64,
+    ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
 struct enic_msix_entry {
     int requested;
     char devname[IFNAMSIZ + 8];
@@ -77,6 +84,10 @@ struct enic_rx_coal {
 #define ENIC_SET_INSTANCE (1 << 3)
 #define ENIC_SET_HOST (1 << 4)
 
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
 struct enic_port_profile {
     u32 set;
     u8 request;
@@ -160,8 +171,21 @@ struct enic_rq_stats {
     u64 pkt_truncated;  /* truncated pkts */
     u64 no_skb;         /* out of skbs */
     u64 desc_skip;      /* Rx pkt went into later buffer */
+    u64 pp_alloc_fail;  /* page pool alloc failure */
 };
 
+struct enic_wq {
+    spinlock_t lock;    /* spinlock for wq */
+    struct vnic_wq vwq;
+    struct enic_wq_stats stats;
+} ____cacheline_aligned;
+
+struct enic_rq {
+    struct vnic_rq vrq;
+    struct enic_rq_stats stats;
+    struct page_pool *pool;
+} ____cacheline_aligned;
+
 /* Per-instance private data structure */
 struct enic {
     struct net_device *netdev;
@@ -173,8 +197,8 @@ struct enic {
     struct work_struct reset;
     struct work_struct tx_hang_reset;
     struct work_struct change_mtu_work;
-    struct msix_entry msix_entry[ENIC_INTR_MAX];
-    struct enic_msix_entry msix[ENIC_INTR_MAX];
+    struct msix_entry *msix_entry;
+    struct enic_msix_entry *msix;
     u32 msg_enable;
     spinlock_t devcmd_lock;
     u8 mac_addr[ETH_ALEN];
@@ -193,33 +217,30 @@ struct enic {
     bool enic_api_busy;
     struct enic_port_profile *pp;
 
     /* work queue cache line section */
-    ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
-    spinlock_t wq_lock[ENIC_WQ_MAX];
-    struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
+    struct enic_wq *wq;
+    unsigned int wq_avail;
     unsigned int wq_count;
     u16 loop_enable;
     u16 loop_tag;
 
     /* receive queue cache line section */
-    ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
-    struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
+    struct enic_rq *rq;
+    unsigned int rq_avail;
     unsigned int rq_count;
     struct vxlan_offload vxlan;
-    struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
+    struct napi_struct *napi;
 
     /* interrupt resource cache line section */
-    ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
+    struct vnic_intr *intr;
+    unsigned int intr_avail;
     unsigned int intr_count;
     u32 __iomem *legacy_pba; /* memory-mapped */
 
     /* completion queue cache line section */
-    ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+    struct vnic_cq *cq;
+    unsigned int cq_avail;
     unsigned int cq_count;
     struct enic_rfs_flw_tbl rfs_h;
     u32 rx_copybreak;
     u8 rss_key[ENIC_RSS_LEN];
     struct vnic_gen_stats gen_stats;
+    enum ext_cq ext_cq;
 };
 
 static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -272,18 +293,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic,
     return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
 }
 
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
-    return enic->rq_count + enic->wq_count;
-}
+/* MSIX interrupts are organized as the error interrupt, then the notify
+ * interrupt followed by all the I/O interrupts. The error interrupt needs
+ * to fit in 7 bits due to hardware constraints
+ */
+#define ENIC_MSIX_RESERVED_INTR 2
+#define ENIC_MSIX_ERR_INTR 0
+#define ENIC_MSIX_NOTIFY_INTR 1
+#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR
+#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2)
 
 #define ENIC_LEGACY_IO_INTR 0
 #define ENIC_LEGACY_ERR_INTR 1
 #define ENIC_LEGACY_NOTIFY_INTR 2
 
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+    return ENIC_MSIX_ERR_INTR;
+}
+
 static inline unsigned int enic_msix_notify_intr(struct enic *enic)
 {
-    return enic->rq_count + enic->wq_count + 1;
+    return ENIC_MSIX_NOTIFY_INTR;
 }
 
 static inline bool enic_is_err_intr(struct enic *enic, int intr)
@@ -331,5 +362,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
 int enic_is_dynamic(struct enic *enic);
 void enic_set_ethtool_ops(struct net_device *netdev);
 int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
 
 #endif /* _ENIC_H_ */
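The fixed MSI-X layout above replaces the old scheme where error and notify vectors floated after the I/O vectors. A sketch of how I/O vector indices now fall out (the RQ-before-WQ ordering matches the cq_index assignment in enic_res.c later in this commit):

/* Sketch: vector 0 = error, vector 1 = notify, I/O vectors from 2 up. */
static inline unsigned int io_intr_for_rq(struct enic *enic, unsigned int rq)
{
    return ENIC_MSIX_IO_INTR_BASE + rq;                  /* RQs first */
}

static inline unsigned int io_intr_for_wq(struct enic *enic, unsigned int wq)
{
    return ENIC_MSIX_IO_INTR_BASE + enic->rq_count + wq; /* then WQs  */
}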
@@ -222,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
     struct enic *enic = netdev_priv(netdev);
     struct vnic_enet_config *c = &enic->config;
 
-    ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+    ring->rx_max_pending = c->max_rq_ring;
     ring->rx_pending = c->rq_desc_count;
-    ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+    ring->tx_max_pending = c->max_wq_ring;
     ring->tx_pending = c->wq_desc_count;
 }
 
@@ -252,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
     }
     rx_pending = c->rq_desc_count;
     tx_pending = c->wq_desc_count;
-    if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+    if (ring->rx_pending > c->max_rq_ring ||
         ring->rx_pending < ENIC_MIN_RQ_DESCS) {
         netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
                     ring->rx_pending, ENIC_MIN_RQ_DESCS,
-                    ENIC_MAX_RQ_DESCS);
+                    c->max_rq_ring);
         return -EINVAL;
     }
-    if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+    if (ring->tx_pending > c->max_wq_ring ||
         ring->tx_pending < ENIC_MIN_WQ_DESCS) {
         netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
                     ring->tx_pending, ENIC_MIN_WQ_DESCS,
-                    ENIC_MAX_WQ_DESCS);
+                    c->max_wq_ring);
         return -EINVAL;
     }
     if (running)
@@ -337,7 +337,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
     for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
         *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
     for (i = 0; i < enic->rq_count; i++) {
-        struct enic_rq_stats *rqstats = &enic->rq_stats[i];
+        struct enic_rq_stats *rqstats = &enic->rq[i].stats;
         int index;
 
         for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
@@ -346,7 +346,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
         }
     }
     for (i = 0; i < enic->wq_count; i++) {
-        struct enic_wq_stats *wqstats = &enic->wq_stats[i];
+        struct enic_wq_stats *wqstats = &enic->wq[i].stats;
         int index;
 
         for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
@@ -608,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
     return ret;
 }
 
-static int enic_get_tunable(struct net_device *dev,
-                            const struct ethtool_tunable *tuna, void *data)
-{
-    struct enic *enic = netdev_priv(dev);
-    int ret = 0;
-
-    switch (tuna->id) {
-    case ETHTOOL_RX_COPYBREAK:
-        *(u32 *)data = enic->rx_copybreak;
-        break;
-    default:
-        ret = -EINVAL;
-        break;
-    }
-
-    return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
-                            const struct ethtool_tunable *tuna,
-                            const void *data)
-{
-    struct enic *enic = netdev_priv(dev);
-    int ret = 0;
-
-    switch (tuna->id) {
-    case ETHTOOL_RX_COPYBREAK:
-        enic->rx_copybreak = *(u32 *)data;
-        break;
-    default:
-        ret = -EINVAL;
-        break;
-    }
-
-    return ret;
-}
-
 static u32 enic_get_rxfh_key_size(struct net_device *netdev)
 {
     return ENIC_RSS_LEN;
@@ -695,8 +658,8 @@ static void enic_get_channels(struct net_device *netdev,
 
     switch (vnic_dev_get_intr_mode(enic->vdev)) {
     case VNIC_DEV_INTR_MODE_MSIX:
-        channels->max_rx = ENIC_RQ_MAX;
-        channels->max_tx = ENIC_WQ_MAX;
+        channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
+        channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
         channels->rx_count = enic->rq_count;
         channels->tx_count = enic->wq_count;
         break;
@@ -727,8 +690,6 @@ static const struct ethtool_ops enic_ethtool_ops = {
     .get_coalesce = enic_get_coalesce,
     .set_coalesce = enic_set_coalesce,
     .get_rxnfc = enic_get_rxnfc,
-    .get_tunable = enic_get_tunable,
-    .set_tunable = enic_set_tunable,
    .get_rxfh_key_size = enic_get_rxfh_key_size,
     .get_rxfh = enic_get_rxfh,
     .set_rxfh = enic_set_rxfh,
(File diff suppressed because it is too large.)
@@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
     GET_CONFIG(intr_timer_usec);
     GET_CONFIG(loop_tag);
     GET_CONFIG(num_arfs);
+    GET_CONFIG(max_rq_ring);
+    GET_CONFIG(max_wq_ring);
+    GET_CONFIG(max_cq_ring);
+
+    if (!c->max_wq_ring)
+        c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+    if (!c->max_rq_ring)
+        c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+    if (!c->max_cq_ring)
+        c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
 
     c->wq_desc_count =
-        min_t(u32, ENIC_MAX_WQ_DESCS,
-              max_t(u32, ENIC_MIN_WQ_DESCS,
-                    c->wq_desc_count));
+        min_t(u32, c->max_wq_ring,
+              max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
     c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
 
     c->rq_desc_count =
-        min_t(u32, ENIC_MAX_RQ_DESCS,
-              max_t(u32, ENIC_MIN_RQ_DESCS,
-                    c->rq_desc_count));
+        min_t(u32, c->max_rq_ring,
+              max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
     c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
 
     if (c->mtu == 0)
         c->mtu = 1500;
-    c->mtu = min_t(u16, ENIC_MAX_MTU,
-                   max_t(u16, ENIC_MIN_MTU,
-                         c->mtu));
+    c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
 
     c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
                                vnic_dev_get_intr_coal_timer_max(enic->vdev));
 
     dev_info(enic_get_dev(enic),
-             "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
-             enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+             "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+             enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+             c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
 
     dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
              "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -176,9 +183,9 @@ void enic_free_vnic_resources(struct enic *enic)
     unsigned int i;
 
     for (i = 0; i < enic->wq_count; i++)
-        vnic_wq_free(&enic->wq[i]);
+        vnic_wq_free(&enic->wq[i].vwq);
     for (i = 0; i < enic->rq_count; i++)
-        vnic_rq_free(&enic->rq[i]);
+        vnic_rq_free(&enic->rq[i].vrq);
     for (i = 0; i < enic->cq_count; i++)
         vnic_cq_free(&enic->cq[i]);
     for (i = 0; i < enic->intr_count; i++)
@@ -187,16 +194,21 @@
 
 void enic_get_res_counts(struct enic *enic)
 {
-    enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
-    enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
-    enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
-    enic->intr_count = vnic_dev_get_res_count(enic->vdev,
-        RES_TYPE_INTR_CTRL);
+    enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+    enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+    enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+    enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
+        RES_TYPE_INTR_CTRL);
+
+    enic->wq_count = enic->wq_avail;
+    enic->rq_count = enic->rq_avail;
+    enic->cq_count = enic->cq_avail;
+    enic->intr_count = enic->intr_avail;
 
     dev_info(enic_get_dev(enic),
         "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
-        enic->wq_count, enic->rq_count,
-        enic->cq_count, enic->intr_count);
+        enic->wq_avail, enic->rq_avail,
+        enic->cq_avail, enic->intr_avail);
 }
 
 void enic_init_vnic_resources(struct enic *enic)
@@ -221,9 +233,12 @@ void enic_init_vnic_resources(struct enic *enic)
 
     switch (intr_mode) {
+    case VNIC_DEV_INTR_MODE_INTX:
+        error_interrupt_enable = 1;
+        error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
+        break;
     case VNIC_DEV_INTR_MODE_MSIX:
         error_interrupt_enable = 1;
-        error_interrupt_offset = enic->intr_count - 2;
+        error_interrupt_offset = enic_msix_err_intr(enic);
         break;
     default:
         error_interrupt_enable = 0;
@@ -233,7 +248,7 @@ void enic_init_vnic_resources(struct enic *enic)
 
     for (i = 0; i < enic->rq_count; i++) {
         cq_index = i;
-        vnic_rq_init(&enic->rq[i],
+        vnic_rq_init(&enic->rq[i].vrq,
             cq_index,
             error_interrupt_enable,
             error_interrupt_offset);
@@ -241,7 +256,7 @@ void enic_init_vnic_resources(struct enic *enic)
 
     for (i = 0; i < enic->wq_count; i++) {
         cq_index = enic->rq_count + i;
-        vnic_wq_init(&enic->wq[i],
+        vnic_wq_init(&enic->wq[i].vwq,
             cq_index,
             error_interrupt_enable,
             error_interrupt_offset);
@@ -249,15 +264,15 @@ void enic_init_vnic_resources(struct enic *enic)
 
     /* Init CQ resources
      *
-     * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
-     * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+     * All CQs point to INTR[0] for INTx, MSI
+     * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
      */
 
     for (i = 0; i < enic->cq_count; i++) {
 
         switch (intr_mode) {
         case VNIC_DEV_INTR_MODE_MSIX:
-            interrupt_offset = i;
+            interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
             break;
         default:
             interrupt_offset = 0;
@@ -304,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
 int enic_alloc_vnic_resources(struct enic *enic)
 {
     enum vnic_dev_intr_mode intr_mode;
+    int rq_cq_desc_size;
     unsigned int i;
     int err;
 
@@ -318,11 +334,29 @@ int enic_alloc_vnic_resources(struct enic *enic)
         intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
         "unknown");
 
+    switch (enic->ext_cq) {
+    case ENIC_RQ_CQ_ENTRY_SIZE_16:
+        rq_cq_desc_size = 16;
+        break;
+    case ENIC_RQ_CQ_ENTRY_SIZE_32:
+        rq_cq_desc_size = 32;
+        break;
+    case ENIC_RQ_CQ_ENTRY_SIZE_64:
+        rq_cq_desc_size = 64;
+        break;
+    default:
+        dev_err(enic_get_dev(enic),
+                "Unable to determine rq cq desc size: %d",
+                enic->ext_cq);
+        err = -ENODEV;
+        goto err_out;
+    }
+
     /* Allocate queue resources
      */
 
     for (i = 0; i < enic->wq_count; i++) {
-        err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+        err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
             enic->config.wq_desc_count,
             sizeof(struct wq_enet_desc));
         if (err)
@@ -330,7 +364,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
     }
 
     for (i = 0; i < enic->rq_count; i++) {
-        err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+        err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
             enic->config.rq_desc_count,
             sizeof(struct rq_enet_desc));
         if (err)
@@ -340,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
     for (i = 0; i < enic->cq_count; i++) {
         if (i < enic->rq_count)
             err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
-                enic->config.rq_desc_count,
-                sizeof(struct cq_enet_rq_desc));
+                enic->config.rq_desc_count,
+                rq_cq_desc_size);
         else
             err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
                 enic->config.wq_desc_count,
@@ -372,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
 
 err_out_cleanup:
     enic_free_vnic_resources(enic);
 
+err_out:
     return err;
 }
 
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command
+ */
+void enic_ext_cq(struct enic *enic)
+{
+    u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+    int wait = 1000;
+    int ret;
+
+    spin_lock_bh(&enic->devcmd_lock);
+    ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+    if (ret || a0) {
+        dev_info(&enic->pdev->dev,
+                 "CMD_CQ_ENTRY_SIZE_SET not supported.");
+        enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+        goto out;
+    }
+    a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+    enic->ext_cq = fls(a1) - 1;
+    a0 = VNIC_RQ_ALL;
+    a1 = enic->ext_cq;
+    ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+    if (ret) {
+        dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+        enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+    }
+out:
+    spin_unlock_bh(&enic->devcmd_lock);
+    dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+             16 << enic->ext_cq);
+}
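A worked example of the capability handshake in enic_ext_cq() above: querying CMD_CAPABILITY with a0 = CMD_CQ_ENTRY_SIZE_SET returns the supported-sizes bitmask in a1, and the driver keeps the largest size. A condensed sketch with an illustrative value:

/* Sketch: if the adapter reports caps = 0x7 (16/32/64 B all supported),
 * fls(0x7) - 1 == 2, and the entry size is 16 << 2 == 64 bytes. */
static int pick_cq_entry_size(u64 caps)
{
    u64 a1 = caps & VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;

    if (!a1)
        return ENIC_RQ_CQ_ENTRY_SIZE_16; /* fall back to 16-byte entries */
    return fls(a1) - 1;
}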
@@ -12,10 +12,13 @@
 #include "vnic_wq.h"
 #include "vnic_rq.h"
 
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
 
 #define ENIC_MIN_MTU ETH_MIN_MTU
 #define ENIC_MAX_MTU 9000
drivers/net/ethernet/cisco/enic/enic_rq.c (new file, 436 lines)
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+                                      u32 pkt_len)
+{
+    if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+        pkt_size->large_pkt_bytes_cnt += pkt_len;
+    else
+        pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+                                u8 *color, u16 *q_number, u16 *completed_index)
+{
+    /* type_color is the last field for all cq structs */
+    u8 type_color;
+
+    switch (cq_desc_size) {
+    case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+        struct cq_enet_rq_desc *desc =
+            (struct cq_enet_rq_desc *)cq_desc;
+        type_color = desc->type_color;
+
+        /* Make sure color bit is read from desc *before* other fields
+         * are read from desc. Hardware guarantees color bit is last
+         * bit (byte) written. Adding the rmb() prevents the compiler
+         * and/or CPU from reordering the reads which would potentially
+         * result in reading stale values.
+         */
+        rmb();
+
+        *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+                    CQ_DESC_Q_NUM_MASK;
+        *completed_index = le16_to_cpu(desc->completed_index_flags) &
+                           CQ_DESC_COMP_NDX_MASK;
+        break;
+    }
+    case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+        struct cq_enet_rq_desc_32 *desc =
+            (struct cq_enet_rq_desc_32 *)cq_desc;
+        type_color = desc->type_color;
+
+        /* Make sure color bit is read from desc *before* other fields
+         * are read from desc. Hardware guarantees color bit is last
+         * bit (byte) written. Adding the rmb() prevents the compiler
+         * and/or CPU from reordering the reads which would potentially
+         * result in reading stale values.
+         */
+        rmb();
+
+        *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+                    CQ_DESC_Q_NUM_MASK;
+        *completed_index = le16_to_cpu(desc->completed_index_flags) &
+                           CQ_DESC_COMP_NDX_MASK;
+        *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+                            CQ_DESC_COMP_NDX_BITS;
+        break;
+    }
+    case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+        struct cq_enet_rq_desc_64 *desc =
+            (struct cq_enet_rq_desc_64 *)cq_desc;
+        type_color = desc->type_color;
+
+        /* Make sure color bit is read from desc *before* other fields
+         * are read from desc. Hardware guarantees color bit is last
+         * bit (byte) written. Adding the rmb() prevents the compiler
+         * and/or CPU from reordering the reads which would potentially
+         * result in reading stale values.
+         */
+        rmb();
+
+        *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+                    CQ_DESC_Q_NUM_MASK;
+        *completed_index = le16_to_cpu(desc->completed_index_flags) &
+                           CQ_DESC_COMP_NDX_MASK;
+        *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+                            CQ_DESC_COMP_NDX_BITS;
+        break;
+    }
+    }
+
+    *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+    *type = type_color & CQ_DESC_TYPE_MASK;
+}
+
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+                                  u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+                                  u8 vlan_stripped, u8 csum_not_calc,
+                                  u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+                                  u16 vlan_tci, struct sk_buff *skb)
+{
+    struct enic *enic = vnic_dev_priv(vrq->vdev);
+    struct net_device *netdev = enic->netdev;
+    struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+    bool outer_csum_ok = true, encap = false;
+
+    if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+        switch (rss_type) {
+        case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+        case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+        case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+            skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+            rqstats->l4_rss_hash++;
+            break;
+        case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+        case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+        case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+            skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+            rqstats->l3_rss_hash++;
+            break;
+        }
+    }
+    if (enic->vxlan.vxlan_udp_port_number) {
+        switch (enic->vxlan.patch_level) {
+        case 0:
+            if (fcoe) {
+                encap = true;
+                outer_csum_ok = fcoe_fc_crc_ok;
+            }
+            break;
+        case 2:
+            if (type == 7 && (rss_hash & BIT(0))) {
+                encap = true;
+                outer_csum_ok = (rss_hash & BIT(1)) &&
+                                (rss_hash & BIT(2));
+            }
+            break;
+        }
+    }
+
+    /* Hardware does not provide whole packet checksum. It only
+     * provides pseudo checksum. Since hw validates the packet
+     * checksum but does not provide the checksum value, use
+     * CHECKSUM_UNNECESSARY.
+     *
+     * In case of an encap pkt, tcp_udp_csum_ok is the inner csum_ok.
+     * outer_csum_ok is set by hw when the outer udp csum is correct
+     * or is zero.
+     */
+    if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+        tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+        skb->ip_summed = CHECKSUM_UNNECESSARY;
+        skb->csum_level = encap;
+        if (encap)
+            rqstats->csum_unnecessary_encap++;
+        else
+            rqstats->csum_unnecessary++;
+    }
+
+    if (vlan_stripped) {
+        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+        rqstats->vlan_stripped++;
+    }
+}
+
+/*
+ * This decoder accesses only the first 15 bytes of the CQ descriptor,
+ * which are identical for all entry types (16, 32 and 64 byte).
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+                                u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+                                u8 *csum_not_calc, u32 *rss_hash,
+                                u16 *bytes_written, u8 *packet_error,
+                                u8 *vlan_stripped, u16 *vlan_tci,
+                                u16 *checksum, u8 *fcoe_sof,
+                                u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+                                u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+                                u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+                                u8 *ipv4_fragment, u8 *fcs_ok)
+{
+    u16 completed_index_flags;
+    u16 q_number_rss_type_flags;
+    u16 bytes_written_flags;
+
+    completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+    q_number_rss_type_flags =
+        le16_to_cpu(desc->q_number_rss_type_flags);
+    bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+    *ingress_port = (completed_index_flags &
+        CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+    *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+        1 : 0;
+    *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+        1 : 0;
+    *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+        1 : 0;
+
+    *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+        CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+    *csum_not_calc = (q_number_rss_type_flags &
+        CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+    *rss_hash = le32_to_cpu(desc->rss_hash);
+
+    *bytes_written = bytes_written_flags &
+        CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+    *packet_error = (bytes_written_flags &
+        CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+    *vlan_stripped = (bytes_written_flags &
+        CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+    /*
+     * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+     */
+    *vlan_tci = le16_to_cpu(desc->vlan);
+
+    if (*fcoe) {
+        *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+            CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+        *fcoe_fc_crc_ok = (desc->flags &
+            CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+        *fcoe_enc_error = (desc->flags &
+            CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+        *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+            CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+            CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+        *checksum = 0;
+    } else {
+        *fcoe_sof = 0;
+        *fcoe_fc_crc_ok = 0;
+        *fcoe_enc_error = 0;
+        *fcoe_eof = 0;
+        *checksum = le16_to_cpu(desc->checksum_fcoe);
+    }
+
+    *tcp_udp_csum_ok =
+        (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+    *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+    *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+    *ipv4_csum_ok =
+        (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+    *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+    *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+    *ipv4_fragment =
+        (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+    *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+                              u16 bytes_written)
+{
+    struct enic *enic = vnic_dev_priv(vrq->vdev);
+    struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+    if (packet_error) {
+        if (!fcs_ok) {
+            if (bytes_written > 0)
+                rqstats->bad_fcs++;
+            else if (bytes_written == 0)
+                rqstats->pkt_truncated++;
+        }
+        return true;
+    }
+    return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+    struct enic *enic = vnic_dev_priv(rq->vdev);
+    struct net_device *netdev = enic->netdev;
+    struct enic_rq *erq = &enic->rq[rq->index];
+    struct enic_rq_stats *rqstats = &erq->stats;
+    unsigned int offset = 0;
+    unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+    unsigned int os_buf_index = 0;
+    dma_addr_t dma_addr;
+    struct vnic_rq_buf *buf = rq->to_use;
+    struct page *page;
+    unsigned int truesize = len;
+
+    if (buf->os_buf) {
+        enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+                           buf->len);
+
+        return 0;
+    }
+
+    page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+    if (unlikely(!page)) {
+        rqstats->pp_alloc_fail++;
+        return -ENOMEM;
+    }
+    buf->offset = offset;
+    buf->truesize = truesize;
+    dma_addr = page_pool_get_dma_addr(page) + offset;
+    enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+    return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+    struct enic *enic = vnic_dev_priv(rq->vdev);
+    struct enic_rq *erq = &enic->rq[rq->index];
+
+    if (!buf->os_buf)
+        return;
+
+    page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+    buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+                                 struct vnic_rq_buf *buf, void *cq_desc,
+                                 u8 type, u16 q_number, u16 completed_index)
+{
+    struct sk_buff *skb;
+    struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+    struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+    struct napi_struct *napi;
+
+    u8 eop, sop, ingress_port, vlan_stripped;
+    u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+    u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+    u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+    u8 packet_error;
+    u16 bytes_written, vlan_tci, checksum;
+    u32 rss_hash;
+
+    rqstats->packets++;
+
+    cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+                        &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+                        &rss_hash, &bytes_written, &packet_error,
+                        &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+                        &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+                        &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+                        &ipv4, &ipv4_fragment, &fcs_ok);
+
+    if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+        return;
+
+    if (eop && bytes_written > 0) {
+        /* Good receive */
+        rqstats->bytes += bytes_written;
+        napi = &enic->napi[rq->index];
+        skb = napi_get_frags(napi);
+        if (unlikely(!skb)) {
+            net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+                                 enic->netdev->name, rq->index,
+                                 completed_index);
+            rqstats->no_skb++;
+            return;
+        }
+
+        prefetch(skb->data - NET_IP_ALIGN);
+
+        dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+                                bytes_written, DMA_FROM_DEVICE);
+        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                        (struct page *)buf->os_buf, buf->offset,
+                        bytes_written, buf->truesize);
+        skb_record_rx_queue(skb, q_number);
+        enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+                              fcoe_fc_crc_ok, vlan_stripped,
+                              csum_not_calc, tcp_udp_csum_ok, ipv6,
+                              ipv4_csum_ok, vlan_tci, skb);
+        skb_mark_for_recycle(skb);
+        napi_gro_frags(napi);
+        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+            enic_intr_update_pkt_size(&cq->pkt_size_counter,
+                                      bytes_written);
+        buf->os_buf = NULL;
+        buf->dma_addr = 0;
+        buf = buf->next;
+    } else {
+        /* Buffer overflow */
+        rqstats->pkt_truncated++;
+    }
+}
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+                            u16 q_number, u16 completed_index)
+{
+    struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+    struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+    struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+    int skipped;
+
+    while (1) {
+        skipped = (vrq_buf->index != completed_index);
+        if (!skipped)
+            enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+                                 q_number, completed_index);
+        else
+            rqstats->desc_skip++;
+
+        vrq->ring.desc_avail++;
+        vrq->to_clean = vrq_buf->next;
+        vrq_buf = vrq_buf->next;
+        if (!skipped)
+            break;
+    }
+}
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+                                unsigned int work_to_do)
+{
+    struct vnic_cq *cq = &enic->cq[cq_index];
+    void *cq_desc = vnic_cq_to_clean(cq);
+    u16 q_number, completed_index;
+    unsigned int work_done = 0;
+    u8 type, color;
+
+    enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+                        &completed_index);
+
+    while (color != cq->last_color) {
+        enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+        vnic_cq_inc_to_clean(cq);
+
+        if (++work_done >= work_to_do)
+            break;
+
+        cq_desc = vnic_cq_to_clean(cq);
+        enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+                            &q_number, &completed_index);
+    }
+
+    return work_done;
+}
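For orientation, a hedged sketch of the page_pool buffer lifecycle that enic_rq_alloc_buf() and enic_free_rq_buf() above rely on. The pool creation parameters here are assumptions; the actual enic pool setup lives in the suppressed enic_main.c diff.

#include <net/page_pool/helpers.h>

/* Sketch: create a DMA-mapping pool.  The RX path then only needs
 * page_pool_dev_alloc() plus page_pool_get_dma_addr() + offset to post
 * a buffer, and skb_mark_for_recycle() returns the page to the pool on
 * skb free instead of releasing it. */
static struct page_pool *rx_pool_create(struct device *dev, u32 ring_size)
{
    struct page_pool_params pp = {
        .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
        .order     = 0,
        .pool_size = ring_size,   /* assumption: one fragment per desc */
        .dev       = dev,
        .dma_dir   = DMA_FROM_DEVICE,
        .max_len   = PAGE_SIZE,
    };

    return page_pool_create(&pp); /* returns ERR_PTR() on failure */
}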
drivers/net/ethernet/cisco/enic/enic_rq.h (new file, 8 lines)
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+                                unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
drivers/net/ethernet/cisco/enic/enic_wq.c (new file, 117 lines)
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+                                u8 *type, u8 *color, u16 *q_number,
+                                u16 *completed_index)
+{
+    const struct cq_desc *desc = desc_arg;
+    const u8 type_color = desc->type_color;
+
+    *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+    /*
+     * Make sure color bit is read from desc *before* other fields
+     * are read from desc. Hardware guarantees color bit is last
+     * bit (byte) written. Adding the rmb() prevents the compiler
+     * and/or CPU from reordering the reads which would potentially
+     * result in reading stale values.
+     */
+    rmb();
+
+    *type = type_color & CQ_DESC_TYPE_MASK;
+    *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+    if (ext_wq)
+        *completed_index = le16_to_cpu(desc->completed_index) &
+                           ENET_CQ_DESC_COMP_NDX_MASK;
+    else
+        *completed_index = le16_to_cpu(desc->completed_index) &
+                           CQ_DESC_COMP_NDX_MASK;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+    struct enic *enic = vnic_dev_priv(wq->vdev);
+
+    if (buf->sop)
+        dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+                         DMA_TO_DEVICE);
+    else
+        dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+                       DMA_TO_DEVICE);
+
+    if (buf->os_buf)
+        dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+                             struct vnic_wq_buf *buf, void *opaque)
+{
+    struct enic *enic = vnic_dev_priv(wq->vdev);
+
+    enic->wq[wq->index].stats.cq_work++;
+    enic->wq[wq->index].stats.cq_bytes += buf->len;
+    enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+                            u8 type, u16 q_number, u16 completed_index)
+{
+    struct enic *enic = vnic_dev_priv(vdev);
+
+    spin_lock(&enic->wq[q_number].lock);
+
+    vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+                    completed_index, enic_wq_free_buf, NULL);
+
+    if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+        && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+        (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+        netif_wake_subqueue(enic->netdev, q_number);
+        enic->wq[q_number].stats.wake++;
+    }
+
+    spin_unlock(&enic->wq[q_number].lock);
+}
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+                                unsigned int work_to_do)
+{
+    struct vnic_cq *cq = &enic->cq[cq_index];
+    u16 q_number, completed_index;
+    unsigned int work_done = 0;
+    struct cq_desc *cq_desc;
+    u8 type, color;
+    bool ext_wq;
+
+    ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+    cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+    enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+                        &q_number, &completed_index);
+
+    while (color != cq->last_color) {
+        enic_wq_service(cq->vdev, cq_desc, type, q_number,
+                        completed_index);
+
+        vnic_cq_inc_to_clean(cq);
+
+        if (++work_done >= work_to_do)
+            break;
+
+        cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+        enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+                            &q_number, &completed_index);
+    }
+
+    return work_done;
+}
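Why enic_wq_cq_desc_dec() needs the ext_wq distinction: the classic completion descriptor carries only a 12-bit completed_index (CQ_DESC_COMP_NDX_BITS), while WQ rings can now hold 16384 entries. For rings larger than the 4096-entry default, two additional bits of the field are treated as index bits. A condensed illustration:

/* Sketch: decode completed_index for a 16K-entry WQ vs a classic ring. */
static u16 wq_completed_index(const struct cq_desc *desc, bool ext_wq)
{
    u16 raw = le16_to_cpu(desc->completed_index);

    /* 14-bit mask (0..16383) for extended rings, 12-bit (0..4095)
     * otherwise */
    return raw & (ext_wq ? ENET_CQ_DESC_COMP_NDX_MASK
                         : CQ_DESC_COMP_NDX_MASK);
}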
drivers/net/ethernet/cisco/enic/enic_wq.h (new file, 7 lines)
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+                                unsigned int work_to_do);
@@ -56,45 +56,18 @@ struct vnic_cq {
     ktime_t prev_ts;
 };
 
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
-    unsigned int work_to_do,
-    int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-    u8 type, u16 q_number, u16 completed_index, void *opaque),
-    void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
 {
-    struct cq_desc *cq_desc;
-    unsigned int work_done = 0;
-    u16 q_number, completed_index;
-    u8 type, color;
+    return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
 }
 
-    cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
-        cq->ring.desc_size * cq->to_clean);
-    cq_desc_dec(cq_desc, &type, &color,
-        &q_number, &completed_index);
-
-    while (color != cq->last_color) {
-
-        if ((*q_service)(cq->vdev, cq_desc, type,
-            q_number, completed_index, opaque))
-            break;
-
-        cq->to_clean++;
-        if (cq->to_clean == cq->ring.desc_count) {
-            cq->to_clean = 0;
-            cq->last_color = cq->last_color ? 0 : 1;
-        }
-
-        cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
-            cq->ring.desc_size * cq->to_clean);
-        cq_desc_dec(cq_desc, &type, &color,
-            &q_number, &completed_index);
-
-        work_done++;
-        if (work_done >= work_to_do)
-            break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+    cq->to_clean++;
+    if (cq->to_clean == cq->ring.desc_count) {
+        cq->to_clean = 0;
+        cq->last_color = cq->last_color ? 0 : 1;
     }
-
-    return work_done;
 }
 
 void vnic_cq_free(struct vnic_cq *cq);
@@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
      * in: (u16) a2 = unsigned short int port information
      */
     CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
 
+    /*
+     * Set extended CQ field in MREGS of RQ (or all RQs)
+     * for given vNIC
+     * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+     *     (u32) a1 = CQ entry size
+     *         VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+     *         VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+     *         VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+     *
+     * Capability query:
+     * out: (u32) a0 = errno, 0:valid cmd
+     *      (u32) a1 = value consisting of supported entries
+     *          bit 0: 16 bytes
+     *          bit 1: 32 bytes
+     *          bit 2: 64 bytes
+     */
+    CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
 };
 
 /* CMD_ENABLE2 flags */
@@ -21,6 +21,11 @@ struct vnic_enet_config {
     u16 loop_tag;
     u16 vf_rq_count;
     u16 num_arfs;
     u8 reserved[66];
+    u32 max_rq_ring;        // MAX RQ ring size
+    u32 max_wq_ring;        // MAX WQ ring size
+    u32 max_cq_ring;        // MAX CQ ring size
+    u32 rdma_rsvd_lkey;     // Reserved (privileged) LKey
 };
 
 #define VENETF_TSO 0x1 /* TSO enabled */
@@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
     (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
 #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
     DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
 
 struct vnic_rq_buf {
     struct vnic_rq_buf *next;
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
     unsigned int index;
     void *desc;
     uint64_t wr_id;
+    unsigned int offset;
+    unsigned int truesize;
 };
 
 enum enic_poll_state {
@@ -62,7 +62,7 @@ struct vnic_wq_buf {
     (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
 #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
     DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
 
 struct vnic_wq {
     unsigned int index;
@ -39,8 +39,6 @@
|
||||
#include "ibmveth.h"
|
||||
|
||||
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
|
||||
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
|
||||
bool reuse);
|
||||
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
|
||||
|
||||
static struct kobj_type ktype_veth_pool;
|
||||
@ -213,95 +211,170 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
struct ibmveth_buff_pool *pool)
{
u32 i;
u32 count = pool->size - atomic_read(&pool->available);
u32 buffers_added = 0;
struct sk_buff *skb;
unsigned int free_index, index;
u64 correlator;
union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
u32 remaining = pool->size - atomic_read(&pool->available);
u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
unsigned long lpar_rc;
u32 buffers_added = 0;
u32 i, filled, batch;
struct vio_dev *vdev;
dma_addr_t dma_addr;
struct device *dev;
u32 index;

vdev = adapter->vdev;
dev = &vdev->dev;

mb();

for (i = 0; i < count; ++i) {
union ibmveth_buf_desc desc;
batch = adapter->rx_buffers_per_hcall;

free_index = pool->consumer_index;
index = pool->free_map[free_index];
skb = NULL;
while (remaining > 0) {
unsigned int free_index = pool->consumer_index;

BUG_ON(index == IBM_VETH_INVALID_MAP);
/* Fill a batch of descriptors */
for (filled = 0; filled < min(remaining, batch); filled++) {
index = pool->free_map[free_index];
if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
adapter->replenish_add_buff_failure++;
netdev_info(adapter->netdev,
"Invalid map index %u, reset\n",
index);
schedule_work(&adapter->work);
break;
}

/* are we allocating a new buffer or recycling an old one */
if (pool->skbuff[index])
goto reuse;
if (!pool->skbuff[index]) {
struct sk_buff *skb = NULL;

skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
skb = netdev_alloc_skb(adapter->netdev,
pool->buff_size);
if (!skb) {
adapter->replenish_no_mem++;
adapter->replenish_add_buff_failure++;
break;
}

if (!skb) {
netdev_dbg(adapter->netdev,
"replenish: unable to allocate skb\n");
adapter->replenish_no_mem++;
dma_addr = dma_map_single(dev, skb->data,
pool->buff_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
break;
}

pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;
} else {
/* re-use case */
dma_addr = pool->dma_addr[index];
}

if (rx_flush) {
unsigned int len;

len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
len = min(pool->buff_size, len);
ibmveth_flush_buffer(pool->skbuff[index]->data,
len);
}

descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
pool->buff_size;
descs[filled].fields.address = dma_addr;

correlators[filled] = ((u64)pool->index << 32) | index;
*(u64 *)pool->skbuff[index]->data = correlators[filled];

free_index++;
if (free_index >= pool->size)
free_index = 0;
}

if (!filled)
break;
}

dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);

if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto failure;

pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;

if (rx_flush) {
unsigned int len = min(pool->buff_size,
adapter->netdev->mtu +
IBMVETH_BUFF_OH);
ibmveth_flush_buffer(skb->data, len);
}
reuse:
dma_addr = pool->dma_addr[index];
desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
desc.fields.address = dma_addr;

correlator = ((u64)pool->index << 32) | index;
*(u64 *)pool->skbuff[index]->data = correlator;

lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
desc.desc);

/* single buffer case */
if (filled == 1)
lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
descs[0].desc);
else
/* Multi-buffer hcall */
lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
descs[0].desc,
descs[1].desc,
descs[2].desc,
descs[3].desc,
descs[4].desc,
descs[5].desc,
descs[6].desc,
descs[7].desc);
if (lpar_rc != H_SUCCESS) {
netdev_warn(adapter->netdev,
"%sadd_logical_lan failed %lu\n",
skb ? "" : "When recycling: ", lpar_rc);
goto failure;
dev_warn_ratelimited(dev,
"RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
filled, lpar_rc, batch);
goto hcall_failure;
}

pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->consumer_index++;
if (pool->consumer_index >= pool->size)
pool->consumer_index = 0;
/* Only update pool state after hcall succeeds */
for (i = 0; i < filled; i++) {
free_index = pool->consumer_index;
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;

buffers_added++;
adapter->replenish_add_buff_success++;
pool->consumer_index++;
if (pool->consumer_index >= pool->size)
pool->consumer_index = 0;
}

buffers_added += filled;
adapter->replenish_add_buff_success += filled;
remaining -= filled;

memset(&descs, 0, sizeof(descs));
memset(&correlators, 0, sizeof(correlators));
continue;

hcall_failure:
for (i = 0; i < filled; i++) {
index = correlators[i] & 0xffffffffUL;
dma_addr = pool->dma_addr[index];

if (pool->skbuff[index]) {
if (dma_addr &&
!dma_mapping_error(dev, dma_addr))
dma_unmap_single(dev, dma_addr,
pool->buff_size,
DMA_FROM_DEVICE);

dev_kfree_skb_any(pool->skbuff[index]);
pool->skbuff[index] = NULL;
}
}
adapter->replenish_add_buff_failure += filled;

/*
* If the multi rx buffers hcall is no longer supported by FW,
* e.g. in the case of Live Partition Migration
*/
if (batch > 1 && lpar_rc == H_FUNCTION) {
/*
* Instead of retrying each buffer individually, just
* set the max rx buffers per hcall to 1; the buffers
* will be replenished the next time
* ibmveth_replenish_buffer_pool() is called, using
* the single-buffer path.
*/
netdev_info(adapter->netdev,
"RX Multi buffers not supported by FW, rc=%lu\n",
lpar_rc);
adapter->rx_buffers_per_hcall = 1;
netdev_info(adapter->netdev,
"Next rx replenish will fall back to single-buffer hcall\n");
}
break;
}

mb();
atomic_add(buffers_added, &(pool->available));
return;

failure:

if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(pool->skbuff[index]);
pool->skbuff[index] = NULL;
adapter->replenish_add_buff_failure++;

mb();
atomic_add(buffers_added, &(pool->available));
}
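
The correlator written into each buffer above is how buffer identity round-trips through the hypervisor: pool index in the upper 32 bits, buffer index in the lower 32. A minimal standalone sketch of the encode/decode pair (function names are illustrative, not from the driver):

#include <stdint.h>

/* Encode, as in ibmveth_replenish_buffer_pool() above. */
static uint64_t correlator_encode(uint32_t pool_idx, uint32_t buf_idx)
{
	return ((uint64_t)pool_idx << 32) | buf_idx;
}

/* Decode, as in ibmveth_remove_buffer_from_pool() below. */
static void correlator_decode(uint64_t cor, uint32_t *pool_idx,
			      uint32_t *buf_idx)
{
	*pool_idx = (uint32_t)(cor >> 32);
	*buf_idx = (uint32_t)(cor & 0xffffffffUL);
}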
@ -370,20 +443,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
u64 correlator, bool reuse)
/**
* ibmveth_remove_buffer_from_pool - remove a buffer from a pool
* @adapter: adapter instance
* @correlator: identifies pool and index
* @reuse: whether to reuse buffer
*
* Return:
* * %0 - success
* * %-EINVAL - correlator maps to a pool or index that is out of range
* * %-EFAULT - pool and index map to a null skb
*/
static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;

BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
BUG_ON(index >= adapter->rx_buff_pool[pool].size);
if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
schedule_work(&adapter->work);
return -EINVAL;
}

skb = adapter->rx_buff_pool[pool].skbuff[index];
BUG_ON(skb == NULL);
if (WARN_ON(!skb)) {
schedule_work(&adapter->work);
return -EFAULT;
}

/* if we are going to reuse the buffer then keep the pointers around
* but mark index as available. replenish will see the skb pointer and
@ -411,6 +500,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
mb();

atomic_dec(&(adapter->rx_buff_pool[pool].available));

return 0;
}

/* get the current buffer on the rx queue */
@ -420,24 +511,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;

BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
BUG_ON(index >= adapter->rx_buff_pool[pool].size);
if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
schedule_work(&adapter->work);
return NULL;
}

return adapter->rx_buff_pool[pool].skbuff[index];
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
bool reuse)
/**
* ibmveth_rxq_harvest_buffer - Harvest buffer from pool
*
* @adapter: pointer to adapter
* @reuse: whether to reuse buffer
*
* Context: called from ibmveth_poll
*
* Return:
* * %0 - success
* * other - non-zero return from ibmveth_remove_buffer_from_pool
*/
static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
bool reuse)
{
u64 cor;
int rc;

cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
if (unlikely(rc))
return rc;

if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}

return 0;
}

static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@ -709,6 +820,35 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}

/**
* ibmveth_reset - Handle scheduled reset work
*
* @w: pointer to work_struct embedded in adapter structure
*
* Context: This routine acquires rtnl_mutex and disables its NAPI through
* ibmveth_close. It can't be called directly in a context that has
* already acquired rtnl_mutex or disabled its NAPI, or directly from
* a poll routine.
*
* Return: void
*/
static void ibmveth_reset(struct work_struct *w)
{
struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
struct net_device *netdev = adapter->netdev;

netdev_dbg(netdev, "reset starting\n");

rtnl_lock();

dev_close(adapter->netdev);
dev_open(adapter->netdev, NULL);

rtnl_unlock();

netdev_dbg(netdev, "reset complete\n");
}

static int ibmveth_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
@ -1324,7 +1464,8 @@ restart_poll:
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
ibmveth_rxq_harvest_buffer(adapter, true);
if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
break;
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@ -1334,6 +1475,8 @@ restart_poll:
__sum16 iph_check = 0;

skb = ibmveth_rxq_get_buffer(adapter);
if (unlikely(!skb))
break;

/* if the large packet bit is set in the rx queue
* descriptor, the mss will be written by PHYP eight
@ -1357,10 +1500,12 @@ restart_poll:
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
ibmveth_rxq_harvest_buffer(adapter, true);
if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
break;
skb = new_skb;
} else {
ibmveth_rxq_harvest_buffer(adapter, false);
if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
break;
skb_reserve(skb, offset);
}

@ -1407,7 +1552,10 @@ restart_poll:
* then check once more to make sure we are done.
*/
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
BUG_ON(lpar_rc != H_SUCCESS);
if (WARN_ON(lpar_rc != H_SUCCESS)) {
schedule_work(&adapter->work);
goto out;
}

if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@ -1428,7 +1576,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
BUG_ON(lpar_rc != H_SUCCESS);
WARN_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@ -1670,6 +1818,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)

adapter->vdev = dev;
adapter->netdev = netdev;
INIT_WORK(&adapter->work, ibmveth_reset);
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
ibmveth_init_link_settings(netdev);

@ -1705,6 +1854,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->features |= NETIF_F_FRAGLIST;
}

if (ret == H_SUCCESS &&
(ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) {
adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL;
netdev_dbg(netdev,
"RX Multi-buffer hcall supported by FW, batch set to %u\n",
adapter->rx_buffers_per_hcall);
} else {
adapter->rx_buffers_per_hcall = 1;
netdev_dbg(netdev,
"RX Single-buffer hcall mode, batch set to %u\n",
adapter->rx_buffers_per_hcall);
}

netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;

@ -1762,6 +1924,8 @@ static void ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;

cancel_work_sync(&adapter->work);

for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);

@ -28,6 +28,7 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL

#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT 0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
@ -46,6 +47,24 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)

static inline long h_add_logical_lan_buffers(unsigned long unit_address,
unsigned long desc1,
unsigned long desc2,
unsigned long desc3,
unsigned long desc4,
unsigned long desc5,
unsigned long desc6,
unsigned long desc7,
unsigned long desc8)
{
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
retbuf, unit_address,
desc1, desc2, desc3, desc4,
desc5, desc6, desc7, desc8);
}

/* FW allows us to send 6 descriptors but we only use one so mark
* the other 5 as unused (0)
*/
@ -101,6 +120,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U
#define IBMVETH_MAX_RX_PER_HCALL 8U

static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@ -137,6 +157,7 @@ struct ibmveth_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
struct napi_struct napi;
struct work_struct work;
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
@ -150,6 +171,7 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
unsigned int rx_buffers_per_hcall;

u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;

@ -2308,9 +2308,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_pool->num_buffers - 1 :
tx_pool->consumer_index - 1;
tx_buff = &tx_pool->tx_buff[index];
adapter->netdev->stats.tx_packets--;
adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
adapter->tx_stats_buffers[queue_num].packets--;
adapter->tx_stats_buffers[queue_num].batched_packets--;
adapter->tx_stats_buffers[queue_num].bytes -=
tx_buff->skb->len;
dev_kfree_skb_any(tx_buff->skb);
@ -2402,7 +2400,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_map_failed = 0;
union sub_crq indir_arr[16];
unsigned int tx_dropped = 0;
unsigned int tx_packets = 0;
unsigned int tx_dpackets = 0;
unsigned int tx_bpackets = 0;
unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
@ -2575,6 +2574,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (lpar_rc != H_SUCCESS)
goto tx_err;

tx_dpackets++;
goto early_exit;
}

@ -2603,6 +2603,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto tx_err;
}

tx_bpackets++;

early_exit:
if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
@ -2610,7 +2612,6 @@ early_exit:
netif_stop_subqueue(netdev, queue_num);
}

tx_packets++;
tx_bytes += skb->len;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
@ -2638,12 +2639,10 @@ tx_err:
}
out:
rcu_read_unlock();
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
netdev->stats.tx_packets += tx_packets;
adapter->tx_send_failed += tx_send_failed;
adapter->tx_map_failed += tx_map_failed;
adapter->tx_stats_buffers[queue_num].packets += tx_packets;
adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

@ -3442,6 +3441,25 @@ err:
return -ret;
}

static void ibmvnic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;

for (i = 0; i < adapter->req_rx_queues; i++) {
stats->rx_packets += adapter->rx_stats_buffers[i].packets;
stats->rx_bytes += adapter->rx_stats_buffers[i].bytes;
}

for (i = 0; i < adapter->req_tx_queues; i++) {
stats->tx_packets += adapter->tx_stats_buffers[i].batched_packets;
stats->tx_packets += adapter->tx_stats_buffers[i].direct_packets;
stats->tx_bytes += adapter->tx_stats_buffers[i].bytes;
stats->tx_dropped += adapter->tx_stats_buffers[i].dropped_packets;
}
}

static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
@ -3557,8 +3575,6 @@ restart_poll:

length = skb->len;
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += length;
adapter->rx_stats_buffers[scrq_num].packets++;
adapter->rx_stats_buffers[scrq_num].bytes += length;
frames_processed++;
@ -3668,6 +3684,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_rx_mode = ibmvnic_set_multi,
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = ibmvnic_get_stats64,
.ndo_tx_timeout = ibmvnic_tx_timeout,
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
@ -3808,7 +3825,10 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

for (i = 0; i < adapter->req_tx_queues; i++) {
snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
snprintf(data, ETH_GSTRING_LEN, "tx%d_batched_packets", i);
data += ETH_GSTRING_LEN;

snprintf(data, ETH_GSTRING_LEN, "tx%d_direct_packets", i);
data += ETH_GSTRING_LEN;

snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
@ -3873,7 +3893,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
(adapter, ibmvnic_stats[i].offset));

for (j = 0; j < adapter->req_tx_queues; j++) {
data[i] = adapter->tx_stats_buffers[j].packets;
data[i] = adapter->tx_stats_buffers[j].batched_packets;
i++;
data[i] = adapter->tx_stats_buffers[j].direct_packets;
i++;
data[i] = adapter->tx_stats_buffers[j].bytes;
i++;

@ -211,20 +211,25 @@ struct ibmvnic_statistics {
u8 reserved[72];
} __packed __aligned(8);

#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
u64 packets;
u64 batched_packets;
u64 direct_packets;
u64 bytes;
u64 dropped_packets;
};

#define NUM_RX_STATS 3
#define NUM_TX_STATS \
(sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))

struct ibmvnic_rx_queue_stats {
u64 packets;
u64 bytes;
u64 interrupts;
};

#define NUM_RX_STATS \
(sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
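
Computing NUM_TX_STATS and NUM_RX_STATS from the struct size, as above, keeps the ethtool string and counter counts from drifting when a field is added. A minimal sketch of the same pattern outside the driver (struct and field names are illustrative):

#include <stdio.h>

typedef unsigned long long u64;

struct example_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;	/* adding a field here grows the count automatically */
};

#define NUM_EXAMPLE_STATS \
	(sizeof(struct example_queue_stats) / sizeof(u64))

int main(void)
{
	printf("%zu\n", NUM_EXAMPLE_STATS);	/* prints 3 */
	return 0;
}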

struct ibmvnic_acl_buffer {
__be32 len;
__be32 version;

@ -2,11 +2,13 @@
obj-$(CONFIG_FCOE_FNIC) += fnic.o

fnic-y := \
fip.o\
fnic_attrs.o \
fnic_isr.o \
fnic_main.o \
fnic_res.o \
fnic_fcs.o \
fdls_disc.o \
fnic_scsi.o \
fnic_trace.o \
fnic_debugfs.o \
@ -15,4 +17,5 @@ fnic-y := \
vnic_intr.o \
vnic_rq.o \
vnic_wq_copy.o \
vnic_wq.o
vnic_wq.o \
fnic_pci_subsys_devid.o

drivers/scsi/fnic/fdls_disc.c (new file, 5094 lines): diff suppressed because it is too large.

drivers/scsi/fnic/fdls_fc.h (new file, 253 lines):
@ -0,0 +1,253 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/

#ifndef _FDLS_FC_H_
#define _FDLS_FC_H_

/* This file contains the declarations for FC fabric services
* and target discovery
*
* Request and Response for
* 1. FLOGI
* 2. PLOGI to Fabric Controller
* 3. GPN_ID, GPN_FT
* 4. RSCN
* 5. PLOGI to Target
* 6. PRLI to Target
*/

#include <scsi/scsi.h>
#include <scsi/fc/fc_els.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_ns.h>
#include <uapi/scsi/fc/fc_gs.h>
#include <uapi/linux/if_ether.h>
#include <scsi/fc/fc_ms.h>
#include <linux/minmax.h>
#include <linux/if_ether.h>
#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fcoe.h>

#define FDLS_MIN_FRAMES (32)
#define FDLS_MIN_FRAME_ELEM (4)
#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002
#define FNIC_FCP_SP_TARGET 0x00000010
#define FNIC_FCP_SP_INITIATOR 0x00000020
#define FNIC_FCP_SP_CONF_CMPL 0x00000080
#define FNIC_FCP_SP_RETRY 0x00000100

#define FNIC_FC_CONCUR_SEQS (0xFF)
#define FNIC_FC_RO_INFO (0x1F)

/* Little Endian */
#define FNIC_UNASSIGNED_OXID (0xffff)
#define FNIC_UNASSIGNED_RXID (0xffff)
#define FNIC_ELS_REQ_FCTL (0x000029)
#define FNIC_ELS_REP_FCTL (0x000099)

#define FNIC_FCP_RSP_FCTL (0x000099)
#define FNIC_REQ_ABTS_FCTL (0x000009)

#define FNIC_FC_PH_VER_HI (0x20)
#define FNIC_FC_PH_VER_LO (0x20)
#define FNIC_FC_PH_VER (0x2020)
#define FNIC_FC_B2B_CREDIT (0x0A)
#define FNIC_FC_B2B_RDF_SZ (0x0800)

#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data)
#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov)
#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov)
#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features))
#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn)
#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn)

#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \
(FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size))
#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \
(FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov))
#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \
(FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov))

#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3)
#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3)
#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid))
#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid))

#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl)
#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type)
#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \
put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl))

#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr)
#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr)
#define FNIC_STD_SET_PORT_ID(__req, __portid) \
memcpy(__req.fr_fid.fp_fid, __portid, 3)
#define FNIC_STD_SET_PORT_NAME(_req, _pName) \
(put_unaligned_be64(_pName, &_req.fr_wwn))

#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id))
#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id))
#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id)
#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id)
#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type)
#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl)
#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl)

#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd))

#define FNIC_FCOE_MAX_FRAME_SZ (2048)
#define FNIC_FCOE_MIN_FRAME_SZ (280)
#define FNIC_FC_MAX_PAYLOAD_LEN (2048)
#define FNIC_MIN_DATA_FIELD_SIZE (256)

#define FNIC_FC_EDTOV_NSEC (0x400)
#define FNIC_NSEC_TO_MSEC (0x1000000)
#define FCP_PRLI_FUNC_TARGET (0x0010)

#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21)
#define FNIC_FC_F_CTL_LAST_END_SEQ (0x98)
#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99)
#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29)
#define FNIC_FC_R_CTL_FC4_SCTL (0x03)
#define FNIC_FC_CS_CTL (0x00)

#define FNIC_FC_FRAME_UNSOLICITED(_fchdr) \
(_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ)
#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \
(_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA)
#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \
(_fchdr->fh_r_ctl == FC_RCTL_ELS_REP)
#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \
(_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ)
#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \
(_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT)
#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \
(_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT)
#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \
(_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL)
#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS)
#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS)
#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT)
#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL)

#define FNIC_FC_C3_RDF (0xfff)
#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \
(min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \
(_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF)))
#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \
(min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \
(uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff)))

/* FLOGI/PLOGI struct */
struct fc_std_flogi {
struct fc_frame_header fchdr;
struct fc_els_flogi els;
} __packed;

struct fc_std_els_acc_rsp {
struct fc_frame_header fchdr;
struct fc_els_ls_acc acc;
} __packed;

struct fc_std_els_rjt_rsp {
struct fc_frame_header fchdr;
struct fc_els_ls_rjt rej;
} __packed;

struct fc_std_els_adisc {
struct fc_frame_header fchdr;
struct fc_els_adisc els;
} __packed;

struct fc_std_rls_acc {
struct fc_frame_header fchdr;
struct fc_els_rls_resp els;
} __packed;

struct fc_std_abts_ba_acc {
struct fc_frame_header fchdr;
struct fc_ba_acc acc;
} __packed;

struct fc_std_abts_ba_rjt {
struct fc_frame_header fchdr;
struct fc_ba_rjt rjt;
} __packed;

struct fc_std_els_prli {
struct fc_frame_header fchdr;
struct fc_els_prli els_prli;
struct fc_els_spp sp;
} __packed;

struct fc_std_rpn_id {
struct fc_frame_header fchdr;
struct fc_ct_hdr fc_std_ct_hdr;
struct fc_ns_rn_id rpn_id;
} __packed;

struct fc_std_fdmi_rhba {
struct fc_frame_header fchdr;
struct fc_ct_hdr fc_std_ct_hdr;
struct fc_fdmi_rhba rhba;
} __packed;

struct fc_std_fdmi_rpa {
struct fc_frame_header fchdr;
struct fc_ct_hdr fc_std_ct_hdr;
struct fc_fdmi_rpa rpa;
} __packed;

struct fc_std_rft_id {
struct fc_frame_header fchdr;
struct fc_ct_hdr fc_std_ct_hdr;
struct fc_ns_rft_id rft_id;
} __packed;

struct fc_std_rff_id {
struct fc_frame_header fchdr;
struct fc_ct_hdr fc_std_ct_hdr;
struct fc_ns_rff_id rff_id;
} __packed;

struct fc_std_gpn_ft {
struct fc_frame_header fchdr;
struct fc_ct_hdr fc_std_ct_hdr;
struct fc_ns_gid_ft gpn_ft;
} __packed;

/* Accept CT_IU for GPN_FT */
struct fc_gpn_ft_rsp_iu {
uint8_t ctrl;
uint8_t fcid[3];
uint32_t rsvd;
__be64 wwpn;
} __packed;

struct fc_std_rls {
struct fc_frame_header fchdr;
struct fc_els_rls els;
} __packed;

struct fc_std_scr {
struct fc_frame_header fchdr;
struct fc_els_scr scr;
} __packed;

struct fc_std_rscn {
struct fc_frame_header fchdr;
struct fc_els_rscn els;
} __packed;

struct fc_std_logo {
struct fc_frame_header fchdr;
struct fc_els_logo els;
} __packed;

#define FNIC_ETH_FCOE_HDRS_OFFSET \
(sizeof(struct ethhdr) + sizeof(struct fcoe_hdr))

#endif /* _FDLS_FC_H */
drivers/scsi/fnic/fip.c (new file, 1005 lines): diff suppressed because it is too large.

drivers/scsi/fnic/fip.h (new file, 159 lines):
@ -0,0 +1,159 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#ifndef _FIP_H_
#define _FIP_H_

#include "fdls_fc.h"
#include "fnic_fdls.h"
#include <scsi/fc/fc_fip.h>

/* Drop the cast from the standard definition */
#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02}
#define FCOE_MAX_SIZE 0x082E

#define FCOE_CTLR_FIPVLAN_TOV (3*1000)
#define FCOE_CTLR_FCS_TOV (3*1000)
#define FCOE_CTLR_MAX_SOL (5*1000)

#define FIP_DISC_SOL_LEN (6)
#define FIP_VLAN_REQ_LEN (2)
#define FIP_ENODE_KA_LEN (2)
#define FIP_VN_KA_LEN (7)
#define FIP_FLOGI_LEN (38)

enum fdls_vlan_state {
FIP_VLAN_AVAIL,
FIP_VLAN_SENT
};

enum fdls_fip_state {
FDLS_FIP_INIT,
FDLS_FIP_VLAN_DISCOVERY_STARTED,
FDLS_FIP_FCF_DISCOVERY_STARTED,
FDLS_FIP_FLOGI_STARTED,
FDLS_FIP_FLOGI_COMPLETE,
};

/*
* VLAN entry.
*/
struct fcoe_vlan {
struct list_head list;
uint16_t vid; /* vlan ID */
uint16_t sol_count; /* no. of sols sent */
uint16_t state; /* state */
};

struct fip_vlan_req {
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac_desc;
} __packed;

struct fip_vlan_notif {
struct fip_header fip;
struct fip_vlan_desc vlans_desc[];
} __packed;

struct fip_vn_port_ka {
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac_desc;
struct fip_vn_desc vn_port_desc;
} __packed;

struct fip_enode_ka {
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac_desc;
} __packed;

struct fip_cvl {
struct fip_header fip;
struct fip_mac_desc fcf_mac_desc;
struct fip_wwn_desc name_desc;
struct fip_vn_desc vn_ports_desc[];
} __packed;

struct fip_flogi_desc {
struct fip_desc fd_desc;
uint16_t rsvd;
struct fc_std_flogi flogi;
} __packed;

struct fip_flogi_rsp_desc {
struct fip_desc fd_desc;
uint16_t rsvd;
struct fc_std_flogi flogi;
} __packed;

struct fip_flogi {
struct ethhdr eth;
struct fip_header fip;
struct fip_flogi_desc flogi_desc;
struct fip_mac_desc mac_desc;
} __packed;

struct fip_flogi_rsp {
struct fip_header fip;
struct fip_flogi_rsp_desc rsp_desc;
struct fip_mac_desc mac_desc;
} __packed;

struct fip_discovery {
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac_desc;
struct fip_wwn_desc name_desc;
struct fip_size_desc fcoe_desc;
} __packed;

struct fip_disc_adv {
struct fip_header fip;
struct fip_pri_desc prio_desc;
struct fip_mac_desc mac_desc;
struct fip_wwn_desc name_desc;
struct fip_fab_desc fabric_desc;
struct fip_fka_desc fka_adv_desc;
} __packed;

void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph);
void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph);
void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph);
void fnic_work_on_fip_timer(struct work_struct *work);
void fnic_work_on_fcs_ka_timer(struct work_struct *work);
void fnic_fcoe_send_vlan_req(struct fnic *fnic);
void fnic_fcoe_start_fcf_discovery(struct fnic *fnic);
void fnic_fcoe_start_flogi(struct fnic *fnic);
void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph);
void fnic_vlan_discovery_timeout(struct fnic *fnic);

extern struct workqueue_struct *fnic_fip_queue;

#ifdef FNIC_DEBUG
static inline void
fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
int len, char *pfx)
{
struct fip_header *fiph = (struct fip_header *)(eth + 1);
u16 op = be16_to_cpu(fiph->fip_op);
u8 sub = fiph->fip_subcode;

FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"FIP %s packet contents: op: 0x%x sub: 0x%x (len = %d)",
pfx, op, sub, len);

fnic_debug_dump(fnic, (uint8_t *)eth, len);
}

#else /* FNIC_DEBUG */

static inline void
fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
int len, char *pfx) {}
#endif /* FNIC_DEBUG */

#endif /* _FIP_H_ */
@ -10,8 +10,10 @@
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic_res.h"
#include "fnic_trace.h"
@ -24,13 +26,15 @@
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_scsi.h"
#include "fnic_fdls.h"

#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.7.0.0"
#define DRV_VERSION "1.8.0.2"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "

#define FABRIC_LOGO_MAX_RETRY 3
#define DESC_CLEAN_LOW_WATERMARK 8
#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
@ -38,6 +42,7 @@
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
#define FNIC_DFLT_QUEUE_DEPTH 256
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
#define LUN0_DELAY_TIME 9

/*
* Tag bits used for special requests.
@ -75,6 +80,77 @@
#define FNIC_DEV_RST_TERM_DONE BIT(20)
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)

#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */
#define FNIC_FCOE_MAX_CMD_LEN 16
/* Retry supported by rport (returned by PRLI service parameters) */
#define FNIC_FC_RP_FLAGS_RETRY 0x1

/* Cisco vendor id */
#define PCI_VENDOR_ID_CISCO 0x1137
#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */

/* sereno pcie switch */
#define PCI_DEVICE_ID_CISCO_SERENO 0x004e
#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */
#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */
#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */

/* Sereno */
#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */
#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */
#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */
#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */
#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */
#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */
#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */

/* Cruz */
#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */
#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */
/* Cruz MountTian SIOC */
#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b
#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */
/* Cruz MountTian2 SIOC */
#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157
#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */

/* Bodega */
/* VIC 1457 PCIe mLOM */
#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218
#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */
/* VIC 1487 PCIe mLOM */
#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a
#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */
/* VIC 1440 Mezz mLOM */
#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215
#define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */
#define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */
#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */
#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */
#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */
#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */
#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */

/* Beverly */
#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */
#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */
#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */
#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */
#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */
#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */
#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */
#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */
#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */
#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */
#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */

struct fnic_pcie_device {
u32 device;
u8 *desc;
u32 subsystem_device;
u8 *subsys_desc;
};

/*
* fnic private data per SCSI command.
* These fields are locked by the hashed io_req_lock.
@ -127,8 +203,38 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
#define fnic_clear_state_flags(fnicp, st_flags) \
__fnic_set_state_flags(fnicp, st_flags, 1)

enum reset_states {
NOT_IN_PROGRESS = 0,
IN_PROGRESS,
RESET_ERROR
};

enum rscn_type {
NOT_PC_RSCN = 0,
PC_RSCN
};

enum pc_rscn_handling_status {
PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0,
PC_RSCN_HANDLING_IN_PROGRESS
};

enum pc_rscn_handling_feature {
PC_RSCN_HANDLING_FEATURE_OFF = 0,
PC_RSCN_HANDLING_FEATURE_ON
};

extern unsigned int fnic_fdmi_support;
extern unsigned int fnic_log_level;
extern unsigned int io_completions;
extern struct workqueue_struct *fnic_event_queue;

extern unsigned int pc_rscn_handling_feature_flag;
extern spinlock_t reset_fnic_list_lock;
extern struct list_head reset_fnic_list;
extern struct workqueue_struct *reset_fnic_work_queue;
extern struct work_struct reset_fnic_work;

#define FNIC_MAIN_LOGGING 0x01
#define FNIC_FCS_LOGGING 0x02
@ -155,6 +261,12 @@ do { \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)

#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
shost_printk(kern_level, host, \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)

#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
shost_printk(kern_level, host, \
@ -213,12 +325,26 @@ enum fnic_state {

struct mempool;

enum fnic_role_e {
FNIC_ROLE_FCP_INITIATOR = 0,
};

enum fnic_evt {
FNIC_EVT_START_VLAN_DISC = 1,
FNIC_EVT_START_FCF_DISC = 2,
FNIC_EVT_MAX,
};

struct fnic_frame_list {
/*
* Link to frame lists
*/
struct list_head links;
void *fp;
int frame_len;
int rx_ethhdr_stripped;
};

struct fnic_event {
struct list_head list;
struct fnic *fnic;
@ -235,8 +361,9 @@ struct fnic_cpy_wq {
/* Per-instance private data structure */
struct fnic {
int fnic_num;
struct fc_lport *lport;
struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
enum fnic_role_e role;
struct fnic_iport_s iport;
struct Scsi_Host *host;
struct vnic_dev_bar bar0;

struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
@ -255,6 +382,7 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;

struct completion reset_completion_wait;
struct mutex sgreset_mutex;
spinlock_t sgreset_lock; /* lock for sgreset */
struct scsi_cmnd *sgreset_sc;
@ -268,25 +396,27 @@ struct fnic {
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
u32 link_events:1; /* set when we get any link event*/

struct completion *remove_wait; /* device remove thread blocks */

struct completion *fw_reset_done;
u32 reset_in_progress;
atomic_t in_flight; /* io counter */
bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
unsigned long lock_flags;

u16 vlan_id; /* VLAN tag including priority */
u8 data_src_addr[ETH_ALEN];
u64 fcp_input_bytes; /* internal statistic */
u64 fcp_output_bytes; /* internal statistic */
u32 link_down_cnt;
u32 soft_reset_count;
int link_status;

struct list_head list;
struct list_head links;
struct pci_dev *pdev;
struct vnic_fc_config config;
struct vnic_dev *vdev;
@ -306,19 +436,29 @@ struct fnic {
struct work_struct link_work;
struct work_struct frame_work;
struct work_struct flush_work;
struct sk_buff_head frame_queue;
struct sk_buff_head tx_queue;
struct list_head frame_queue;
struct list_head tx_queue;
mempool_t *frame_pool;
mempool_t *frame_elem_pool;
struct work_struct tport_work;
struct list_head tport_event_list;

char subsys_desc[14];
int subsys_desc_len;
int pc_rscn_handling_status;

/*** FIP related data members -- start ***/
void (*set_vlan)(struct fnic *, u16 vlan);
struct work_struct fip_frame_work;
struct sk_buff_head fip_frame_queue;
struct work_struct fip_timer_work;
struct list_head fip_frame_queue;
struct timer_list fip_timer;
struct list_head vlans;
spinlock_t vlans_lock;

struct work_struct event_work;
struct list_head evlist;
struct timer_list retry_fip_timer;
struct timer_list fcs_ka_timer;
struct timer_list enode_ka_timer;
struct timer_list vn_ka_timer;
struct list_head vlan_list;
/*** FIP related data members -- end ***/

/* copy work queue cache line section */
@ -341,11 +481,6 @@ struct fnic {
____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
};

static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
{
return container_of(fip, struct fnic, ctlr);
}

extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
extern const struct attribute_group *fnic_host_groups[];
@ -356,29 +491,29 @@ int fnic_set_intr_mode_msix(struct fnic *fnic);
void fnic_free_intr(struct fnic *fnic);
int fnic_request_intr(struct fnic *fnic);

int fnic_send(struct fc_lport *, struct fc_frame *);
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
void fnic_handle_frame(struct work_struct *work);
void fnic_tport_event_handler(struct work_struct *work);
void fnic_handle_link(struct work_struct *work);
void fnic_handle_event(struct work_struct *work);
void fdls_reclaim_oxid_handler(struct work_struct *work);
void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid);
void fdls_schedule_oxid_free_retry_work(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
void fnic_flush_tx(struct work_struct *work);
void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
void fnic_update_mac(struct fc_lport *, u8 *new);
void fnic_update_mac_locked(struct fnic *, u8 *new);

int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int fnic_abort_cmd(struct scsi_cmnd *);
int fnic_device_reset(struct scsi_cmnd *);
int fnic_host_reset(struct scsi_cmnd *);
int fnic_reset(struct Scsi_Host *);
void fnic_scsi_cleanup(struct fc_lport *);
void fnic_scsi_abort_io(struct fc_lport *);
void fnic_empty_scsi_cleanup(struct fc_lport *);
void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
int fnic_eh_host_reset_handler(struct scsi_cmnd *sc);
int fnic_host_reset(struct Scsi_Host *shost);
void fnic_reset(struct Scsi_Host *shost);
int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
void fnic_get_host_port_state(struct Scsi_Host *shost);
void fnic_scsi_fcpio_reset(struct fnic *fnic);
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
@ -390,14 +525,15 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);

int fnic_stats_debugfs_init(struct fnic *fnic);
void fnic_stats_debugfs_remove(struct fnic *fnic);
int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);

void fnic_handle_fip_frame(struct work_struct *work);
void fnic_reset_work_handler(struct work_struct *work);
void fnic_handle_fip_event(struct fnic *fnic);
void fnic_fcoe_reset_vlans(struct fnic *fnic);
void fnic_fcoe_evlist_free(struct fnic *fnic);
extern void fnic_handle_fip_timer(struct fnic *fnic);
extern void fnic_handle_fip_timer(struct timer_list *t);

static inline int
fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
@ -406,4 +542,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
}
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
void fnic_free_txq(struct list_head *head);
int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
char **subsys_desc);
void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
void fnic_delete_fcp_tports(struct fnic *fnic);
void fnic_flush_tport_event_list(struct fnic *fnic);
int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid);
unsigned int fnic_count_all_ioreqs(struct fnic *fnic);
unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,
struct scsi_device *device);
unsigned int fnic_count_lun_ioreqs(struct fnic *fnic,
struct scsi_device *device);
void fnic_scsi_unload(struct fnic *fnic);
void fnic_scsi_unload_cleanup(struct fnic *fnic);
int fnic_get_debug_info(struct stats_debug_info *info,
struct fnic *fnic);

struct fnic_scsi_iter_data {
struct fnic *fnic;
void *data1;
void *data2;
bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
void *data1, void *data2);
};

static inline bool
fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data)
{
struct fnic_scsi_iter_data *iter = iter_data;

return iter->fn(iter->fnic, sc, iter->data1, iter->data2);
}

static inline void
fnic_scsi_io_iter(struct fnic *fnic,
bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
void *data1, void *data2),
void *data1, void *data2)
{
struct fnic_scsi_iter_data iter_data = {
.fn = fn,
.fnic = fnic,
.data1 = data1,
.data2 = data2,
};
scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data);
}
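
A hedged usage sketch for fnic_scsi_io_iter() above: the callback and its bookkeeping here are hypothetical, but the wiring follows the helper as defined (returning true keeps scsi_host_busy_iter() iterating; false stops it):

/* Hypothetical callback: count in-flight commands on one scsi_device
 * (data1 carries the device to match, data2 a running counter).
 */
static bool fnic_count_cmds_on_dev(struct fnic *fnic, struct scsi_cmnd *sc,
				   void *data1, void *data2)
{
	struct scsi_device *sdev = data1;
	unsigned int *count = data2;

	if (sc->device == sdev)
		(*count)++;
	return true;	/* keep iterating over the host's busy commands */
}

/* Caller side: */
unsigned int count = 0;

fnic_scsi_io_iter(fnic, fnic_count_cmds_on_dev, sdev, &count);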

#ifdef FNIC_DEBUG
static inline void
fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len)
{
int i;

for (i = 0; i < len; i = i+8) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8,
u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3],
u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]);
}
}

static inline void
fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
int len, char *pfx)
{
uint32_t s_id, d_id;

s_id = ntoh24(fchdr->fh_s_id);
d_id = ntoh24(fchdr->fh_d_id);
FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n",
pfx, s_id, d_id, fchdr->fh_type,
FNIC_STD_GET_OX_ID(fchdr), len);

fnic_debug_dump(fnic, (uint8_t *)fchdr, len);

}
#else /* FNIC_DEBUG */
static inline void
fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {}
static inline void
fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
uint32_t len, char *pfx) {}
#endif /* FNIC_DEBUG */
#endif /* _FNIC_H_ */

@ -11,8 +11,8 @@
static ssize_t fnic_show_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
struct fnic *fnic = lport_priv(lp);
struct fnic *fnic =
*((struct fnic **) shost_priv(class_to_shost(dev)));

return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
}
@ -26,9 +26,13 @@ static ssize_t fnic_show_drv_version(struct device *dev,
static ssize_t fnic_show_link_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
struct fnic *fnic =
*((struct fnic **) shost_priv(class_to_shost(dev)));

return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down");
return sysfs_emit(buf, "%s\n",
((fnic->iport.state != FNIC_IPORT_STATE_INIT) &&
(fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ?
"Link Up" : "Link Down");
}

static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
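
The two attribute handlers above reflect a layout change: shost_priv() now returns a slot that holds a pointer to the fnic rather than embedding the fc_lport private area, hence the extra dereference. A minimal sketch of the idiom; the allocation side shown here is an assumption about how the slot is populated, not code from this commit:

/* Assumed setup: the host private area is sized for one pointer and
 * made to point at the driver's fnic.
 *
 *	shost = scsi_host_alloc(&fnic_host_template, sizeof(struct fnic *));
 *	*((struct fnic **)shost_priv(shost)) = fnic;
 *
 * Readers then recover it with the matching double indirection:
 */
struct fnic *fnic = *((struct fnic **)shost_priv(class_to_shost(dev)));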
@ -7,6 +7,9 @@
#include <linux/vmalloc.h>
#include "fnic.h"

extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer,
struct fnic *fnic);

static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
@ -593,6 +596,7 @@ static int fnic_stats_debugfs_open(struct inode *inode,
debug->buf_size = buf_size;
memset((void *)debug->debug_buffer, 0, buf_size);
debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
debug->buffer_len += fnic_get_debug_info(debug, fnic);

file->private_data = debug;

@ -673,26 +677,25 @@ static const struct file_operations fnic_reset_debugfs_fops = {
* It will create file stats and reset_stats under statistics/host# directory
* to log per fnic stats.
*/
void fnic_stats_debugfs_init(struct fnic *fnic)
int fnic_stats_debugfs_init(struct fnic *fnic)
{
char name[16];

snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
snprintf(name, sizeof(name), "host%d", fnic->host->host_no);

fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
fnic_stats_debugfs_root);

fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_stats_debugfs_fops);

fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_reset_debugfs_fops);
return 0;
}

/*

File diff suppressed because it is too large.

drivers/scsi/fnic/fnic_fdls.h (new file, 435 lines)
@@ -0,0 +1,435 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _FNIC_FDLS_H_
#define _FNIC_FDLS_H_

#include "fnic_stats.h"
#include "fdls_fc.h"

/* FDLS - Fabric discovery and login services
 * -> VLAN discovery
 * -> retry every retry delay seconds until it succeeds.
 * <- List of VLANs
 *
 * -> Solicitation
 * <- Solicitation response (Advertisement)
 *
 * -> FCF selection & FLOGI (FLOGI timeout - 2 * E_D_TOV)
 * <- FLOGI response
 *
 * -> FCF keep alive
 * <- FCF keep alive
 *
 * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV)
 * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
 * <- PLOGI response
 * -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg
 *
 * -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV)
 * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
 * <- SCR response
 * -> Retry SCR - Number of retries 2
 *
 * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV)
 * -> Retry on BUSY until it succeeds
 * -> 2 retries on timeout
 *
 * -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV)
 * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
 * -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2)
 * -> Ignore if both retries fail.
 *
 * Session establishment with targets
 * For each PWWN
 * -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV)
 * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
 * <- PLOGI response
 * -> Retry PLOGI. Num retries using vnic.cfg
 *
 * -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV)
 * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
 * <- PRLI response
 * -> Retry PRLI. Num retries using vnic.cfg
 */

#define FDLS_RETRY_COUNT 2

/*
 * OXID encoding:
 * bits 0-8:   oxid idx - allocated from pool
 * bits 9-13:  oxid frame code from fnic_oxid_frame_type_e
 * bits 14-15: all zeros
 */
#define FNIC_OXID_POOL_SZ	(512)	/* always power of 2 */
#define FNIC_OXID_ENCODE(idx, frame_type)	(frame_type | idx)
#define FNIC_FRAME_MASK		0xFE00
#define FNIC_FRAME_TYPE(oxid)	(oxid & FNIC_FRAME_MASK)
#define FNIC_OXID_IDX(oxid)	((oxid) & (FNIC_OXID_POOL_SZ - 1))

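/*
 * Illustration (not part of this header): composing and decomposing an
 * exchange ID with the macros above. The values are hypothetical; the
 * frame-type codes are defined further down in this file.
 *
 *	uint16_t oxid = FNIC_OXID_ENCODE(0x003, FNIC_FRAME_TYPE_FABRIC_FLOGI);
 *	// oxid == 0x1003: frame code 0x1000 in bits 9-13, pool index 3 in bits 0-8
 *	FNIC_FRAME_TYPE(oxid);	// == 0x1000 (oxid & 0xFE00)
 *	FNIC_OXID_IDX(oxid);	// == 0x003  (oxid & (FNIC_OXID_POOL_SZ - 1))
 */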
#define OXID_RECLAIM_TOV(iport) (2 * iport->r_a_tov) /* in milliseconds */

#define FNIC_FDLS_FABRIC_ABORT_ISSUED	0x1
#define FNIC_FDLS_FPMA_LEARNT		0x2

/* tport flags */
#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST	0x1
#define FNIC_FDLS_TGT_ABORT_ISSUED	0x2
#define FNIC_FDLS_TPORT_SEND_ADISC	0x4
#define FNIC_FDLS_RETRY_FRAME		0x8
#define FNIC_FDLS_TPORT_BUSY		0x10
#define FNIC_FDLS_TPORT_TERMINATING	0x20
#define FNIC_FDLS_TPORT_DELETED		0x40
#define FNIC_FDLS_SCSI_REGISTERED	0x200

/* Retry supported by rport (returned by PRLI service parameters) */
#define FDLS_FC_RP_FLAGS_RETRY 0x1

#define fdls_set_state(_fdls_fabric, _state)  ((_fdls_fabric)->state = _state)
#define fdls_get_state(_fdls_fabric)          ((_fdls_fabric)->state)

#define FNIC_FDMI_ACTIVE	0x8
#define FNIC_FIRST_LINK_UP	0x2

#define fdls_set_tport_state(_tport, _state)  (_tport->state = _state)
#define fdls_get_tport_state(_tport)          (_tport->state)

#define FNIC_PORTSPEED_10GBIT	1
#define FNIC_FRAME_HT_ROOM	(2148)
#define FNIC_FCOE_FRAME_MAXSZ	(2112)

#define FNIC_FRAME_TYPE_FABRIC_FLOGI	0x1000
#define FNIC_FRAME_TYPE_FABRIC_PLOGI	0x1200
#define FNIC_FRAME_TYPE_FABRIC_RPN	0x1400
#define FNIC_FRAME_TYPE_FABRIC_RFT	0x1600
#define FNIC_FRAME_TYPE_FABRIC_RFF	0x1800
#define FNIC_FRAME_TYPE_FABRIC_SCR	0x1A00
#define FNIC_FRAME_TYPE_FABRIC_GPN_FT	0x1C00
#define FNIC_FRAME_TYPE_FABRIC_LOGO	0x1E00
#define FNIC_FRAME_TYPE_FDMI_PLOGI	0x2000
#define FNIC_FRAME_TYPE_FDMI_RHBA	0x2200
#define FNIC_FRAME_TYPE_FDMI_RPA	0x2400
#define FNIC_FRAME_TYPE_TGT_PLOGI	0x2600
#define FNIC_FRAME_TYPE_TGT_PRLI	0x2800
#define FNIC_FRAME_TYPE_TGT_ADISC	0x2A00
#define FNIC_FRAME_TYPE_TGT_LOGO	0x2C00

struct fnic_fip_fcf_s {
	uint16_t vlan_id;
	uint8_t fcf_mac[6];
	uint8_t fcf_priority;
	uint32_t fka_adv_period;
	uint8_t ka_disabled;
};

enum fnic_fdls_state_e {
	FDLS_STATE_INIT = 0,
	FDLS_STATE_LINKDOWN,
	FDLS_STATE_FABRIC_LOGO,
	FDLS_STATE_FLOGO_DONE,
	FDLS_STATE_FABRIC_FLOGI,
	FDLS_STATE_FABRIC_PLOGI,
	FDLS_STATE_RPN_ID,
	FDLS_STATE_REGISTER_FC4_TYPES,
	FDLS_STATE_REGISTER_FC4_FEATURES,
	FDLS_STATE_SCR,
	FDLS_STATE_GPN_FT,
	FDLS_STATE_TGT_DISCOVERY,
	FDLS_STATE_RSCN_GPN_FT,
	FDLS_STATE_SEND_GPNFT
};

struct fnic_fdls_fabric_s {
	enum fnic_fdls_state_e state;
	uint32_t flags;
	struct list_head tport_list; /* List of discovered tports */
	struct timer_list retry_timer;
	int del_timer_inprogress;
	int del_fdmi_timer_inprogress;
	int retry_counter;
	int timer_pending;
	int fdmi_retry;
	struct timer_list fdmi_timer;
	int fdmi_pending;
};

struct fnic_fdls_fip_s {
	uint32_t state;
	uint32_t flogi_retry;
};

/* Message to tport_event_handler */
enum fnic_tgt_msg_id {
	TGT_EV_NONE = 0,
	TGT_EV_RPORT_ADD,
	TGT_EV_RPORT_DEL,
	TGT_EV_TPORT_DELETE,
	TGT_EV_REMOVE
};

struct fnic_tport_event_s {
	struct list_head links;
	enum fnic_tgt_msg_id event;
	void *arg1;
};

enum fdls_tgt_state_e {
	FDLS_TGT_STATE_INIT = 0,
	FDLS_TGT_STATE_PLOGI,
	FDLS_TGT_STATE_PRLI,
	FDLS_TGT_STATE_READY,
	FDLS_TGT_STATE_LOGO_RECEIVED,
	FDLS_TGT_STATE_ADISC,
	FDL_TGT_STATE_PLOGO,
	FDLS_TGT_STATE_OFFLINING,
	FDLS_TGT_STATE_OFFLINE
};

struct fnic_tport_s {
	struct list_head links; /* To link the tports */
	enum fdls_tgt_state_e state;
	uint32_t flags;
	uint32_t fcid;
	uint64_t wwpn;
	uint64_t wwnn;
	uint16_t active_oxid;
	uint16_t tgt_flags;
	atomic_t in_flight; /* io counter */
	uint16_t max_payload_size;
	uint16_t r_a_tov;
	uint16_t e_d_tov;
	uint16_t lun0_delay;
	int max_concur_seqs;
	uint32_t fcp_csp;
	struct timer_list retry_timer;
	int del_timer_inprogress;
	int retry_counter;
	int timer_pending;
	unsigned int num_pending_cmds;
	int nexus_restart_count;
	int exch_reset_in_progress;
	void *iport;
	struct work_struct tport_del_work;
	struct completion *tport_del_done;
	struct fc_rport *rport;
	char str_wwpn[20];
	char str_wwnn[20];
};

/* OXID pool related structures */
struct reclaim_entry_s {
	struct list_head links;
	/* oxid that needs to be freed after 2*r_a_tov */
	uint16_t oxid_idx;
	/* in jiffies; used to compute the waiting time */
	unsigned long expires;
	unsigned long *bitmap;
};

/* used for allocating oxids for fabric and fdmi requests */
struct fnic_oxid_pool_s {
	DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ);
	int sz;       /* size of the pool or block */
	int next_idx; /* used for cycling through the oxid pool */

	/* retry schedule free */
	DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ);
	struct delayed_work schedule_oxid_free_retry;

	/* List of oxids that need to be freed and reclaimed.
	 * This list is shared by all the oxid pools.
	 */
	struct list_head oxid_reclaim_list;
	/* Work associated with reclaim list */
	struct delayed_work oxid_reclaim_work;
};

/* iport */
enum fnic_iport_state_e {
	FNIC_IPORT_STATE_INIT = 0,
	FNIC_IPORT_STATE_LINK_WAIT,
	FNIC_IPORT_STATE_FIP,
	FNIC_IPORT_STATE_FABRIC_DISC,
	FNIC_IPORT_STATE_READY
};

struct fnic_iport_s {
	enum fnic_iport_state_e state;
	struct fnic *fnic;
	uint64_t boot_time;
	uint32_t flags;
	int usefip;
	uint8_t hwmac[6];  /* HW MAC Addr */
	uint8_t fpma[6];   /* Fabric Provided MA */
	uint8_t fcfmac[6]; /* MAC addr of Fabric */
	uint16_t vlan_id;
	uint32_t fcid;

	/* oxid pool */
	struct fnic_oxid_pool_s oxid_pool;

	/*
	 * Fabric requests are serialized, with only one request at a time;
	 * track its oxid so an abort can be sent.
	 */
	uint16_t active_oxid_fabric_req;
	/* fdmi only */
	uint16_t active_oxid_fdmi_plogi;
	uint16_t active_oxid_fdmi_rhba;
	uint16_t active_oxid_fdmi_rpa;

	struct fnic_fip_fcf_s selected_fcf;
	struct fnic_fdls_fip_s fip;
	struct fnic_fdls_fabric_s fabric;
	struct list_head tport_list;
	struct list_head tport_list_pending_del;
	/* list of tports for which we are yet to send PLOGO */
	struct list_head inprocess_tport_list;
	struct list_head deleted_tport_list;
	struct work_struct tport_event_work;
	uint32_t e_d_tov; /* msec */
	uint32_t r_a_tov; /* msec */
	uint32_t link_supported_speeds;
	uint32_t max_flogi_retries;
	uint32_t max_plogi_retries;
	uint32_t plogi_timeout;
	uint32_t service_params;
	uint64_t wwpn;
	uint64_t wwnn;
	uint16_t max_payload_size;
	spinlock_t deleted_tport_lst_lock;
	struct completion *flogi_reg_done;
	struct fnic_iport_stats iport_stats;
	char str_wwpn[20];
	char str_wwnn[20];
};

struct rport_dd_data_s {
	struct fnic_tport_s *tport;
	struct fnic_iport_s *iport;
};

enum fnic_recv_frame_type_e {
	FNIC_FABRIC_FLOGI_RSP = 1,
	FNIC_FABRIC_PLOGI_RSP,
	FNIC_FABRIC_RPN_RSP,
	FNIC_FABRIC_RFT_RSP,
	FNIC_FABRIC_RFF_RSP,
	FNIC_FABRIC_SCR_RSP,
	FNIC_FABRIC_GPN_FT_RSP,
	FNIC_FABRIC_BLS_ABTS_RSP,
	FNIC_FDMI_PLOGI_RSP,
	FNIC_FDMI_REG_HBA_RSP,
	FNIC_FDMI_RPA_RSP,
	FNIC_FDMI_BLS_ABTS_RSP,
	FNIC_FABRIC_LOGO_RSP,

	/* responses to target requests */
	FNIC_TPORT_PLOGI_RSP,
	FNIC_TPORT_PRLI_RSP,
	FNIC_TPORT_ADISC_RSP,
	FNIC_TPORT_BLS_ABTS_RSP,
	FNIC_TPORT_LOGO_RSP,

	/* unsolicited requests */
	FNIC_BLS_ABTS_REQ,
	FNIC_ELS_PLOGI_REQ,
	FNIC_ELS_RSCN_REQ,
	FNIC_ELS_LOGO_REQ,
	FNIC_ELS_ECHO_REQ,
	FNIC_ELS_ADISC,
	FNIC_ELS_RLS,
	FNIC_ELS_RRQ,
	FNIC_ELS_UNSUPPORTED_REQ,
};

enum fnic_port_speeds {
	DCEM_PORTSPEED_NONE = 0,
	DCEM_PORTSPEED_1G = 1000,
	DCEM_PORTSPEED_2G = 2000,
	DCEM_PORTSPEED_4G = 4000,
	DCEM_PORTSPEED_8G = 8000,
	DCEM_PORTSPEED_10G = 10000,
	DCEM_PORTSPEED_16G = 16000,
	DCEM_PORTSPEED_20G = 20000,
	DCEM_PORTSPEED_25G = 25000,
	DCEM_PORTSPEED_32G = 32000,
	DCEM_PORTSPEED_40G = 40000,
	DCEM_PORTSPEED_4x10G = 41000,
	DCEM_PORTSPEED_50G = 50000,
	DCEM_PORTSPEED_64G = 64000,
	DCEM_PORTSPEED_100G = 100000,
	DCEM_PORTSPEED_128G = 128000,
};

/* Function Declarations */
/* fdls_disc.c */
void fnic_fdls_disc_init(struct fnic_iport_s *iport);
void fnic_fdls_disc_start(struct fnic_iport_s *iport);
void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
			  int len, int fchdr_offset);
void fnic_fdls_link_down(struct fnic_iport_s *iport);
int fdls_init_frame_pool(struct fnic_iport_s *iport);
uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport);
uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
			 uint16_t *active_oxid);
void fdls_free_oxid(struct fnic_iport_s *iport,
		    uint16_t oxid, uint16_t *active_oxid);
void fdls_tgt_logout(struct fnic_iport_s *iport,
		     struct fnic_tport_s *tport);
void fnic_del_fabric_timer_sync(struct fnic *fnic);
void fnic_del_tport_timer_sync(struct fnic *fnic,
			       struct fnic_tport_s *tport);
void fdls_send_fabric_logo(struct fnic_iport_s *iport);
int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
					  struct fc_frame_header *fchdr);
void fdls_send_tport_abts(struct fnic_iport_s *iport,
			  struct fnic_tport_s *tport);
bool fdls_delete_tport(struct fnic_iport_s *iport,
		       struct fnic_tport_s *tport);
void fdls_fdmi_timer_callback(struct timer_list *t);
void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport);

/* fnic_fcs.c */
void fnic_fdls_init(struct fnic *fnic, int usefip);
void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
			  int frame_size);
void fnic_fcoe_send_vlan_req(struct fnic *fnic);
int fnic_send_fip_frame(struct fnic_iport_s *iport,
			void *frame, int frame_size);
void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
			       uint8_t *fcid);
void fnic_fdls_add_tport(struct fnic_iport_s *iport,
			 struct fnic_tport_s *tport, unsigned long flags);
void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
			    struct fnic_tport_s *tport,
			    unsigned long flags);

/* fip.c */
void fnic_fcoe_send_vlan_req(struct fnic *fnic);
void fnic_common_fip_cleanup(struct fnic *fnic);
int fdls_fip_recv_frame(struct fnic *fnic, void *frame);
void fnic_handle_fcs_ka_timer(struct timer_list *t);
void fnic_handle_enode_ka_timer(struct timer_list *t);
void fnic_handle_vn_ka_timer(struct timer_list *t);
void fnic_handle_fip_timer(struct timer_list *t);
extern void fdls_fabric_timer_callback(struct timer_list *t);

/* fnic_scsi.c */
void fnic_scsi_fcpio_reset(struct fnic *fnic);
extern void fdls_fabric_timer_callback(struct timer_list *t);
void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
			      void *fp);
struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
					     uint32_t fcid);
struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
					     uint64_t wwpn);

#endif /* _FNIC_FDLS_H_ */
@@ -1,48 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _FNIC_FIP_H_
#define _FNIC_FIP_H_

#define FCOE_CTLR_START_DELAY    2000 /* ms after first adv. to choose FCF */
#define FCOE_CTLR_FIPVLAN_TOV    2000 /* ms after FIP VLAN disc */
#define FCOE_CTLR_MAX_SOL        8

#define FINC_MAX_FLOGI_REJECTS   8

struct vlan {
	__be16 vid;
	__be16 type;
};

/*
 * VLAN entry.
 */
struct fcoe_vlan {
	struct list_head list;
	u16 vid;       /* vlan ID */
	u16 sol_count; /* no. of sols sent */
	u16 state;     /* state */
};

enum fip_vlan_state {
	FIP_VLAN_AVAIL = 0,  /* don't do anything */
	FIP_VLAN_SENT = 1,   /* sent */
	FIP_VLAN_USED = 2,   /* succeed */
	FIP_VLAN_FAILED = 3, /* failed to response */
};

struct fip_vlan {
	struct ethhdr eth;
	struct fip_header fip;
	struct {
		struct fip_mac_desc mac;
		struct fip_wwn_desc wwnn;
	} desc;
};

#endif /* __FINC_FIP_H_ */
@@ -7,6 +7,7 @@
 #define _FNIC_IO_H_
 
 #include <scsi/fc/fc_fcp.h>
+#include "fnic_fdls.h"
 
 #define FNIC_DFLT_SG_DESC_CNT	32
 #define FNIC_MAX_SG_DESC_CNT	256 /* Maximum descriptors per sgl */
@@ -41,6 +42,8 @@ enum fnic_ioreq_state {
 };
 
 struct fnic_io_req {
+	struct fnic_iport_s *iport;
+	struct fnic_tport_s *tport;
 	struct host_sg_desc *sgl_list; /* sgl list */
 	void *sgl_list_alloc; /* sgl list address used for free */
 	dma_addr_t sense_buf_pa; /* dma address for sense buffer */
@@ -55,15 +58,4 @@ struct fnic_io_req {
 	unsigned int tag;
 	struct scsi_cmnd *sc; /* midlayer's cmd pointer */
 };
-
-enum fnic_port_speeds {
-	DCEM_PORTSPEED_NONE = 0,
-	DCEM_PORTSPEED_1G = 1000,
-	DCEM_PORTSPEED_10G = 10000,
-	DCEM_PORTSPEED_20G = 20000,
-	DCEM_PORTSPEED_25G = 25000,
-	DCEM_PORTSPEED_40G = 40000,
-	DCEM_PORTSPEED_4x10G = 41000,
-	DCEM_PORTSPEED_100G = 100000,
-};
 #endif /* _FNIC_IO_H_ */
@@ -7,7 +7,7 @@
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <scsi/libfc.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/fc_frame.h>
 #include "vnic_dev.h"
 #include "vnic_intr.h"
@@ -222,7 +222,7 @@ int fnic_request_intr(struct fnic *fnic)
 				  fnic->msix[i].devname,
 				  fnic->msix[i].devid);
 		if (err) {
-			FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+			FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
				     "request_irq failed with error: %d\n",
				     err);
			fnic_free_intr(fnic);
@@ -250,10 +250,10 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
	 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
	 * (last INTR is used for WQ/RQ errors and notification area)
	 */
-	FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+	FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "rq-array size: %d wq-array size: %d copy-wq array size: %d\n",
		     n, m, o);
-	FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+	FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n",
		     fnic->rq_count, fnic->raw_wq_count,
		     fnic->wq_copy_count, fnic->cq_count);
@@ -265,17 +265,17 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
 
	vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs,
					  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
-	FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+	FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
		     "allocated %d MSI-X vectors\n",
		     vec_count);
 
	if (vec_count > 0) {
		if (vec_count < vecs) {
-			FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+			FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
				     "interrupts number mismatch: vec_count: %d vecs: %d\n",
				     vec_count, vecs);
			if (vec_count < min_irqs) {
-				FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+				FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
					     "no interrupts for copy wq\n");
				return 1;
			}
@@ -287,7 +287,7 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
		fnic->wq_copy_count = vec_count - n - m - 1;
		fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count;
		if (fnic->cq_count != vec_count - 1) {
-			FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+			FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
				     "CQ count: %d does not match MSI-X vector count: %d\n",
				     fnic->cq_count, vec_count);
			fnic->cq_count = vec_count - 1;
@@ -295,23 +295,23 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
		fnic->intr_count = vec_count;
		fnic->err_intr_offset = fnic->rq_count + fnic->wq_count;
 
-		FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+		FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "rq_count: %d raw_wq_count: %d copy_wq_base: %d\n",
			     fnic->rq_count,
			     fnic->raw_wq_count, fnic->copy_wq_base);
 
-		FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+		FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "wq_copy_count: %d wq_count: %d cq_count: %d\n",
			     fnic->wq_copy_count,
			     fnic->wq_count, fnic->cq_count);
 
-		FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+		FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "intr_count: %d err_intr_offset: %u",
			     fnic->intr_count,
			     fnic->err_intr_offset);
 
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX);
-		FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+		FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
			     "fnic using MSI-X\n");
		return 0;
	}
@@ -351,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
			fnic->intr_count = 1;
			fnic->err_intr_offset = 0;
 
-			FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+			FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
				     "Using MSI Interrupts\n");
			vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
 
@@ -377,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
			fnic->cq_count = 3;
			fnic->intr_count = 3;
 
-			FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+			FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
				     "Using Legacy Interrupts\n");
			vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
File diff suppressed because it is too large.

drivers/scsi/fnic/fnic_pci_subsys_devid.c (new file, 131 lines)
@@ -0,0 +1,131 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/if_ether.h>
#include "fnic.h"

static struct fnic_pcie_device fnic_pcie_device_table[] = {
	{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA,
	 "VIC 1280"},
	{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI,
	 "VIC 1240"},
	{PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
	 PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"},
	{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE,
	 "VIC 1285"},
	{PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
	 PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"},
	{PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
	 PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"},
	{PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE,
	 "VIC 1227T"},

	{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA,
	 "VIC 1340"},
	{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW,
	 "VIC 1380"},
	{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN,
	 "C3260-SIOC"},
	{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE,
	 "VIC 1385"},
	{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2,
	 "C3260-SIOC"},
	{PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT,
	 "VIC 1387"},

	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY,
	 "VIC 1457"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
	 PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
	 PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE,
	 "VIC 1485"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
	 PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
	 PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA,
	 "VIC 1495"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT,
	 "VIC 1497"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE,
	 "VIC 1467"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON,
	 "VIC 1477"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
	 PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"},
	{PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
	 PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"},

	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN,
	 "VIC 15420"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
	 PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW,
	 "VIC 15411"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
	 PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU,
	 "VIC 15238"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA,
	 "VIC 15422"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
	 PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
	 PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
	 PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH,
	 "VIC 15230"},
	{PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA,
	 "VIC 15427"},

	{0,}
};

int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
			   char **subsys_desc)
{
	unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC;
	int max = ARRAY_SIZE(fnic_pcie_device_table);
	struct fnic_pcie_device *t = fnic_pcie_device_table;
	int index = 0;

	if (pdev->device != device)
		return 1;

	while (t->device != 0) {
		if (memcmp((char *) &pdev->subsystem_device,
			   (char *) &t->subsystem_device, sizeof(short)) == 0)
			break;
		t++;
		index++;
	}

	if (index >= max - 1) {
		*desc = NULL;
		*subsys_desc = NULL;
		return 1;
	}

	*desc = fnic_pcie_device_table[index].desc;
	*subsys_desc = fnic_pcie_device_table[index].subsys_desc;
	return 0;
}
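/*
 * Illustration (not part of the driver): typical use of the lookup above
 * during probe. "pdev" is the PCI device being probed; the messages are
 * hypothetical.
 *
 *	char *desc = NULL, *subsys_desc = NULL;
 *
 *	if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0)
 *		dev_info(&pdev->dev, "ASIC %s, adapter %s\n", desc, subsys_desc);
 *	else
 *		dev_info(&pdev->dev, "unknown subsystem device 0x%x\n",
 *			 pdev->subsystem_device);
 */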
@@ -30,9 +30,7 @@ int fnic_get_vnic_config(struct fnic *fnic)
 			offsetof(struct vnic_fc_config, m), \
 			sizeof(c->m), &c->m); \
 		if (err) { \
-			shost_printk(KERN_ERR, fnic->lport->host, \
-				"Error getting %s, %d\n", #m, \
-				err); \
+			dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \
 			return err; \
 		} \
 	} while (0);
@@ -60,6 +58,11 @@ int fnic_get_vnic_config(struct fnic *fnic)
 	GET_CONFIG(intr_mode);
 	GET_CONFIG(wq_copy_count);
 
+	if ((c->flags & (VFCF_FC_INITIATOR)) == 0) {
+		dev_info(&fnic->pdev->dev, "vNIC role not defined (def role: FC Init)\n");
+		c->flags |= VFCF_FC_INITIATOR;
+	}
+
 	c->wq_enet_desc_count =
 		min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
 		      max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
@@ -139,40 +142,28 @@ int fnic_get_vnic_config(struct fnic *fnic)
 
 	c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
 
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC MAC addr %pM "
-		     "wq/wq_copy/rq %d/%d/%d\n",
-		     fnic->ctlr.ctl_src_addr,
+	dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n",
+		 fnic->data_src_addr,
		 c->wq_enet_desc_count, c->wq_copy_desc_count,
		 c->rq_desc_count);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC node wwn %llx port wwn %llx\n",
+	dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n",
		 c->node_wwn, c->port_wwn);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC ed_tov %d ra_tov %d\n",
+	dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n",
		 c->ed_tov, c->ra_tov);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC mtu %d intr timer %d\n",
+	dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n",
		 c->maxdatafieldsize, c->intr_timer);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC flags 0x%x luns per tgt %d\n",
+	dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n",
		 c->flags, c->luns_per_tgt);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC flogi_retries %d flogi timeout %d\n",
+	dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n",
		 c->flogi_retries, c->flogi_timeout);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC plogi retries %d plogi timeout %d\n",
+	dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n",
		 c->plogi_retries, c->plogi_timeout);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC io throttle count %d link dn timeout %d\n",
+	dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n",
		 c->io_throttle_count, c->link_down_timeout);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC port dn io retries %d port dn timeout %d\n",
+	dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n",
		 c->port_down_io_retries, c->port_down_timeout);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC wq_copy_count: %d\n", c->wq_copy_count);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC intr mode: %d\n", c->intr_mode);
+	dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count);
+	dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode);
 
 	return 0;
 }
@@ -206,18 +197,12 @@ void fnic_get_res_counts(struct fnic *fnic)
 	fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
 		RES_TYPE_INTR_CTRL);
 
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC fw resources wq_count: %d\n", fnic->wq_count);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC fw resources rq_count: %d\n", fnic->rq_count);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC fw resources cq_count: %d\n", fnic->cq_count);
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC fw resources intr_count: %d\n", fnic->intr_count);
+	dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count);
+	dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
+	dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
+	dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count);
+	dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count);
+	dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count);
 }
 
 void fnic_free_vnic_resources(struct fnic *fnic)
@@ -253,19 +238,17 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
 
 	intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
 
-	shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
+	dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n",
		 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		 intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
		 "MSI-X" : "unknown");
 
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d",
+	dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d",
		 fnic->wq_count, fnic->wq_copy_count,
		 fnic->raw_wq_count, fnic->rq_count);
 
-	shost_printk(KERN_INFO, fnic->lport->host,
-		     "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n",
+	dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n",
		 fnic->cq_count, fnic->intr_count,
		 fnic->config.wq_copy_desc_count);
 
@@ -340,8 +323,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
 					RES_TYPE_INTR_PBA_LEGACY, 0);
 
 	if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-			     "Failed to hook legacy pba resource\n");
+		dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n");
 		err = -ENODEV;
 		goto err_out_cleanup;
 	}
@@ -444,8 +426,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
 	/* init the stats memory by making the first call here */
 	err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
 	if (err) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-			     "vnic_dev_stats_dump failed - x%x\n", err);
+		dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err);
 		goto err_out_cleanup;
 	}
 
File diff suppressed because it is too large.
@@ -3,6 +3,7 @@
 #ifndef _FNIC_STATS_H_
 #define _FNIC_STATS_H_
+#define FNIC_MQ_MAX_QUEUES 64
 #include <scsi/scsi_transport_fc.h>
 
 struct stats_timestamps {
 	struct timespec64 last_reset_time;
@@ -63,6 +64,7 @@ struct reset_stats {
 	atomic64_t fw_resets;
 	atomic64_t fw_reset_completions;
 	atomic64_t fw_reset_failures;
+	atomic64_t fw_reset_timeouts;
 	atomic64_t fnic_resets;
 	atomic64_t fnic_reset_completions;
 	atomic64_t fnic_reset_failures;
@@ -102,10 +104,51 @@ struct misc_stats {
 	atomic64_t no_icmnd_itmf_cmpls;
 	atomic64_t check_condition;
 	atomic64_t queue_fulls;
-	atomic64_t rport_not_ready;
+	atomic64_t tport_not_ready;
+	atomic64_t iport_not_ready;
 	atomic64_t frame_errors;
-	atomic64_t current_port_speed;
+	atomic64_t intx_dummy;
+	atomic64_t port_speed_in_mbps;
 };
 
+struct fnic_iport_stats {
+	atomic64_t num_linkdn;
+	atomic64_t num_linkup;
+	atomic64_t link_failure_count;
+	atomic64_t num_rscns;
+	atomic64_t rscn_redisc;
+	atomic64_t rscn_not_redisc;
+	atomic64_t frame_err;
+	atomic64_t num_rnid;
+	atomic64_t fabric_flogi_sent;
+	atomic64_t fabric_flogi_ls_accepts;
+	atomic64_t fabric_flogi_ls_rejects;
+	atomic64_t fabric_flogi_misc_rejects;
+	atomic64_t fabric_plogi_sent;
+	atomic64_t fabric_plogi_ls_accepts;
+	atomic64_t fabric_plogi_ls_rejects;
+	atomic64_t fabric_plogi_misc_rejects;
+	atomic64_t fabric_scr_sent;
+	atomic64_t fabric_scr_ls_accepts;
+	atomic64_t fabric_scr_ls_rejects;
+	atomic64_t fabric_scr_misc_rejects;
+	atomic64_t fabric_logo_sent;
+	atomic64_t tport_alive;
+	atomic64_t tport_plogi_sent;
+	atomic64_t tport_plogi_ls_accepts;
+	atomic64_t tport_plogi_ls_rejects;
+	atomic64_t tport_plogi_misc_rejects;
+	atomic64_t tport_prli_sent;
+	atomic64_t tport_prli_ls_accepts;
+	atomic64_t tport_prli_ls_rejects;
+	atomic64_t tport_prli_misc_rejects;
+	atomic64_t tport_adisc_sent;
+	atomic64_t tport_adisc_ls_accepts;
+	atomic64_t tport_adisc_ls_rejects;
+	atomic64_t tport_logo_sent;
+	atomic64_t unsupported_frames_ls_rejects;
+	atomic64_t unsupported_frames_dropped;
+};
+
 struct fnic_stats {
@@ -116,6 +159,7 @@ struct fnic_stats {
 	struct reset_stats reset_stats;
 	struct fw_stats fw_stats;
 	struct vlan_stats vlan_stats;
+	struct fc_host_statistics host_stats;
 	struct misc_stats misc_stats;
 };
 
@@ -127,6 +171,5 @@ struct stats_debug_info {
 };
 
 int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
-void fnic_stats_debugfs_init(struct fnic *);
 void fnic_stats_debugfs_remove(struct fnic *);
+const char *fnic_role_to_str(unsigned int role);
 #endif /* _FNIC_STATS_H_ */
@@ -8,6 +8,7 @@
 #include <linux/kallsyms.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
+#include <scsi/scsi_transport_fc.h>
 #include "fnic_io.h"
 #include "fnic.h"
 
@@ -29,6 +30,17 @@ int fnic_fc_tracing_enabled = 1;
 int fnic_fc_trace_cleared = 1;
 static DEFINE_SPINLOCK(fnic_fc_trace_lock);
 
+static const char * const fnic_role_str[] = {
+	[FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator",
+};
+
+const char *fnic_role_to_str(unsigned int role)
+{
+	if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role])
+		return "Unknown";
+
+	return fnic_role_str[role];
+}
+
 /*
  * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
 
@@ -423,7 +435,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
 		  "Number of Check Conditions encountered: %lld\n"
 		  "Number of QUEUE Fulls: %lld\n"
 		  "Number of rport not ready: %lld\n"
-		  "Number of receive frame errors: %lld\n",
+		  "Number of receive frame errors: %lld\n"
+		  "Port speed (in Mbps): %lld\n",
 		  (u64)stats->misc_stats.last_isr_time,
 		  (s64)val1.tv_sec, val1.tv_nsec,
 		  (u64)stats->misc_stats.last_ack_time,
@@ -446,18 +459,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
 		  (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
 		  (u64)atomic64_read(&stats->misc_stats.check_condition),
 		  (u64)atomic64_read(&stats->misc_stats.queue_fulls),
-		  (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
-		  (u64)atomic64_read(&stats->misc_stats.frame_errors));
-
-	len += scnprintf(debug->debug_buffer + len, buf_size - len,
-		  "Firmware reported port speed: %llu\n",
-		  (u64)atomic64_read(
-				&stats->misc_stats.current_port_speed));
+		  (u64)atomic64_read(&stats->misc_stats.tport_not_ready),
+		  (u64)atomic64_read(&stats->misc_stats.frame_errors),
+		  (u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps));
 
 	return len;
 
 }
 
+int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic)
+{
+	struct fnic_iport_s *iport = &fnic->iport;
+	int buf_size = info->buf_size;
+	int len = info->buffer_len;
+	struct fnic_tport_s *tport, *next;
+	unsigned long flags;
+
+	len += snprintf(info->debug_buffer + len, buf_size - len,
+			"------------------------------------------\n"
+			"\t\t Debug Info\n"
+			"------------------------------------------\n");
+	len += snprintf(info->debug_buffer + len, buf_size - len,
+			"fnic Name:%s number:%d Role:%s State:%s\n",
+			fnic->name, fnic->fnic_num,
+			fnic_role_to_str(fnic->role),
+			fnic_state_to_str(fnic->state));
+	len += snprintf(info->debug_buffer + len, buf_size - len,
+			"iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n",
+			iport->state, iport->flags, iport->vlan_id, iport->fcid);
+	len += snprintf(info->debug_buffer + len, buf_size - len,
+			"usefip:%d fip_state:%d fip_flogi_retry:%d\n",
+			iport->usefip, iport->fip.state, iport->fip.flogi_retry);
+	len += snprintf(info->debug_buffer + len, buf_size - len,
+			"fpma %02x:%02x:%02x:%02x:%02x:%02x",
+			iport->fpma[5], iport->fpma[4], iport->fpma[3],
+			iport->fpma[2], iport->fpma[1], iport->fpma[0]);
+	len += snprintf(info->debug_buffer + len, buf_size - len,
+			"fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n",
+			iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3],
+			iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]);
+	len += snprintf(info->debug_buffer + len, buf_size - len,
+			"fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n",
+			iport->fabric.state, iport->fabric.flags,
+			iport->fabric.retry_counter, iport->e_d_tov,
+			iport->r_a_tov);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+		len += snprintf(info->debug_buffer + len, buf_size - len,
+				"tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n",
+				tport->fcid, tport->state, tport->flags,
+				atomic_read(&tport->in_flight),
+				tport->retry_counter);
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+	return len;
+}
+
 /*
  * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
  *
@@ -688,7 +751,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
 	 */
 	if (frame_type == FNIC_FC_RECV) {
 		eth_fcoe_hdr_len = sizeof(struct ethhdr) +
-			sizeof(struct fcoe_hdr);
+					sizeof(struct fcoe_hdr);
 		memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
 		/* Copy the rest of data frame */
 		memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_async_xchg_ctx *ctxp =
 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
-	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct rqb_dmabuf *nvmebuf;
 	struct lpfc_hba *phba = ctxp->phba;
 	unsigned long iflag;
 
@@ -1251,13 +1251,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
 			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	nvmebuf = ctxp->rqb_buffer;
 	if (!nvmebuf) {
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 				"6425 Defer rcv: no buffer oxid x%x: "
 				"flg %x ste %x\n",
 				ctxp->oxid, ctxp->flag, ctxp->state);
 		return;
 	}
+	ctxp->rqb_buffer = NULL;
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 
 	tgtp = phba->targetport->private;
 	if (tgtp)
@@ -1265,9 +1270,6 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
 	/* Free the nvmebuf since a new buffer already replaced it */
 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-	spin_lock_irqsave(&ctxp->ctxlock, iflag);
-	ctxp->rqb_buffer = NULL;
-	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 }
 
 /**
@@ -127,6 +127,10 @@ static int efivarfs_d_compare(const struct dentry *dentry,
 {
 	int guid = len - EFI_VARIABLE_GUID_LEN;
 
+	/* Parallel lookups may produce a temporary invalid filename */
+	if (guid <= 0)
+		return 1;
+
 	if (name->len != len)
 		return 1;
 
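/*
 * Worked example (not part of the patch), assuming EFI_VARIABLE_GUID_LEN
 * is the 36-character textual GUID length:
 *
 *	"Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c" has len == 45,
 *	so guid == 45 - 36 == 9, the offset where the GUID string begins.
 *	A truncated name such as "Boot" yields guid <= 0 and is now rejected
 *	before the GUID portion is ever compared at that offset.
 */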
@@ -218,6 +218,7 @@ struct eventpoll {
 	/* used to optimize loop detection check */
 	u64 gen;
 	struct hlist_head refs;
+	u8 loop_check_depth;
 
 	/*
 	 * usage count, used together with epitem->dying to
@@ -2088,23 +2089,24 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 }
 
 /**
- * ep_loop_check_proc - verify that adding an epoll file inside another
- *                      epoll structure does not violate the constraints, in
- *                      terms of closed loops, or too deep chains (which can
- *                      result in excessive stack usage).
+ * ep_loop_check_proc - verify that adding an epoll file @ep inside another
+ *                      epoll file does not create closed loops, and
+ *                      determine the depth of the subtree starting at @ep
  *
  * @ep: the &struct eventpoll to be currently checked.
  * @depth: Current depth of the path being checked.
  *
- * Return: %zero if adding the epoll @file inside current epoll
- * structure @ep does not violate the constraints, or %-1 otherwise.
+ * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.
  */
 static int ep_loop_check_proc(struct eventpoll *ep, int depth)
 {
-	int error = 0;
+	int result = 0;
 	struct rb_node *rbp;
 	struct epitem *epi;
 
+	if (ep->gen == loop_check_gen)
+		return ep->loop_check_depth;
+
 	mutex_lock_nested(&ep->mtx, depth + 1);
 	ep->gen = loop_check_gen;
 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
@@ -2112,13 +2114,11 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
 		if (unlikely(is_file_epoll(epi->ffd.file))) {
 			struct eventpoll *ep_tovisit;
 			ep_tovisit = epi->ffd.file->private_data;
-			if (ep_tovisit->gen == loop_check_gen)
-				continue;
 			if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
-				error = -1;
+				result = INT_MAX;
 			else
-				error = ep_loop_check_proc(ep_tovisit, depth + 1);
-			if (error != 0)
+				result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);
+			if (result > EP_MAX_NESTS)
 				break;
 		} else {
 			/*
@@ -2132,9 +2132,27 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
 			list_file(epi->ffd.file);
 		}
 	}
+	ep->loop_check_depth = result;
 	mutex_unlock(&ep->mtx);
 
-	return error;
+	return result;
 }
 
+/**
+ * ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards
+ */
+static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)
+{
+	int result = 0;
+	struct epitem *epi;
+
+	if (ep->gen == loop_check_gen)
+		return ep->loop_check_depth;
+	hlist_for_each_entry_rcu(epi, &ep->refs, fllink)
+		result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);
+	ep->gen = loop_check_gen;
+	ep->loop_check_depth = result;
+	return result;
+}
+
 /**
@@ -2150,8 +2168,22 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
  */
 static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
 {
+	int depth, upwards_depth;
+
 	inserting_into = ep;
-	return ep_loop_check_proc(to, 0);
+	/*
+	 * Check how deep down we can get from @to, and whether it is possible
+	 * to loop up to @ep.
+	 */
+	depth = ep_loop_check_proc(to, 0);
+	if (depth > EP_MAX_NESTS)
+		return -1;
+	/* Check how far up we can go from @ep. */
+	rcu_read_lock();
+	upwards_depth = ep_get_upwards_depth_proc(ep, 0);
+	rcu_read_unlock();
+
+	return (depth + 1 + upwards_depth > EP_MAX_NESTS) ? -1 : 0;
 }
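/*
 * Worked example (not from the patch): suppose epoll file A is being
 * inserted into "to", the subtree below "to" is 2 levels deep, and A is
 * itself watched by one epoll file above it (upwards depth 1). With
 * EP_MAX_NESTS == 4 as in mainline, the insert passes because
 * depth + 1 + upwards_depth == 2 + 1 + 1 <= 4; one more level either
 * above A or below "to" would make the sum 5 and the insert is rejected.
 */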
 
 static void clear_tfile_check_list(void)
@@ -18,7 +18,7 @@
  * adistance value (slightly faster) than default DRAM adistance to be part of
  * the same memory tier.
  */
-#define MEMTIER_ADISTANCE_DRAM	((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_ADISTANCE_DRAM	((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
 
 struct memory_tier;
 struct memory_dev_type {
@@ -2991,6 +2991,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb)
 	skb->transport_header = skb->data - skb->head;
 }
 
+/**
+ * skb_reset_transport_header_careful - conditionally reset transport header
+ * @skb: buffer to alter
+ *
+ * Hardened version of skb_reset_transport_header().
+ *
+ * Returns: true if the operation was a success.
+ */
+static inline bool __must_check
+skb_reset_transport_header_careful(struct sk_buff *skb)
+{
+	long offset = skb->data - skb->head;
+
+	if (unlikely(offset != (typeof(skb->transport_header))offset))
+		return false;
+
+	if (unlikely(offset == (typeof(skb->transport_header))~0U))
+		return false;
+
+	skb->transport_header = offset;
+	return true;
+}
 
 static inline void skb_set_transport_header(struct sk_buff *skb,
 					    const int offset)
 {
@@ -148,7 +148,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
 	ops = rcu_dereference(inet6_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment)) {
-		skb_reset_transport_header(skb);
+		if (!skb_reset_transport_header_careful(skb))
+			goto out;
+
 		segs = ops->callbacks.gso_segment(skb, features);
 		if (!segs)
 			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
@@ -358,7 +358,7 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
 
 static int
 xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
-		     struct cmsghdr *cmsg, int ret)
+		     unsigned int *msg_flags, struct cmsghdr *cmsg, int ret)
 {
 	u8 content_type = tls_get_record_type(sock->sk, cmsg);
 	u8 level, description;
@@ -371,7 +371,7 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
 		 * record, even though there might be more frames
 		 * waiting to be decrypted.
 		 */
-		msg->msg_flags &= ~MSG_EOR;
+		*msg_flags &= ~MSG_EOR;
 		break;
 	case TLS_RECORD_TYPE_ALERT:
 		tls_alert_recv(sock->sk, msg, &level, &description);
@@ -386,19 +386,33 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
 }
 
 static int
-xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
+xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
 {
 	union {
 		struct cmsghdr cmsg;
 		u8 buf[CMSG_SPACE(sizeof(u8))];
 	} u;
+	u8 alert[2];
+	struct kvec alert_kvec = {
+		.iov_base = alert,
+		.iov_len = sizeof(alert),
+	};
+	struct msghdr msg = {
+		.msg_flags = *msg_flags,
+		.msg_control = &u,
+		.msg_controllen = sizeof(u),
+	};
 	int ret;
 
-	msg->msg_control = &u;
-	msg->msg_controllen = sizeof(u);
-	ret = sock_recvmsg(sock, msg, flags);
-	if (msg->msg_controllen != sizeof(u))
-		ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
+	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
+		      alert_kvec.iov_len);
+	ret = sock_recvmsg(sock, &msg, flags);
+	if (ret > 0) {
+		if (tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT)
+			iov_iter_revert(&msg.msg_iter, ret);
+		ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
+					   -EAGAIN);
+	}
 	return ret;
 }
 
@@ -408,7 +422,13 @@ xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
 	ssize_t ret;
 	if (seek != 0)
 		iov_iter_advance(&msg->msg_iter, seek);
-	ret = xs_sock_recv_cmsg(sock, msg, flags);
+	ret = sock_recvmsg(sock, msg, flags);
+	/* Handle TLS inband control message lazily */
+	if (msg->msg_flags & MSG_CTRUNC) {
+		msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
+		if (ret == 0 || ret == -EIO)
+			ret = xs_sock_recv_cmsg(sock, &msg->msg_flags, flags);
+	}
 	return ret > 0 ? ret + seek : ret;
 }
 
@@ -434,7 +454,7 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
 		size_t count)
 {
 	iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
-	return xs_sock_recv_cmsg(sock, msg, flags);
+	return xs_sock_recvmsg(sock, msg, flags, 0);
 }
 
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
@ -1,3 +1,83 @@
* Sat Oct 11 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.40.1.el10_0]
- scsi: lpfc: Fix buffer free/clear order in deferred receive path (CKI Backport Bot) [RHEL-119125] {CVE-2025-39841}
- efivarfs: Fix slab-out-of-bounds in efivarfs_d_compare (CKI Backport Bot) [RHEL-118460] {CVE-2025-39817}
- ibmveth: Add multi buffers rx replenishment hcall support (Mamatha Inamdar) [RHEL-116192]
- net: ibmveth: Reset the adapter when unexpected states are detected (Mamatha Inamdar) [RHEL-116192]
- SUNRPC: call xs_sock_process_cmsg for all cmsg (Olga Kornievskaia) [RHEL-110813]
- sunrpc: fix client side handling of tls alerts (Olga Kornievskaia) [RHEL-110813] {CVE-2025-38571}
- s390/pci: Do not try re-enabling load/store if device is disabled (CKI Backport Bot) [RHEL-114447]
- s390/pci: Fix stale function handles in error handling (CKI Backport Bot) [RHEL-114447]
- s390/hypfs: Enable limited access during lockdown (CKI Backport Bot) [RHEL-114430]
- s390/hypfs: Avoid unnecessary ioctl registration in debugfs (CKI Backport Bot) [RHEL-114430]
- ibmvnic: Use ndo_get_stats64 to fix inaccurate SAR reporting (Mamatha Inamdar) [RHEL-114438]
- ibmvnic: Fix hardcoded NUM_RX_STATS/NUM_TX_STATS with dynamic sizeof (Mamatha Inamdar) [RHEL-114438]
- ibmvnic: Add stat for tx direct vs tx batched (Mamatha Inamdar) [RHEL-114438]
- ipv6: reject malicious packets in ipv6_gso_segment() (CKI Backport Bot) [RHEL-113248] {CVE-2025-38572}
- enic: fix incorrect MTU comparison in enic_change_mtu() (John Meneghini) [RHEL-108265]
- net/enic: Allow at least 8 RQs to always be used (John Meneghini) [RHEL-108265]
- enic: get max rq & wq entries supported by hw, 16K queues (John Meneghini) [RHEL-106602]
- enic: cleanup of enic wq request completion path (John Meneghini) [RHEL-106602]
- enic: added enic_wq.c and enic_wq.h (John Meneghini) [RHEL-106602]
- enic: remove unused function cq_enet_wq_desc_dec (John Meneghini) [RHEL-106602]
- enic: enable rq extended cq support (John Meneghini) [RHEL-106602]
- enic: enic rq extended cq defines (John Meneghini) [RHEL-106602]
- enic: enic rq code reorg (John Meneghini) [RHEL-106602]
- enic: Move function from header file to c file (John Meneghini) [RHEL-106602]
- enic: add dependency on Page Pool (John Meneghini) [RHEL-106602]
- enic: remove copybreak tunable (John Meneghini) [RHEL-106602]
- enic: Use the Page Pool API for RX (John Meneghini) [RHEL-106602]
- enic: Simplify RX handler function (John Meneghini) [RHEL-106602]
- enic: Move RX functions to their own file (John Meneghini) [RHEL-106602]
- enic: Fix typo in comment in table indexed by link speed (John Meneghini) [RHEL-106602]
- enic: Obtain the Link speed only after the link comes up (John Meneghini) [RHEL-106602]
- enic: Move RX coalescing set function (John Meneghini) [RHEL-106602]
- enic: Move kdump check into enic_adjust_resources() (John Meneghini) [RHEL-106602]
- enic: Move enic resource adjustments to separate function (John Meneghini) [RHEL-106602]
- enic: Adjust used MSI-X wq/rq/cq/interrupt resources in a more robust way (John Meneghini) [RHEL-106602]
- enic: Allocate arrays in enic struct based on VIC config (John Meneghini) [RHEL-106602]
- enic: Save resource counts we read from HW (John Meneghini) [RHEL-106602]
- enic: Make MSI-X I/O interrupts come after the other required ones (John Meneghini) [RHEL-106602]
- enic: Create enic_wq/rq structures to bundle per wq/rq data (John Meneghini) [RHEL-106602]
- scsi: fnic: Fix missing DMA mapping error in fnic_send_frame() (John Meneghini) [RHEL-111542]
- scsi: fnic: Set appropriate logging level for log message (John Meneghini) [RHEL-111542]
- scsi: fnic: Add and improve logs in FDMI and FDMI ABTS paths (John Meneghini) [RHEL-111542]
- scsi: fnic: Turn off FDMI ACTIVE flags on link down (John Meneghini) [RHEL-111542]
- scsi: fnic: Fix crash in fnic_wq_cmpl_handler when FDMI times out (John Meneghini) [RHEL-111542]
- scsi: fnic: Remove unnecessary spinlock locking and unlocking (John Meneghini) [RHEL-111539]
- scsi: fnic: Replace fnic->lock_flags with local flags (John Meneghini) [RHEL-111539]
- scsi: fnic: Replace use of sizeof with standard usage (John Meneghini) [RHEL-111539]
- scsi: fnic: Fix indentation and remove unnecessary parenthesis (John Meneghini) [RHEL-111539]
- scsi: fnic: Remove unnecessary debug print (John Meneghini) [RHEL-111539]
- scsi: fnic: Propagate SCSI error code from fnic_scsi_drv_init() (John Meneghini) [RHEL-111539]
- scsi: fnic: Test for memory allocation failure and return error code (John Meneghini) [RHEL-111539]
- scsi: fnic: Return appropriate error code from failure of scsi drv init (John Meneghini) [RHEL-111539]
- scsi: fnic: Return appropriate error code for mem alloc failure (John Meneghini) [RHEL-111539]
- scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro (John Meneghini) [RHEL-111539]
- scsi: fnic: Fix use of uninitialized value in debug message (John Meneghini) [RHEL-111539]
- scsi: fnic: Delete incorrect debugfs error handling (John Meneghini) [RHEL-111539]
- scsi: fnic: Remove unnecessary else to fix warning in FDLS FIP (John Meneghini) [RHEL-111539]
- scsi: fnic: Remove extern definition from .c files (John Meneghini) [RHEL-111539]
- scsi: fnic: Remove unnecessary else and unnecessary break in FDLS (John Meneghini) [RHEL-111539]
- scsi: fnic: Increment driver version (John Meneghini) [RHEL-111539]
- scsi: fnic: Add support to handle port channel RSCN (John Meneghini) [RHEL-111539]
- scsi: fnic: Code cleanup (John Meneghini) [RHEL-111539]
- scsi: fnic: Add stats and related functionality (John Meneghini) [RHEL-111539]
- scsi: fnic: Modify fnic interfaces to use FDLS (John Meneghini) [RHEL-111539]
- scsi: fnic: Modify IO path to use FDLS (John Meneghini) [RHEL-111539]
- scsi: fnic: Add functionality in fnic to support FDLS (John Meneghini) [RHEL-111539]
- scsi: fnic: Add and integrate support for FIP (John Meneghini) [RHEL-111539]
- scsi: fnic: Add and integrate support for FDMI (John Meneghini) [RHEL-111539]
- scsi: fnic: Add Cisco hardware model names (John Meneghini) [RHEL-111539]
- scsi: fnic: Add support for unsolicited requests and responses (John Meneghini) [RHEL-111539]
- scsi: fnic: Add support for target based solicited requests and responses (John Meneghini) [RHEL-111539]
- scsi: fnic: Add support for fabric based solicited requests and responses (John Meneghini) [RHEL-111539]
- scsi: fnic: Add headers and definitions for FDLS (John Meneghini) [RHEL-111539]
- scsi: fnic: Replace shost_printk() with dev_info()/dev_err() (John Meneghini) [RHEL-111539]
- eventpoll: Fix semi-unbounded recursion (CKI Backport Bot) [RHEL-111056] {CVE-2025-38614}
- mm/memory-tier: fix abstract distance calculation overflow (Rafael Aquini) [RHEL-109447]
- KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush (CKI Backport Bot) [RHEL-104737] {CVE-2025-38351}
Resolves: RHEL-104737, RHEL-106602, RHEL-108265, RHEL-109447, RHEL-110813, RHEL-111056, RHEL-111539, RHEL-111542, RHEL-113248, RHEL-114430, RHEL-114438, RHEL-114447, RHEL-116192, RHEL-118460, RHEL-119125

* Sat Oct 04 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-55.39.1.el10_0]
- wifi: ath12k: Decrement TID on RX peer frag setup error handling (CKI Backport Bot) [RHEL-114712] {CVE-2025-39761}
- ALSA: usb-audio: Validate UAC3 power domain descriptors, too (CKI Backport Bot) [RHEL-114695] {CVE-2025-38729}