Import of kernel-4.18.0-553.97.1.el8_10

This commit is contained in:
almalinux-bot-kernel 2026-01-27 04:23:08 +00:00
parent 4934e7026b
commit 8390f5c227
20 changed files with 560 additions and 304 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 10
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 553.94.1
RHEL_RELEASE = 553.97.1
#
# ZSTREAM

View File

@ -19,6 +19,12 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void)
*/
xen_vcpu_setup(0);
/*
* Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
* Refer to comments in xen_hvm_init_time_ops().
*/
xen_hvm_init_time_ops();
/*
* The alternative logic (which patches the unlock/lock) runs before
* the smp bootup code is activated. Hence we need to set this up

View File

@ -31,6 +31,8 @@
/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP 100000
static u64 xen_sched_clock_offset __read_mostly;
/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
@ -57,6 +59,11 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
return xen_clocksource_read();
}
static u64 xen_sched_clock(void)
{
return xen_clocksource_read() - xen_sched_clock_offset;
}
static void xen_read_wallclock(struct timespec64 *ts)
{
struct shared_info *s = HYPERVISOR_shared_info;
@ -361,8 +368,6 @@ void xen_timer_resume(void)
{
int cpu;
pvclock_resume();
if (xen_clockevent != &xen_vcpuop_clockevent)
return;
@ -374,17 +379,20 @@ void xen_timer_resume(void)
}
static const struct pv_time_ops xen_time_ops __initconst = {
.sched_clock = xen_clocksource_read,
.sched_clock = xen_sched_clock,
.steal_clock = xen_steal_clock,
};
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
static u64 xen_clock_value_saved;
void xen_save_time_memory_area(void)
{
struct vcpu_register_time_memory_area t;
int ret;
xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
if (!xen_clock)
return;
@ -404,7 +412,7 @@ void xen_restore_time_memory_area(void)
int ret;
if (!xen_clock)
return;
goto out;
t.addr.v = &xen_clock->pvti;
@ -422,6 +430,11 @@ void xen_restore_time_memory_area(void)
if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
ret);
out:
/* Need pvclock_resume() before using xen_clocksource_read(). */
pvclock_resume();
xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
}
static void xen_setup_vsyscall_time_info(void)
@ -513,6 +526,7 @@ static void __init xen_time_init(void)
void __ref xen_init_time_ops(void)
{
xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.timer_init = xen_time_init;
@ -541,6 +555,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
void __init xen_hvm_init_time_ops(void)
{
static bool hvm_time_initialized;
if (hvm_time_initialized)
return;
/*
* vector callback is needed otherwise we cannot receive interrupts
* on cpu > 0 and at this point we don't know how many cpus are
@ -550,11 +569,26 @@ void __init xen_hvm_init_time_ops(void)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
"disable pv timer\n");
pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer");
return;
}
/*
* Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'.
* __this_cpu_read(xen_vcpu) is still NULL when a Xen HVM guest
* boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec). Accessing
* __this_cpu_read(xen_vcpu) via xen_clocksource_read() would panic.
*
* xen_hvm_init_time_ops() should be called again later, once
* __this_cpu_read(xen_vcpu) is available.
*/
if (!__this_cpu_read(xen_vcpu)) {
pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n",
xen_vcpu_nr(0));
return;
}
xen_sched_clock_offset = xen_clocksource_read();
pv_time_ops = xen_time_ops;
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
@ -562,5 +596,7 @@ void __init xen_hvm_init_time_ops(void)
x86_platform.calibrate_tsc = xen_tsc_khz;
x86_platform.get_wallclock = xen_get_wallclock;
x86_platform.set_wallclock = xen_set_wallclock;
hvm_time_initialized = true;
}
#endif
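
Taken together, these hunks make xen_hvm_init_time_ops() idempotent and let it bail out harmlessly when the boot vcpu's info is not yet accessible (vcpu >= MAX_VIRT_CPUS, e.g. after kexec), so the second call from xen_hvm_smp_prepare_boot_cpu() finishes the setup. A minimal stand-alone sketch of that init-once-with-retry pattern, with illustrative names rather than the kernel's:

/* Sketch only: an init routine that may run too early, return without
 * side effects, and be called again to finish setup exactly once.
 */
#include <stdbool.h>
#include <stddef.h>

static bool time_ops_initialized;

void hvm_init_time_ops(const void *vcpu_info)
{
	if (time_ops_initialized)
		return;			/* idempotent: later calls are no-ops */

	if (!vcpu_info)
		return;			/* too early; caller retries later */

	/* ... install clocksource/clockevent hooks here ... */
	time_ops_initialized = true;
}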

View File

@ -3676,6 +3676,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
cmd_id = header->id;
if (header->size > SVGA_CMD_MAX_DATASIZE) {
VMW_DEBUG_USER("SVGA3D command: %d is too big.\n",
cmd_id + SVGA_3D_CMD_BASE);
return -E2BIG;
}
*size = header->size + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
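
The added check rejects a user-controlled header->size before it is folded into *size. A self-contained sketch of the same validate-before-use rule (the names and the cap are illustrative, not the vmwgfx definitions):

#include <stdint.h>
#include <stddef.h>

#define CMD_MAX_DATASIZE (64u * 1024u)	/* illustrative cap */

struct cmd_header {
	uint32_t id;
	uint32_t size;	/* untrusted: supplied by userspace */
};

/* Check the claimed payload size against a hard bound before using it;
 * rejecting first also keeps the addition below from overflowing.
 */
static int cmd_total_size(const struct cmd_header *hdr, size_t *total)
{
	if (hdr->size > CMD_MAX_DATASIZE)
		return -1;	/* too big: refuse to parse */
	*total = (size_t)hdr->size + sizeof(*hdr);
	return 0;
}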

View File

@ -32,6 +32,7 @@
struct lpfc_sli2_slim;
#define ELX_MODEL_NAME_SIZE 80
#define ELX_FW_NAME_SIZE 84
#define LPFC_PCI_DEV_LP 0x1
#define LPFC_PCI_DEV_OC 0x2
@ -309,6 +310,14 @@ struct lpfc_stats {
struct lpfc_hba;
/* Data structure to keep withheld FLOGI_ACC information */
struct lpfc_defer_flogi_acc {
bool flag;
u16 rx_id;
u16 ox_id;
struct lpfc_nodelist *ndlp;
};
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
@ -1441,9 +1450,7 @@ struct lpfc_hba {
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
bool defer_flogi_acc_flag;
uint16_t defer_flogi_acc_rx_id;
uint16_t defer_flogi_acc_ox_id;
struct lpfc_defer_flogi_acc defer_flogi_acc;
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
@ -1598,10 +1605,6 @@ struct lpfc_hba {
u32 degrade_deactivate_threshold;
u32 fec_degrade_interval;
/* SCSI host template information - for physical port */
struct scsi_host_template port_template;
/* SCSI host template information - for all vports */
struct scsi_host_template vport_template;
atomic_t dbg_log_idx;
atomic_t dbg_log_cnt;
atomic_t dbg_log_dmping;
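
The new struct lpfc_defer_flogi_acc gathers the three loose defer_flogi_acc_* fields, and later hunks in this commit add a counted ndlp pointer that stays held while the deferred ACC is pending. A sketch of that arm/cancel discipline (the helper names are invented for illustration; the get/put calls mirror the driver's):

static void defer_acc_arm(struct lpfc_defer_flogi_acc *d,
			  struct lpfc_nodelist *ndlp, u16 rx_id, u16 ox_id)
{
	d->rx_id = rx_id;
	d->ox_id = ox_id;
	d->ndlp = lpfc_nlp_get(ndlp);	/* pin node until the ACC is sent */
	d->flag = true;
}

static void defer_acc_cancel(struct lpfc_defer_flogi_acc *d)
{
	if (d->ndlp) {
		lpfc_nlp_put(d->ndlp);	/* drop the pinned node */
		d->ndlp = NULL;
	}
	d->flag = false;		/* every clearing path does both */
}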

View File

@ -132,7 +132,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb);
int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
@ -444,7 +443,6 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;

View File

@ -1694,6 +1694,18 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
/* If the caller wanted a synchronous DA_ID completion, signal the
* wait object and clear the flag so the vport delete can proceed.
*/
if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
if (ndlp->da_id_waitq)
wake_up(ndlp->da_id_waitq);
}
spin_lock_irq(&ndlp->lock);
ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
return;
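
This completion is the waker half of a handshake whose waiter side appears in the lpfc_vport.c hunk of this commit (DECLARE_WAIT_QUEUE_HEAD_ONSTACK plus wait_event_timeout). A condensed sketch of both halves with illustrative names; unlike the hunk above, it clears the condition before waking, the textbook ordering, and the waiter's timeout backstops any missed wake:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define WAIT_FOR_CMD 0x4

struct node {
	spinlock_t lock;
	unsigned long save_flags;
	wait_queue_head_t *cmd_waitq;
};

static void cmd_done(struct node *n)		/* completion context */
{
	wait_queue_head_t *wq;

	spin_lock_irq(&n->lock);
	n->save_flags &= ~WAIT_FOR_CMD;		/* condition now true */
	wq = n->cmd_waitq;
	spin_unlock_irq(&n->lock);

	if (wq)
		wake_up(wq);
}

static void issue_and_wait(struct node *n, unsigned long tmo_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	spin_lock_irq(&n->lock);
	n->cmd_waitq = &waitq;
	n->save_flags |= WAIT_FOR_CMD;
	spin_unlock_irq(&n->lock);

	/* ... issue the command here; on failure, skip the wait ... */

	wait_event_timeout(waitq, !(n->save_flags & WAIT_FOR_CMD),
			   msecs_to_jiffies(tmo_ms));

	spin_lock_irq(&n->lock);
	n->cmd_waitq = NULL;			/* stack waitq dies with us */
	n->save_flags &= ~WAIT_FOR_CMD;
	spin_unlock_irq(&n->lock);
}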

View File

@ -90,6 +90,8 @@ enum lpfc_nlp_save_flags {
NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
/* wait for outstanding LOGO to cmpl */
NLP_WAIT_FOR_LOGO = 0x2,
/* wait for outstanding DA_ID to finish */
NLP_WAIT_FOR_DA_ID = 0x4
};
struct lpfc_nodelist {
@ -160,7 +162,12 @@ struct lpfc_nodelist {
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
uint32_t nlp_defer_did;
/* These wait objects are NPIV specific. These IOs must complete
* synchronously.
*/
wait_queue_head_t *logo_waitq;
wait_queue_head_t *da_id_waitq;
};
struct lpfc_node_rrq {
@ -203,7 +210,6 @@ struct lpfc_node_rrq {
NPR list */
#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */
#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */

View File

@ -131,6 +131,15 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 1;
}
static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
{
struct fc_els_ls_acc *rsp = buf->virt;
if (rsp && rsp->la_cmd == ELS_LS_ACC)
return true;
return false;
}
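
Every completion site in this file (FLOGI, PLOGI, RDF, EDC, FDISC) then applies the helper the same way: take the first response buffer and refuse to parse unless it is an LS_ACC. A sketch of that call pattern as a hypothetical wrapper (els_rsp_serv_parm() is my name, not the driver's):

static struct serv_parm *els_rsp_serv_parm(struct lpfc_dmabuf *pcmd)
{
	struct lpfc_dmabuf *prsp;

	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp || !lpfc_is_els_acc_rsp(prsp))
		return NULL;	/* reject or runt response: do not parse */

	/* service parameters start after the 4-byte ELS command word */
	return (struct serv_parm *)((u8 *)prsp->virt + sizeof(uint32_t));
}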
/**
* lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
* @vport: pointer to a host virtual N_Port data structure.
@ -1005,10 +1014,19 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
* trigger the release of the node
* trigger the release of the node. Make sure the ndlp
* is marked NLP_DROPPED.
*/
if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
spin_lock_irq(&ndlp->lock);
if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
!(ndlp->nlp_flag & NLP_DROPPED) &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irq(&ndlp->lock);
lpfc_nlp_put(ndlp);
spin_lock_irq(&ndlp->lock);
}
spin_unlock_irq(&ndlp->lock);
goto out;
}
@ -1057,10 +1075,11 @@ stop_rr_fcf_flogi:
IOERR_LOOP_OPEN_FAILURE)))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2858 FLOGI failure Status:x%x/x%x TMO"
":x%x Data x%x x%x\n",
":x%x Data x%x x%x x%x x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, phba->hba_flag,
phba->fcf.fcf_flag);
phba->fcf.fcf_flag, ndlp->nlp_flag,
ndlp->fc4_xpt_flags);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@ -1077,16 +1096,23 @@ stop_rr_fcf_flogi:
* registered with the SCSI transport, remove the initial
* reference to trigger node release.
*/
spin_lock_irq(&ndlp->lock);
if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
!(ndlp->nlp_flag & NLP_DROPPED) &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irq(&ndlp->lock);
lpfc_nlp_put(ndlp);
spin_lock_irq(&ndlp->lock);
}
spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
"xri x%x iotag x%x TMO:x%x refcnt %d\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
cmdiocb->sli4_xritag, irsp->ulpTimeout,
kref_read(&ndlp->kref));
cmdiocb->sli4_xritag, cmdiocb->iotag,
irsp->ulpTimeout, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
@ -1126,7 +1152,6 @@ stop_rr_fcf_flogi:
if (!lpfc_error_lost_link(irsp))
lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
goto flogifail;
@ -1143,6 +1168,8 @@ stop_rr_fcf_flogi:
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
goto out;
if (!lpfc_is_els_acc_rsp(prsp))
goto out;
sp = prsp->virt + sizeof(uint32_t);
/* FLOGI completes successfully */
@ -1328,6 +1355,25 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t tmo, did;
int rc;
/* It's possible for lpfc to reissue a FLOGI on an ndlp that is marked
* NLP_DROPPED. This happens when the FLOGI completed with the XB bit
* set causing lpfc to reference the ndlp until the XRI_ABORTED CQE is
* issued. The time window for the XRI_ABORTED CQE can be as much as
* 2*2*RA_TOV allowing for ndlp reuse of this type when the link is
* cycling quickly. When true, restore the initial reference and remove
* the NLP_DROPPED flag as lpfc is retrying.
*/
spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_flag & NLP_DROPPED) {
ndlp->nlp_flag &= ~NLP_DROPPED;
spin_unlock_irq(&ndlp->lock);
if (!lpfc_nlp_get(ndlp)) {
return 1;
}
spin_lock_irq(&ndlp->lock);
}
spin_unlock_irq(&ndlp->lock);
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_FLOGI);
@ -1426,7 +1472,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
/* Check for a deferred FLOGI ACC condition */
if (phba->defer_flogi_acc_flag) {
if (phba->defer_flogi_acc.flag) {
/* lookup ndlp for received FLOGI */
ndlp = lpfc_findnode_did(vport, 0);
if (!ndlp)
@ -1437,28 +1483,31 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc.rx_id;
defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
phba->defer_flogi_acc_ox_id;
phba->defer_flogi_acc.ox_id;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
" ox_id: x%x, hba_flag x%x\n",
phba->defer_flogi_acc_rx_id,
phba->defer_flogi_acc_ox_id, phba->hba_flag);
phba->defer_flogi_acc.rx_id,
phba->defer_flogi_acc.ox_id, phba->hba_flag);
/* Send deferred FLOGI ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
ndlp, NULL);
phba->defer_flogi_acc_flag = false;
phba->defer_flogi_acc.flag = false;
/* Decrement the held ndlp that was incremented when the
* deferred flogi acc flag was set.
*/
if (phba->defer_flogi_acc.ndlp) {
lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
phba->defer_flogi_acc.ndlp = NULL;
}
vport->fc_myDID = did;
/* Decrement ndlp reference count to indicate the node can be
* released when other references are removed.
*/
lpfc_nlp_put(ndlp);
}
return 0;
@ -1695,18 +1744,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t keepDID = 0, keep_nlp_flag = 0;
int rc;
uint32_t keep_new_nlp_flag = 0;
uint16_t keep_nlp_state;
u32 keep_nlp_fc4_type = 0;
struct lpfc_nvme_rport *keep_nrport = NULL;
unsigned long *active_rrqs_xri_bitmap = NULL;
/* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us.
*/
if (ndlp->nlp_type & NLP_FABRIC)
return ndlp;
sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
memset(name, 0, sizeof(struct lpfc_name));
@ -1716,15 +1760,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
/* return immediately if the WWPN matches ndlp */
if (!new_ndlp || (new_ndlp == ndlp))
if (new_ndlp == ndlp)
return ndlp;
/*
* Unregister from backend if not done yet. Could have been skipped
* due to ADISC
*/
lpfc_nlp_unreg_node(vport, new_ndlp);
if (phba->sli_rev == LPFC_SLI_REV4) {
active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
GFP_KERNEL);
@ -1741,11 +1779,37 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(new_ndlp ? new_ndlp->nlp_flag : 0),
(new_ndlp ? new_ndlp->nlp_fc4_type : 0));
keepDID = new_ndlp->nlp_DID;
if (!new_ndlp) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
if (!rc) {
if (active_rrqs_xri_bitmap)
mempool_free(active_rrqs_xri_bitmap,
phba->active_rrq_pool);
return ndlp;
}
new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
if (!new_ndlp) {
if (active_rrqs_xri_bitmap)
mempool_free(active_rrqs_xri_bitmap,
phba->active_rrq_pool);
return ndlp;
}
} else {
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(active_rrqs_xri_bitmap,
new_ndlp->active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
/*
* Unregister from backend if not done yet. Could have been
* skipped due to ADISC
*/
lpfc_nlp_unreg_node(vport, new_ndlp);
}
keepDID = new_ndlp->nlp_DID;
/* At this point in this routine, we know new_ndlp will be
* returned. however, any previous GID_FTs that were done
@ -2118,9 +2182,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_DEVICE_RM);
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
cmdiocb->context2)->list.next,
struct lpfc_dmabuf, list);
prsp = list_get_first(&((struct lpfc_dmabuf *)cmdiocb->context2)->list,
struct lpfc_dmabuf, list);
if (!prsp)
goto out;
if (!lpfc_is_els_acc_rsp(prsp))
goto out;
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
sp = (struct serv_parm *)((u8 *)prsp->virt +
@ -3022,23 +3089,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
if (skip_recovery)
goto out;
/* The driver sets this flag for an NPIV instance that doesn't want to
* log into the remote port.
*/
if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
spin_lock_irq(&ndlp->lock);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
goto out_rsrc_free;
}
out:
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
@ -3086,7 +3136,7 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
out_rsrc_free:
/* Driver is done with the I/O. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@ -3407,6 +3457,8 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
if (!prdf)
goto out;
if (!lpfc_is_els_acc_rsp(prsp))
goto out;
for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
@ -3995,6 +4047,9 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
edc_rsp->acc_hdr.la_cmd,
be32_to_cpu(edc_rsp->desc_list_len));
if (!lpfc_is_els_acc_rsp(prsp))
goto out;
/*
* Payload length in bytes is the response descriptor list
* length minus the 12 bytes of Link Service Request
@ -5165,14 +5220,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
*
* This routine is the completion callback function to the Logout (LOGO)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
* the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
* release the ndlp if it has the last reference remaining (reference count
* is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
* field to NULL to inform the following lpfc_els_free_iocb() routine no
* ndlp reference count needs to be decremented. Otherwise, the ndlp
* reference use-count shall be decremented by the lpfc_els_free_iocb()
* routine. Finally, the lpfc_els_free_iocb() is invoked to release the
* IOCB data structure.
* the completion of the LOGO process. If the node has transitioned to NPR,
* this routine unregisters the RPI if it is still registered. The
* lpfc_els_free_iocb() is invoked to release the IOCB data structure.
**/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@ -5189,9 +5239,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0109 ACC to LOGO completes to NPort x%x refcnt %d "
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
"last els x%x Data: x%x x%x x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
/* This clause allows the LOGO ACC to complete and free resources
* for the Fabric Domain Controller. It does deliberately skip
@ -5203,29 +5254,22 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
/* If PLOGI is being retried, PLOGI completion will cleanup the
* node. The NLP_NPR_2B_DISC flag needs to be retained to make
* progress on nodes discovered from last RSCN.
*/
if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
(ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
goto out;
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
/* A LOGO is completing and the node is in NPR state.
* Just unregister the RPI because the node is still
* required.
*/
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
lpfc_unreg_rpi(vport, ndlp);
} else {
/* Indicate the node has already released, should
* not reference to it from within lpfc_els_free_iocb.
*/
cmdiocb->context1 = NULL;
/* If came from PRLO, then PRLO_ACC is done.
* Start rediscovery now.
*/
if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
}
out:
/*
* The driver received a LOGO from the rport and has ACK'd it.
@ -5244,9 +5288,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* RPI (Remote Port Index) mailbox command to the @phba. It simply releases
* the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
* decrements the ndlp reference count held for this completion callback
* function. After that, it invokes the lpfc_nlp_not_used() to check
* whether there is only one reference left on the ndlp. If so, it will
* perform one more decrement and trigger the release of the ndlp.
* function. After that, it invokes lpfc_drop_node() to check
* whether it is appropriate to release the node.
**/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
@ -5429,15 +5472,30 @@ out:
* these conditions and release the RPI.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
(vport && vport->port_type == LPFC_NPIV_PORT) &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
spin_unlock_irq(&ndlp->lock);
lpfc_drop_node(vport, ndlp);
vport && vport->port_type == LPFC_NPIV_PORT &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
spin_unlock_irq(&ndlp->lock);
}
lpfc_drop_node(vport, ndlp);
} else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE &&
ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
/* Drop ndlp if there is no planned or outstanding
* issued PRLI.
*
* In cases when the ndlp is acting as both an initiator
* and target function, let our issued PRLI determine
* the final ndlp kref drop.
*/
lpfc_drop_node(vport, ndlp);
}
}
/* Release the originating I/O reference. */
@ -8253,8 +8311,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Defer ACC response until AFTER we issue a FLOGI */
if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
phba->defer_flogi_acc_ox_id =
phba->defer_flogi_acc.rx_id = cmdiocb->iocb.ulpContext;
phba->defer_flogi_acc.ox_id =
cmdiocb->iocb.unsli3.rcvsli3.ox_id;
vport->fc_myDID = did;
@ -8262,11 +8320,17 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3344 Deferring FLOGI ACC: rx_id: x%x,"
" ox_id: x%x, hba_flag x%x\n",
phba->defer_flogi_acc_rx_id,
phba->defer_flogi_acc_ox_id, phba->hba_flag);
phba->defer_flogi_acc.rx_id,
phba->defer_flogi_acc.ox_id, phba->hba_flag);
phba->defer_flogi_acc_flag = true;
phba->defer_flogi_acc.flag = true;
/* This nlp_get is paired with nlp_puts that reset the
* defer_flogi_acc.flag back to false. We need to retain
* a kref on the ndlp until the deferred FLOGI ACC is
* processed or cancelled.
*/
phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
return 0;
}
@ -8720,17 +8784,19 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->context_un.rrq = rrq;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
lpfc_nlp_get(ndlp);
elsiocb->context1 = ndlp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
if (!elsiocb->context1)
goto io_err;
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (ret == IOCB_ERROR)
if (ret == IOCB_ERROR) {
lpfc_nlp_put(ndlp);
goto io_err;
}
return 0;
io_err:
lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
return 1;
}
@ -10213,10 +10279,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@ -10242,7 +10304,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
/* retain node if our response is deferred */
if (phba->defer_flogi_acc_flag)
if (phba->defer_flogi_acc.flag)
break;
if (newnode)
lpfc_disc_state_machine(vport, ndlp, NULL,
@ -11055,6 +11117,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
goto out;
if (!lpfc_is_els_acc_rsp(prsp))
goto out;
sp = prsp->virt + sizeof(uint32_t);
fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
memcpy(&vport->fabric_portname, &sp->portName,

View File

@ -100,6 +100,12 @@ lpfc_rport_invalid(struct fc_rport *rport)
return -EINVAL;
}
if (rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) {
pr_info("**** %s: devloss_callbk_done rport x%px SID x%x\n",
__func__, rport, rport->scsi_target_id);
return -EINVAL;
}
rdata = rport->dd_data;
if (!rdata) {
pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
@ -155,6 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
unsigned long iflags;
bool drop_initial_node_ref = false;
ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
if (!ndlp)
@ -177,44 +184,66 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
/* Don't schedule a worker thread event if the vport is going down. */
if (vport->load_flag & FC_UNLOADING ||
!(phba->hba_flag & HBA_SETUP)) {
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
/* The scsi_transport is done with the rport so lpfc cannot
* call to unregister. Remove the scsi transport reference
* and clean up the SCSI transport node details.
/* Only 1 thread can drop the initial node reference.
* If not registered for NVME and NLP_DROPPED flag is
* clear, remove the initial reference.
*/
if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
if (!(ndlp->nlp_flag & NLP_DROPPED)) {
ndlp->nlp_flag |= NLP_DROPPED;
drop_initial_node_ref = true;
}
}
/* The scsi_transport is done with the rport so lpfc cannot
* call to unregister.
*/
if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
/* NVME transport-registered rports need the
* NLP_XPT_REGD flag to complete an unregister.
/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
* unregister calls were made to the scsi and nvme
* transports and refcnt was already decremented. Clear
* the NLP_XPT_REGD flag only if the NVME nrport is
* confirmed unregistered.
*/
if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
spin_unlock_irqrestore(&ndlp->lock, iflags);
/* Release scsi transport reference */
lpfc_nlp_put(ndlp);
} else {
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
} else {
spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_nlp_put(ndlp);
spin_lock_irqsave(&ndlp->lock, iflags);
}
/* Only 1 thread can drop the initial node reference. If
* another thread has set NLP_DROPPED, this thread is done.
*/
if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
!(ndlp->nlp_flag & NLP_DROPPED)) {
ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irqrestore(&ndlp->lock, iflags);
if (drop_initial_node_ref)
lpfc_nlp_put(ndlp);
return;
}
spin_unlock_irqrestore(&ndlp->lock, iflags);
return;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
/* Ignore callback for a mismatched (stale) rport */
if (ndlp->rport != rport) {
lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
"6788 fc rport mismatch: d_id x%06x ndlp x%px "
"fc rport x%px node rport x%px state x%x "
"refcnt %u\n",
ndlp->nlp_DID, ndlp, rport, ndlp->rport,
ndlp->nlp_state, kref_read(&ndlp->kref));
return;
}
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6789 rport name %llx != node port name %llx",
@ -403,6 +432,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
spin_lock_irqsave(&ndlp->lock, iflags);
if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
ndlp->nlp_flag &= ~NLP_DROPPED;
spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_nlp_get(ndlp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
@ -521,6 +551,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
* the following lpfc_nlp_put is necessary after fabric node is
* recovered.
*/
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
spin_unlock_irqrestore(&ndlp->lock, iflags);
if (recovering) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY | LOG_NODE,
@ -533,6 +566,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
spin_unlock_irqrestore(&ndlp->lock, iflags);
return fcf_inuse;
} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Fabric node fully recovered before this dev_loss_tmo
* queue work is processed. Thus, ignore the
@ -550,9 +584,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
}
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
if (!(ndlp->nlp_flag & NLP_DROPPED)) {
ndlp->nlp_flag |= NLP_DROPPED;
spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_nlp_put(ndlp);
spin_lock_irqsave(&ndlp->lock, iflags);
}
spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_nlp_put(ndlp);
return fcf_inuse;
}
@ -1253,7 +1291,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_scsi_dev_block(phba);
offline = pci_channel_offline(phba->pcidev);
phba->defer_flogi_acc_flag = false;
/* Decrement the held ndlp if there is a deferred flogi acc */
if (phba->defer_flogi_acc.flag) {
if (phba->defer_flogi_acc.ndlp) {
lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
phba->defer_flogi_acc.ndlp = NULL;
}
}
phba->defer_flogi_acc.flag = false;
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
@ -1378,7 +1423,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
if (phba->defer_flogi_acc_flag)
if (phba->defer_flogi_acc.flag)
vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
FC_NLP_MORE | FC_RSCN_DISCOVERY);
else
@ -4438,13 +4483,14 @@ out:
/* If the node is not registered with the scsi or nvme
* transport, remove the fabric node. The failed reg_login
* is terminal.
* is terminal and forces the removal of the last node
* reference.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
lpfc_nlp_not_used(ndlp);
lpfc_nlp_put(ndlp);
}
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@ -4819,9 +4865,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
vport->phba->nport_event_cnt++;
if (vport->phba->nvmet_support == 0) {
/* Start devloss if target. */
if (ndlp->nlp_type & NLP_NVME_TARGET)
lpfc_nvme_unregister_port(vport, ndlp);
lpfc_nvme_unregister_port(vport, ndlp);
} else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
@ -5198,7 +5242,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
case CMD_GEN_REQUEST64_CR:
if (iocb->context_un.ndlp == ndlp)
return 1;
/* fall through */
break;
case CMD_ELS_REQUEST64_CR:
if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
return 1;
@ -5751,6 +5795,7 @@ static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_nodelist *ndlp;
struct lpfc_nodelist *np = NULL;
uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
@ -5765,14 +5810,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
return ndlp;
/* Check for new or potentially stale node */
if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
return ndlp;
np = ndlp;
}
}
/* FIND node did <did> NOT FOUND */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0932 FIND node did x%x NOT FOUND.\n", did);
return NULL;
if (!np)
/* FIND node did <did> NOT FOUND */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0932 FIND node did x%x NOT FOUND.\n", did);
return np;
}
struct lpfc_nodelist *
@ -5879,14 +5930,6 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (vport->phba->nvmet_support)
return ndlp;
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
!(ndlp->nlp_type &
(NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
lpfc_disc_state_machine(vport, ndlp, NULL,
@ -6800,11 +6843,6 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
unsigned long flags;
if (ndlp) {
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node get: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
/* The check of ndlp usage to prevent incrementing the
* ndlp reference count that is in the process of being
* released.
@ -6812,9 +6850,8 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, flags);
if (!kref_get_unless_zero(&ndlp->kref)) {
spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
"0276 %s: ndlp:x%px refcnt:%d\n",
__func__, (void *)ndlp, kref_read(&ndlp->kref));
pr_info("0276 %s: NDLP has zero reference count. "
"Exiting\n", __func__);
return NULL;
}
spin_unlock_irqrestore(&ndlp->lock, flags);
@ -6843,25 +6880,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
/* This routine frees the specified nodelist if it is not in use
* by any other discovery thread. This routine returns 1 if the
* ndlp has been freed. A return value of 0 indicates the ndlp has
* not yet been released.
*/
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node not used: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
if (kref_read(&ndlp->kref) == 1)
if (lpfc_nlp_put(ndlp))
return 1;
return 0;
}
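
lpfc_nlp_not_used() (removed above) decided based on kref_read(), a check that can race with concurrent gets; the callers in this commit instead drop the initial reference outright, gated by the NLP_DROPPED bit so only one thread ever does it. That idiom, condensed into a sketch (not the driver's code):

static void drop_initial_ref_once(struct lpfc_nodelist *ndlp)
{
	bool drop = false;

	spin_lock_irq(&ndlp->lock);
	if (!(ndlp->nlp_flag & NLP_DROPPED)) {
		ndlp->nlp_flag |= NLP_DROPPED;	/* claim the drop */
		drop = true;
	}
	spin_unlock_irq(&ndlp->lock);

	if (drop)
		lpfc_nlp_put(ndlp);	/* may be the final put */
}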
/**
* lpfc_fcf_inuse - Check if FCF can be unregistered.
* @phba: Pointer to hba context object.

View File

@ -4543,6 +4543,17 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
return rol64(wwn, 32);
}
static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
{
if (phba->sli_rev == LPFC_SLI_REV4)
if (phba->cfg_xpsgl && !phba->nvmet_support)
return LPFC_MAX_SG_TABLESIZE;
else
return phba->cfg_scsi_seg_cnt;
else
return phba->cfg_sg_seg_cnt;
}
/**
* lpfc_vmid_res_alloc - Allocates resources for VMID
* @phba: pointer to lpfc hba data structure.
@ -4613,6 +4624,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
struct scsi_host_template *template;
int error = 0;
int i;
uint64_t wwn;
@ -4642,22 +4654,31 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
}
}
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
/* Seed template for SCSI host registration */
if (dev == &phba->pcidev->dev) {
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
/* Seed physical port template */
template = &lpfc_template;
if (use_no_reset_hba)
/* template is for a no reset SCSI Host */
template->eh_host_reset_handler = NULL;
/* Seed updated value of sg_tablesize */
template->sg_tablesize = lpfc_get_sg_tablesize(phba);
} else {
if (!use_no_reset_hba)
shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
else
shost = scsi_host_alloc(&lpfc_template_no_hr,
sizeof(struct lpfc_vport));
/* NVMET is for physical port only */
template = &lpfc_template_nvme;
}
} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
shost = scsi_host_alloc(&lpfc_template_nvme,
sizeof(struct lpfc_vport));
} else {
/* Seed vport template */
template = &lpfc_vport_template;
/* Seed updated value of sg_tablesize */
template->sg_tablesize = lpfc_get_sg_tablesize(phba);
}
shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
if (!shost)
goto out;
@ -4687,11 +4708,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
if (phba->cfg_xpsgl && !phba->nvmet_support)
shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
else
shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@ -7767,7 +7783,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates the configured values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
if (phba->sli_rev == LPFC_SLI_REV4)
@ -8286,7 +8301,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates with the updated values. */
lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9087 sg_seg_cnt:%d dmabuf_size:%d "
@ -14754,7 +14768,7 @@ out:
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
uint8_t file_name[ELX_MODEL_NAME_SIZE];
char file_name[ELX_FW_NAME_SIZE] = {0};
int ret;
const struct firmware *fw;
@ -14763,7 +14777,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
LPFC_SLI_INTF_IF_TYPE_2)
return -EPERM;
snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
if (fw_upgrade == INT_FW_UPGRADE) {
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,

View File

@ -52,9 +52,6 @@ static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_name *nn, struct lpfc_name *pn)
{
/* First, we MUST have a RPI registered */
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
return 0;
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
@ -498,6 +495,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_config_link(phba, link_mbox);
link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
link_mbox->vport = vport;
/* The default completion handling for CONFIG_LINK
* does not require the ndlp so no reference is needed.
*/
link_mbox->ctx_ndlp = ndlp;
rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
@ -623,6 +624,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!login_mbox->ctx_ndlp)
goto out;
login_mbox->context3 = save_iocb; /* For PLOGI ACC */
spin_lock_irq(&ndlp->lock);
@ -631,8 +635,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Start the ball rolling by issuing REG_LOGIN here */
rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
if (rc == MBX_NOT_FINISHED) {
lpfc_nlp_put(ndlp);
goto out;
}
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
return 1;
@ -695,6 +701,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
IOCB_t *icmd;
uint32_t *lp;
uint32_t cmd;
int rc;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@ -719,22 +726,29 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* resume the RPI before the ACC goes out.
*/
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
GFP_KERNEL);
if (elsiocb) {
/* Don't resume an unregistered RPI - unnecessary
* mailbox. Just send the ACC when the RPI is not
* registered.
*/
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
elsiocb = kmalloc(sizeof(*elsiocb), GFP_KERNEL);
if (elsiocb) {
/* Save info from cmd IOCB used in
* rsp
*/
memcpy(elsiocb, cmdiocb,
sizeof(*elsiocb));
/* Save info from cmd IOCB used in rsp */
memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
sizeof(struct lpfc_iocbq));
elsiocb->drvrTimeout = cmd;
/* Save the ELS cmd */
elsiocb->drvrTimeout = cmd;
rc = lpfc_sli4_resume_rpi(ndlp,
lpfc_mbx_cmpl_resume_rpi,
elsiocb);
if (rc)
kfree(elsiocb);
if (lpfc_sli4_resume_rpi(ndlp,
lpfc_mbx_cmpl_resume_rpi,
elsiocb))
kfree(elsiocb);
goto out;
goto out;
}
}
}
@ -778,7 +792,6 @@ out:
spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return 0;
}
@ -1104,8 +1117,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
if (rc == MBX_NOT_FINISHED) {
lpfc_nlp_put(ndlp);
mempool_free(pmb, phba->mbox_mem_pool);
}
}
}
@ -2265,13 +2280,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
out:
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TARGET_REMOVE;
spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_ELS | LOG_DISCOVERY | LOG_NODE,
"6228 Sending LOGO, determined nlp_type "
"0x%x nlp_flag x%x refcnt %u\n",
ndlp->nlp_type, ndlp->nlp_flag,
kref_read(&ndlp->kref));
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@ -2620,8 +2635,26 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* flush the target */
lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
/* Send PRLO_ACC */
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
/* Save ELS_CMD_PRLO as the last elscmd and then set to NPR.
* lpfc_cmpl_els_logo_acc is expected to restart discovery.
*/
ndlp->nlp_last_elscmd = ELS_CMD_PRLO;
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY,
"3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_last_elscmd,
kref_read(&ndlp->kref));
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
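
Both mailbox fixes in this file follow one rule: a reference taken for an asynchronous completion must be dropped on any path where that completion will never run. A sketch of the submit-or-undo shape (the function name is invented and the driver's context is assumed for types and errno values; the lpfc calls themselves are the driver's):

static int issue_login_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
			    struct lpfc_nodelist *ndlp)
{
	pmb->ctx_ndlp = lpfc_nlp_get(ndlp);	/* ref for the completion */
	if (!pmb->ctx_ndlp)
		return -ENODEV;

	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		lpfc_nlp_put(ndlp);	/* completion will never run: undo */
		return -EIO;
	}

	return 0;	/* the completion handler now owns the reference */
}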

View File

@ -2362,6 +2362,11 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
spin_lock_irq(&ndlp->lock);
/* If an oldrport exists, so does the ndlp reference. If not,
* a new reference is needed because either the node has never
* been registered or it has been unregistered and is being deleted.
*/
oldrport = lpfc_ndlp_get_nrport(ndlp);
if (oldrport) {
prev_ndlp = oldrport->ndlp;
@ -2435,7 +2440,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"6031 RemotePort Registration failed "
"err: %d, DID x%06x ref %u\n",
ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
lpfc_nlp_put(ndlp);
/* Only release reference if one was taken for this request */
if (!oldrport)
lpfc_nlp_put(ndlp);
}
return ret;
@ -2541,7 +2549,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* clear any rport state until the transport calls back.
*/
if (ndlp->nlp_type & NLP_NVME_TARGET) {
if ((ndlp->nlp_type & NLP_NVME_TARGET) ||
(remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/

View File

@ -7254,33 +7254,6 @@ struct scsi_host_template lpfc_template_nvme = {
.track_queue_depth = 0,
};
struct scsi_host_template lpfc_template_no_hr = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_timed_out = fc_eh_timed_out,
.eh_should_retry_cmd = fc_eh_should_retry_cmd,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
@ -7316,9 +7289,12 @@ struct scsi_host_template lpfc_vport_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_timed_out = fc_eh_timed_out,
.eh_should_retry_cmd = fc_eh_should_retry_cmd,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = NULL,
.eh_host_reset_handler = NULL,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
@ -7329,6 +7305,7 @@ struct scsi_host_template lpfc_vport_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.vendor_id = 0,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};

View File

@ -2895,6 +2895,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
if (phba->sli_rev == LPFC_SLI_REV4)
vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@ -19257,7 +19259,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
"oxid:x%x SID:x%x\n", oxid, sid);
return;
}
/* Put ndlp onto pport node list */
/* Put ndlp onto vport node list */
lpfc_enqueue_node(vport, ndlp);
}
@ -19288,7 +19290,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
return;
}
ctiocb->vport = phba->pport;
ctiocb->vport = vport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
@ -19360,6 +19362,16 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
ctiocb->context1 = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
/* If the only usage of this nodelist is the BLS response, release
* the initial ref to free the ndlp when the transmit completes
*/
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
!(ndlp->nlp_flag & NLP_DROPPED) &&
!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
ndlp->nlp_flag |= NLP_DROPPED;
lpfc_nlp_put(ndlp);
}
}
/**
@ -21343,8 +21355,12 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
/* Put reference count for delayed processing */
/* This reference is local to this routine. The
* reference is removed at routine exit.
*/
act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
}
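
The comment rewrite above is the other half of the same discipline: a reference that only protects this routine's own walk is taken locally and dropped before exit, independent of any reference the pending work itself holds. Sketched with an illustrative name:

static void cancel_pending_with_local_pin(struct lpfc_nodelist *ndlp)
{
	ndlp = lpfc_nlp_get(ndlp);	/* local pin; NULL if already dying */
	if (!ndlp)
		return;

	/* ... walk and cancel pending mailboxes referencing ndlp ... */

	lpfc_nlp_put(ndlp);		/* remove the local pin at exit */
}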

View File

@ -636,6 +636,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
int rc;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@ -691,21 +692,49 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
if (!ndlp)
goto skip_logo;
/* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP &&
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
/* Send DA_ID and wait for a completion. */
/* Send DA_ID and wait for a completion. This is best
* effort. If the DA_ID fails, likely the fabric will
* "leak" NportIDs but at least the driver issued the
* command.
*/
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp)
goto issue_logo;
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = &waitq;
ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
if (rc) {
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
"delete objects on fabric, "
"rc %d\n", rc);
if (!rc) {
wait_event_timeout(waitq,
!(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
msecs_to_jiffies(phba->fc_ratov * 2000));
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
"1829 DA_ID issue status %d. "
"SFlag x%x NState x%x, NFlag x%x "
"Rpi x%x\n",
rc, ndlp->save_flags, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
/* Remove the waitq and save_flags. It no
* longer matters if the wake happened.
*/
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = NULL;
ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
}
issue_logo:
/*
* If the vpi is not registered, then a valid FDISC doesn't
* exist and there is no need for an ELS LOGO. Just cleanup

View File

@ -296,7 +296,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
if (delegation->inode &&
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);

View File

@ -2778,6 +2778,9 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_enable_advertising(hdev);
}
/* Inform sockets conn is gone before we delete it */
hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
goto done;
}

View File

@ -1425,18 +1425,40 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
vsock_remove_connected(vsk);
goto out_wait;
} else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
err = -ETIMEDOUT;
/* Connection established. Whatever happens to the socket once we
* release it is not connect()'s concern. No need to go into signal
* and timeout handling. Call it a day.
*
* Note that allowing an already established socket to be "reset"
* here would be racy and insecure.
*/
if (sk->sk_state == TCP_ESTABLISHED)
break;
/* If the connection was _not_ established and a signal or timeout
* intervened, we want the socket's state reset. User space may want
* to retry.
*
* sk_state != TCP_ESTABLISHED implies that socket is not on
* vsock_connected_table. We keep the binding and the transport
* assigned.
*/
if (signal_pending(current) || timeout == 0) {
err = timeout == 0 ? -ETIMEDOUT : sock_intr_errno(timeout);
/* The listener might have already responded with
* VIRTIO_VSOCK_OP_RESPONSE. Its handling expects our
* sk_state == TCP_SYN_SENT, which we break here.
* In that case a VIRTIO_VSOCK_OP_RST will follow.
*/
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
/* Try to cancel VIRTIO_VSOCK_OP_REQUEST skb sent out by
* transport->connect().
*/
vsock_transport_cancel_pkt(vsk);
goto out_wait;
}
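
The rewritten loop lets user space treat an interrupted or timed-out connect() as retryable, since the socket is reset to SS_UNCONNECTED with its binding and transport kept. A userspace-side sketch of such a retry (assumes <linux/vm_sockets.h>; recreating the socket keeps it portable to kernels without this fix):

#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

static int vsock_connect_retry(unsigned int cid, unsigned int port, int tries)
{
	struct sockaddr_vm sa;

	memset(&sa, 0, sizeof(sa));
	sa.svm_family = AF_VSOCK;
	sa.svm_cid = cid;
	sa.svm_port = port;

	while (tries-- > 0) {
		int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0)
			return fd;	/* established */
		close(fd);
		if (errno != EINTR && errno != ETIMEDOUT)
			return -1;	/* hard failure: stop retrying */
	}
	return -1;
}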

View File

@ -65,7 +65,8 @@ enum {
BYT_RT5640_OVCD_SF_1P5 = (RT5640_OVCD_SF_1P5 << 13),
};
#define BYT_RT5640_MAP(quirk) ((quirk) & GENMASK(3, 0))
#define BYT_RT5640_MAP_MASK GENMASK(3, 0)
#define BYT_RT5640_MAP(quirk) ((quirk) & BYT_RT5640_MAP_MASK)
#define BYT_RT5640_JDSRC(quirk) (((quirk) & GENMASK(7, 4)) >> 4)
#define BYT_RT5640_OVCD_TH(quirk) (((quirk) & GENMASK(12, 8)) >> 8)
#define BYT_RT5640_OVCD_SF(quirk) (((quirk) & GENMASK(14, 13)) >> 13)
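
These macros treat the quirk word as a packed bitfield: bits 3..0 select the input map, 7..4 the jack-detect source, 12..8 the over-current threshold, and 14..13 its scale factor. A sketch of how the hunk just below substitutes a default when the map field is invalid (the mask macros are local stand-ins and the validity bound is illustrative):

#include <linux/bits.h>		/* GENMASK() */

#define Q_MAP_MASK	GENMASK(3, 0)
#define Q_MAP(q)	((q) & Q_MAP_MASK)

static unsigned long quirk_fix_map(unsigned long q, unsigned long def_map)
{
	if (Q_MAP(q) > 0x5) {		/* illustrative validity bound */
		q &= ~Q_MAP_MASK;	/* clear the bad map field */
		q |= def_map;		/* substitute the default map */
	}
	return q;
}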
@ -136,7 +137,9 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk NO_INTERNAL_MIC_MAP enabled\n");
break;
default:
dev_err(dev, "quirk map 0x%x is not supported, microphone input will not work\n", map);
dev_warn_once(dev, "quirk sets invalid input map: 0x%x, default to DMIC1_MAP\n", map);
byt_rt5640_quirk &= ~BYT_RT5640_MAP_MASK;
byt_rt5640_quirk |= BYT_RT5640_DMIC1_MAP;
break;
}
if (byt_rt5640_quirk & BYT_RT5640_HSMIC2_ON_IN1)