Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           |   15
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |   89
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c       |  139
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |   18
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c        |  452
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   |   23
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h      |   47
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       | 1296
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   |  797
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h        |    7
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h       |   22
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      |  290
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c       |   11
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |  215
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c      |  381
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h      |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c     |   77
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      | 1095
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       |  744
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h       |    7
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h      |   14
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c     |  128
23 files changed, 3380 insertions(+), 2495 deletions(-)
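The change that threads through every file below is node-lifetime rework: the HBA-wide hbalock (and the Scsi_Host host_lock) give way to a per-node spinlock, the new ndlp->lock added in lpfc_disc.h, and the old NLP_CHK_NODE_ACT() usage-bitmap test is dropped in favor of plain kref reference counting through lpfc_nlp_get()/lpfc_nlp_put(). A minimal sketch of that pattern, with hypothetical names standing in for the driver's real structures:

#include <linux/spinlock.h>
#include <linux/kref.h>

/* Hypothetical stand-in for struct lpfc_nodelist. */
struct node {
	spinlock_t lock;	/* per-node lock, replaces HBA-wide locking */
	struct kref kref;	/* lifetime: the final put frees the node */
	void *remoteport;	/* transport registration, may be NULL */
};

/* Sample a pointer under the narrow per-node lock, as the
 * lpfc_ndlp_get_nrport() call sites below now do. */
static void *node_remoteport(struct node *n)
{
	void *rp;

	spin_lock(&n->lock);
	rp = n->remoteport;
	spin_unlock(&n->lock);
	return rp;
}

/* Mirror of lpfc_nlp_get(): succeed only while the node is live. */
static struct node *node_get(struct node *n)
{
	return kref_get_unless_zero(&n->kref) ? n : NULL;
}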
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 549adfaa97ce..a54c8da30273 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -664,11 +664,18 @@ struct lpfc_hba {
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_io_buf *,
struct lpfc_nodelist *);
+ int (*lpfc_scsi_prep_cmnd_buf)
+ (struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
+ int (*__lpfc_sli_issue_fcp_io)
+ (struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag);
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
@@ -744,7 +751,8 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -753,7 +761,7 @@ struct lpfc_hba {
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
-#define ELS_XRI_ABORT_EVENT 0x40
+#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
@@ -922,6 +930,7 @@ struct lpfc_hba {
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
+ uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
@@ -1129,8 +1138,6 @@ struct lpfc_hba {
uint8_t hb_outstanding;
struct timer_list rrq_tmr;
enum hba_temp_state over_temp_state;
- /* ndlp reference management */
- spinlock_t ndlp_lock;
/*
* Following bit will be set for all buffer tags which are not
* associated with any HBQ.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index e94eac194676..4528166dee36 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -57,10 +57,6 @@
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
-#define LPFC_DEF_MRQ_POST 512
-#define LPFC_MIN_MRQ_POST 512
-#define LPFC_MAX_MRQ_POST 2048
-
/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@@ -372,11 +368,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock(&vport->phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock(&vport->phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (!nrport)
continue;
@@ -1505,6 +1501,7 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/**
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
* @phba: lpfc_hba pointer.
+ * @opcode: The sli4 config command opcode.
*
* Description:
* Request SLI4 interface type-2 device to perform a physical register set
@@ -2288,7 +2285,7 @@ lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
return -EINVAL;
}
-/**
+/*
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -2314,7 +2311,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
-/**
+/*
* lpfc_param_hex_show - Return a cfg attribute value in hex
*
* Description:
@@ -2342,7 +2339,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
-/**
+/*
* lpfc_param_init - Initializes a cfg attribute
*
* Description:
@@ -2376,7 +2373,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_param_set - Set a cfg attribute value
*
* Description:
@@ -2413,7 +2410,7 @@ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_param_store - Set a vport attribute value
*
* Description:
@@ -2453,7 +2450,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_show - Return decimal formatted cfg attribute value
*
* Description:
@@ -2477,7 +2474,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
-/**
+/*
* lpfc_vport_param_hex_show - Return hex formatted attribute value
*
* Description:
@@ -2502,7 +2499,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
-/**
+/*
* lpfc_vport_param_init - Initialize a vport cfg attribute
*
* Description:
@@ -2535,7 +2532,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_set - Set a vport cfg attribute
*
* Description:
@@ -2571,7 +2568,7 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
return -EINVAL;\
}
-/**
+/*
* lpfc_vport_param_store - Set a vport attribute
*
* Description:
@@ -2774,7 +2771,7 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
/**
* lpfc_soft_wwpn_store - Set the ww port name of the adapter
- * @dev class device that is converted into a Scsi_host.
+ * @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: contains the wwpn in hexadecimal.
* @count: number of wwpn bytes in buf
@@ -2871,7 +2868,8 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
/**
* lpfc_soft_wwnn_store - sets the ww node name of the adapter
- * @cdev: class device that is converted into a Scsi_host.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
* @buf: contains the ww node name in hexadecimal.
* @count: number of wwnn bytes in buf.
*
@@ -3207,9 +3205,11 @@ static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
* lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
* (OAS) operations.
* @phba: lpfc_hba pointer.
- * @ndlp: pointer to fcp target node.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
* @lun: the fc lun for setting oas state.
* @oas_state: the oas state to be set to the lun.
+ * @pri: priority
*
* Returns:
* SUCCESS : 0
@@ -3247,6 +3247,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
* @vpt_wwpn: wwpn of the vport associated with the returned lun
* @tgt_wwpn: wwpn of the target associated with the returned lun
* @lun_status: status of the lun returned lun
+ * @lun_pri: priority of the lun returned lun
*
* Returns the first or next lun enabled for OAS operations for the vport/target
* specified. If a lun is found, its vport wwpn, target wwpn and status is
@@ -3285,6 +3286,7 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
* @tgt_wwpn: target wwpn by reference.
* @lun: the fc lun for setting oas state.
* @oas_state: the oas state to be set to the oas_lun.
+ * @pri: priority
*
* This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
* a lun for OAS operations.
@@ -3359,6 +3361,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: buffer for passing information.
+ * @count: size of the formatting string
*
* This function sets the OAS state for lun. Before this function is called,
* the vport wwpn, target wwpn, and oas state need to be set.
@@ -3631,16 +3634,14 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
- spin_lock(&vport->phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
remoteport = rport->remoteport;
- spin_unlock(&vport->phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (rport && remoteport)
nvme_fc_set_remoteport_devloss(remoteport,
vport->cfg_devloss_tmo);
@@ -3820,7 +3821,7 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
/**
* lpfc_tgt_queue_depth_store: Sets an attribute value.
- * @phba: pointer the the adapter structure.
+ * @vport: lpfc vport structure pointer.
* @val: integer attribute value.
*
* Description: Sets the parameter to the new value.
@@ -4005,8 +4006,10 @@ LPFC_ATTR(topology, 0, 0, 6,
/**
* lpfc_topology_set - Set the adapters topology field
- * @phba: lpfc_hba pointer.
- * @val: topology value.
+ * @dev: class device that is converted into a scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
*
* Description:
* If val is in a valid range then set the adapter's topology field and
@@ -4125,6 +4128,7 @@ static DEVICE_ATTR_RO(lpfc_static_vport);
/**
* lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
* @dev: Pointer to class device.
+ * @attr: Unused.
* @buf: Data buffer.
* @count: Size of the data buffer.
*
@@ -4288,7 +4292,8 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
/**
* lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device object.
+ * @dev: Pointer to class device.
+ * @attr: Unused.
* @buf: Data buffer.
*
* This function is the read call back function for
@@ -4367,7 +4372,7 @@ static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
* @filp: sysfs file
* @kobj: Pointer to the kernel object
* @bin_attr: Attribute object
- * @buff: Buffer pointer
+ * @buf: Buffer pointer
* @off: File offset
* @count: Buffer size
*
@@ -4397,7 +4402,7 @@ sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
+ if (!ndlp->lat_data)
continue;
if (nport_index > 0) {
@@ -4454,8 +4459,10 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
*/
/**
* lpfc_link_speed_set - Set the adapters link speed
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @dev: Pointer to class device.
+ * @attr: Unused.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
*
* Description:
* If val is in a valid range then set the adapter's link speed field and
@@ -5463,8 +5470,6 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
spin_lock_irq(shost->host_lock);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
@@ -6138,6 +6143,14 @@ LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
*/
LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
+/*
+ * lpfc_enable_mi: Enable FDMI MIB
+ * 0 = disabled
+ * 1 = enabled (default)
+ * Value range is [0,1].
+ */
+LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
+
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_nvme_info,
&dev_attr_scsi_stat,
@@ -6255,6 +6268,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_ras_fwlog_func,
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
+ &dev_attr_lpfc_enable_mi,
NULL,
};
@@ -6964,8 +6978,7 @@ lpfc_get_node_by_target(struct scsi_target *starget)
spin_lock_irq(shost->host_lock);
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
return ndlp;
@@ -7040,7 +7053,7 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
else
rport->dev_loss_tmo = 1;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
dev_info(&rport->dev, "Cannot find remote node to "
"set rport dev loss tmo, port_id x%x\n",
rport->port_id);
@@ -7056,7 +7069,7 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
#endif
}
-/**
+/*
* lpfc_rport_show_function - Return rport target information
*
* Description:
@@ -7105,6 +7118,7 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
/**
* lpfc_hba_log_verbose_init - Set hba's log verbose level
* @phba: Pointer to lpfc_hba struct.
+ * @verbose: Verbose level to set.
*
* This function is called by the lpfc_get_cfgparam() routine to set the
* module lpfc_log_verbose into the @phba cfg_log_verbose for use with
@@ -7359,6 +7373,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
+ lpfc_enable_mi_init(phba, lpfc_enable_mi);
if (phba->sli_rev != LPFC_SLI_REV4) {
/* NVME only supported on SLI4 */
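lpfc_enable_mi arrives through the driver's LPFC_ATTR_R macro family, which stamps out the show/init plumbing and a read-only sysfs attribute from a single declaration. A simplified sketch of the macro-generated-attribute technique (illustrative only, not lpfc's actual macro body):

#include <linux/device.h>
#include <linux/sysfs.h>

/* One macro emits the backing variable, a show routine, and a
 * read-only device attribute (a cut-down LPFC_ATTR_R analogue). */
#define DRV_ATTR_R(name, defval)					\
static unsigned int cfg_##name = (defval);				\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return sysfs_emit(buf, "%u\n", cfg_##name);			\
}									\
static DEVICE_ATTR_RO(name)

DRV_ATTR_R(enable_mi, 1);	/* 1 = enabled (default), range [0,1] */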
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6f9d648a9b9c..eed6ea5e0722 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -329,7 +329,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
- ndlp = iocb->ndlp;
+ ndlp = iocb->cmdiocbq->context_un.ndlp;
rmp = iocb->rmp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
@@ -366,8 +366,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
lpfc_free_bsg_buffers(phba, rmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
- lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
kfree(dd_data);
/* Complete the job if the job is still active */
@@ -408,6 +408,9 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
/* in case no data is transferred */
bsg_reply->reply_payload_rcv_len = 0;
+ if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
+ return -ENODEV;
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -417,20 +420,10 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto no_dd_data;
}
- if (!lpfc_nlp_get(ndlp)) {
- rc = -ENODEV;
- goto no_ndlp;
- }
-
- if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
- rc = -ENODEV;
- goto free_ndlp;
- }
-
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
- goto free_ndlp;
+ goto free_dd;
}
cmd = &cmdiocbq->iocb;
@@ -496,11 +489,10 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
- cmdiocbq->context_un.ndlp = ndlp;
+
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
- dd_data->context_un.iocb.ndlp = ndlp;
dd_data->context_un.iocb.rmp = rmp;
job->dd_data = dd_data;
@@ -514,8 +506,13 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->context_un.ndlp) {
+ rc = -ENODEV;
+ goto free_rmp;
+ }
+ iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
@@ -532,7 +529,7 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
}
/* iocb failed so cleanup */
- job->dd_data = NULL;
+ lpfc_nlp_put(ndlp);
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
@@ -544,9 +541,7 @@ free_bmp:
kfree(bmp);
free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
-free_ndlp:
- lpfc_nlp_put(ndlp);
-no_ndlp:
+free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
@@ -640,8 +635,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
}
}
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, cmdiocbq);
+
+ lpfc_nlp_put(ndlp);
kfree(dd_data);
/* Complete the job if the job is still active */
@@ -718,15 +714,14 @@ lpfc_bsg_rport_els(struct bsg_job *job)
goto release_ndlp;
}
- rpi = ndlp->nlp_rpi;
-
/* Transfer the request payload to allocated command dma buffer */
-
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
cmdsize);
+ rpi = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
else
@@ -752,8 +747,13 @@ lpfc_bsg_rport_els(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ cmdiocbq->context1 = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->context1) {
+ rc = -EIO;
+ goto linkdown_err;
+ }
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
@@ -769,11 +769,9 @@ lpfc_bsg_rport_els(struct bsg_job *job)
rc = -EIO;
}
- /* iocb failed so cleanup */
- job->dd_data = NULL;
+ /* I/O issue failed. Cleanup resources. */
linkdown_err:
- cmdiocbq->context1 = ndlp;
lpfc_els_free_iocb(phba, cmdiocbq);
release_ndlp:
@@ -902,11 +900,8 @@ diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
return 0;
}
-/**
+/*
* lpfc_bsg_ct_unsol_event - process an unsolicited CT command
- * @phba:
- * @pring:
- * @piocbq:
*
* This function is called when an unsolicited CT command is received. It
* forwards the event to any processes registered to receive CT events.
@@ -939,28 +934,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
- if (piocbq->iocb.ulpBdeCount == 0 ||
- piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
- goto error_ct_unsol_exit;
-
- if (phba->link_state == LPFC_HBA_ERROR ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
- goto error_ct_unsol_exit;
-
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
- dmabuf = bdeBuf1;
- else {
- dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
- piocbq->iocb.un.cont64[0].addrLow);
- dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
- }
- if (dmabuf == NULL)
- goto error_ct_unsol_exit;
- ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+ ct_req = (struct lpfc_sli_ct_request *)bdeBuf1;
evt_req_id = ct_req->FsType;
cmd = ct_req->CommandResponse.bits.CmdRsp;
- if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
@@ -1474,7 +1450,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @job: Pointer to the job object.
* @tag: tag index value into the ports context exchange array.
- * @bmp: Pointer to a dma buffer descriptor.
+ * @cmp: Pointer to a cmp dma buffer descriptor.
+ * @bmp: Pointer to a bmp dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
**/
static int
@@ -1490,6 +1467,15 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
unsigned long flags;
uint32_t creg_val;
+ ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2721 ndlp null for oxid %x SID %x\n",
+ phba->ct_ctx[tag].rxid,
+ phba->ct_ctx[tag].SID);
+ return IOCB_ERROR;
+ }
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -1540,12 +1526,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
goto issue_ct_rsp_exit;
}
- /* Check if the ndlp is active */
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
 /* get a reference count so the ndlp doesn't go away while
* we respond
*/
@@ -1580,7 +1560,11 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = ctiocb;
- dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
+ if (!dd_data->context_un.iocb.ndlp) {
+ rc = -IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
dd_data->context_un.iocb.rmp = NULL;
job->dd_data = dd_data;
@@ -1595,7 +1579,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
-
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
@@ -1609,6 +1592,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
/* iocb failed so cleanup */
job->dd_data = NULL;
+ lpfc_nlp_put(ndlp);
issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb);
@@ -3535,6 +3519,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
mb->mbxCommand);
return -EPERM;
}
+ break;
case MBX_WRITE_NV:
case MBX_WRITE_VPARMS:
case MBX_LOAD_SM:
@@ -3886,9 +3871,9 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
/**
* lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
+ * @job: Pointer to the job object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
 * non-embedded external buffers.
@@ -4075,8 +4060,9 @@ job_error:
/**
* lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
 * non-embedded external buffers.
@@ -4241,8 +4227,8 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
@@ -4393,7 +4379,7 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
/**
* lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
* @phba: Pointer to HBA context object.
- * @dmabuf: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
*
* This routine extracts the next mailbox read external buffer back to
* user space through BSG.
@@ -4463,6 +4449,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
/**
* lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
* @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine sets up the next mailbox read external buffer obtained
@@ -4588,8 +4575,8 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
* command with multiple non-embedded external buffers.
@@ -4633,8 +4620,8 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
/**
* lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine checkes and handles non-embedded multi-buffer SLI_CONFIG
* (0x9B) mailbox commands and external buffers.
@@ -4707,7 +4694,7 @@ sli_cfg_ext_error:
/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a mailbox object.
+ * @job: Pointer to the job object.
* @vport: Pointer to a vport object.
*
* Allocate a tracking object, mailbox command memory, get a mailbox
@@ -5935,7 +5922,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
}
}
if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
@@ -5972,7 +5959,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
}
}
if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
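Every lpfc_bsg.c hunk above follows one ordering rule: take the lpfc_nlp_get() reference immediately before lpfc_sli_issue_iocb(), release it locally only on the failure path, and on success let the completion handler drop it after freeing the iocb resources. A sketch of that ownership handoff, using hypothetical node_get()/node_put()/issue_cmd() stand-ins:

#include <linux/errno.h>

struct node;
struct cmd { struct node *node; };

struct node *node_get(struct node *n);	/* NULL if node is going away */
void node_put(struct node *n);
int issue_cmd(struct cmd *c);		/* 0 on success */

static int submit_with_node_ref(struct cmd *c, struct node *n)
{
	c->node = node_get(n);		/* ref taken just before issue */
	if (!c->node)
		return -ENODEV;

	if (issue_cmd(c)) {
		node_put(n);		/* issue failed: give the ref back */
		c->node = NULL;
		return -EIO;
	}
	return 0;			/* success: completion owns the ref */
}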
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 782f6f76f18a..f78e52a18b0b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -88,8 +88,6 @@ void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
-struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
- struct lpfc_nodelist *, int);
void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_vport *);
@@ -322,8 +320,12 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
+int lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe);
+int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb, void *cmpl);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
@@ -348,7 +350,7 @@ int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *);
+ struct lpfc_iocbq *, void *);
int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
uint64_t, lpfc_ctx_cmd);
@@ -371,6 +373,8 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
+ struct lpfc_wcqe_complete *w);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -592,11 +596,13 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd);
-void lpfc_nvme_cmd_template(void);
+void lpfc_wqe_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
-void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
+extern union lpfc_wqe128 lpfc_iread_cmd_template;
+extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
+extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index a8bf4d0d58f0..dd0b432f7ac5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -99,21 +99,265 @@ lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
}
+/**
+ * lpfc_ct_unsol_cmpl - Completion callback function for unsol ct commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ * This routine is the callback function for the unsolicited CT reject command.
+ * This routine is the callback function for issuing unsol ct reject command.
+ * The memory allocated in the reject command path is freed up here.
+ **/
+static void
+lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp, *bmp;
+
+ ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ if (ndlp)
+ lpfc_nlp_put(ndlp);
+
+ mp = cmdiocb->context2;
+ bmp = cmdiocb->context3;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ cmdiocb->context2 = NULL;
+ }
+
+ if (bmp) {
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+ cmdiocb->context3 = NULL;
+ }
+
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+/**
+ * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
+ * @ndlp: pointer to a node-list data structure.
+ * @ct_req: pointer to the CT request data structure.
+ * @rx_id: rx_id of the received UNSOL CT command
+ * @ox_id: ox_id of the UNSOL CT command
+ *
+ * This routine is invoked by the lpfc_ct_handle_mibreq routine to send
+ * a reject response for unhandled CT MIB commands.
+ **/
+static void
+lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
+ struct lpfc_sli_ct_request *ct_req,
+ u16 rx_id, u16 ox_id)
+{
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ct_request *ct_rsp;
+ struct lpfc_iocbq *cmdiocbq = NULL;
+ struct lpfc_dmabuf *bmp = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ struct ulp_bde64 *bpl;
+ IOCB_t *icmd;
+ u8 rc = 0;
+
+ /* fill in BDEs for command */
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp) {
+ rc = 1;
+ goto ct_exit;
+ }
+
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
+ if (!mp->virt) {
+ rc = 2;
+ goto ct_free_mp;
+ }
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+ if (!bmp) {
+ rc = 3;
+ goto ct_free_mpvirt;
+ }
+
+ bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
+ if (!bmp->virt) {
+ rc = 4;
+ goto ct_free_bmp;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ memset(bpl, 0, sizeof(struct ulp_bde64));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
+ memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));
+
+ ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ct_rsp->RevisionId.bits.InId = 0;
+ ct_rsp->FsType = ct_req->FsType;
+ ct_rsp->FsSubType = ct_req->FsSubType;
+ ct_rsp->CommandResponse.bits.Size = 0;
+ ct_rsp->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
+ ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
+ ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = 5;
+ goto ct_free_bmpvirt;
+ }
+
+ icmd = &cmdiocbq->iocb;
+ icmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.genreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ icmd->un.genreq64.w5.hcsw.Fctl = (LS | LA);
+ icmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
+ icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ /* Save for completion so we can release these resources */
+ cmdiocbq->context1 = lpfc_nlp_get(ndlp);
+ cmdiocbq->context2 = (uint8_t *)mp;
+ cmdiocbq->context3 = (uint8_t *)bmp;
+ cmdiocbq->iocb_cmpl = lpfc_ct_unsol_cmpl;
+ icmd->ulpContext = rx_id; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = ox_id;
+ icmd->un.ulpWord[3] =
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ icmd->ulpTimeout = (3 * phba->fc_ratov);
+
+ cmdiocbq->retry = 0;
+ cmdiocbq->vport = vport;
+ cmdiocbq->context_un.ndlp = NULL;
+ cmdiocbq->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ if (!rc)
+ return;
+
+ rc = 6;
+ lpfc_nlp_put(ndlp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ct_free_bmpvirt:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ct_free_bmp:
+ kfree(bmp);
+ct_free_mpvirt:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ct_free_mp:
+ kfree(mp);
+ct_exit:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "6440 Unsol CT: Rsp err %d Data: x%x\n",
+ rc, vport->fc_flag);
+}
+
+/**
+ * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @ctiocbq: pointer to lpfc CT command iocb data structure.
+ *
+ * This routine is used for processing the IOCB associated with an unsolicited
+ * CT MIB request. It first determines whether there is an existing ndlp that
+ * matches the DID from the unsolicited IOCB. If not, it will return.
+ **/
+static void
+lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
+{
+ struct lpfc_sli_ct_request *ct_req;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_vport *vport = NULL;
+ IOCB_t *icmd = &ctiocbq->iocb;
+ u32 mi_cmd, vpi;
+ u32 did = 0;
+
+ vpi = ctiocbq->iocb.unsli3.rcvsli3.vpi;
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "6437 Unsol CT: VPORT NULL vpi : x%x\n",
+ vpi);
+ return;
+ }
+
+ did = ctiocbq->iocb.un.rcvels.remoteID;
+ if (icmd->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6438 Unsol CT: status:x%x/x%x did : x%x\n",
+ icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ return;
+ }
+
+ /* Ignore traffic received during vport shutdown */
+ if (vport->fc_flag & FC_UNLOADING)
+ return;
+
+ ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6439 Unsol CT: NDLP Not Found for DID : x%x",
+ did);
+ return;
+ }
+
+ ct_req = ((struct lpfc_sli_ct_request *)
+ (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+
+ mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
+ lpfc_ct_reject_event(ndlp, ct_req,
+ ctiocbq->iocb.ulpContext,
+ ctiocbq->iocb.unsli3.rcvsli3.ox_id);
+}
+
+/**
+ * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @ctiocbq: pointer to lpfc ct iocb data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the appropriate
+ * routine after properly setting up the iocb buffer from the SLI ring on
+ * which the unsolicited event was received.
+ **/
void
lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocbq)
+ struct lpfc_iocbq *ctiocbq)
{
struct lpfc_dmabuf *mp = NULL;
- IOCB_t *icmd = &piocbq->iocb;
+ IOCB_t *icmd = &ctiocbq->iocb;
int i;
struct lpfc_iocbq *iocbq;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
uint32_t size;
struct list_head head;
- struct lpfc_dmabuf *bdeBuf;
+ struct lpfc_sli_ct_request *ct_req;
+ struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2;
+ struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3;
- if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
- return;
+ ctiocbq->context1 = NULL;
+ ctiocbq->context2 = NULL;
+ ctiocbq->context3 = NULL;
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -127,46 +371,75 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return;
}
- /* If there are no BDEs associated with this IOCB,
- * there is nothing to do.
+ /* If there are no BDEs associated
+ * with this IOCB, there is nothing to do.
*/
if (icmd->ulpBdeCount == 0)
return;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ ctiocbq->context2 = bdeBuf1;
+ if (icmd->ulpBdeCount == 2)
+ ctiocbq->context3 = bdeBuf2;
+ } else {
+ dma_addr = getPaddr(icmd->un.cont64[0].addrHigh,
+ icmd->un.cont64[0].addrLow);
+ ctiocbq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+ dma_addr);
+ if (icmd->ulpBdeCount == 2) {
+ dma_addr = getPaddr(icmd->un.cont64[1].addrHigh,
+ icmd->un.cont64[1].addrLow);
+ ctiocbq->context3 = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ dma_addr);
+ }
+ }
+
+ ct_req = ((struct lpfc_sli_ct_request *)
+ (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+
+ if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
+ ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
+ lpfc_ct_handle_mibreq(phba, ctiocbq);
+ } else {
+ if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
+ return;
+ }
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
+ list_add_tail(&head, &ctiocbq->list);
list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
continue;
- bdeBuf = iocbq->context2;
+ bdeBuf1 = iocbq->context2;
iocbq->context2 = NULL;
size = icmd->un.cont64[0].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
- lpfc_in_buf_free(phba, bdeBuf);
+ lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
+ lpfc_in_buf_free(phba, bdeBuf1);
if (icmd->ulpBdeCount == 2) {
- bdeBuf = iocbq->context3;
+ bdeBuf2 = iocbq->context3;
iocbq->context3 = NULL;
size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
+ lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
size);
- lpfc_in_buf_free(phba, bdeBuf);
+ lpfc_in_buf_free(phba, bdeBuf2);
}
}
list_del(&head);
} else {
INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
+ list_add_tail(&head, &ctiocbq->list);
list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
for (i = 0; i < icmd->ulpBdeCount; i++) {
- paddr = getPaddr(icmd->un.cont64[i].addrHigh,
- icmd->un.cont64[i].addrLow);
+ dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
+ icmd->un.cont64[i].addrLow);
mp = lpfc_sli_ringpostbuf_get(phba, pring,
- paddr);
+ dma_addr);
size = icmd->un.cont64[i].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
@@ -275,10 +548,8 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
struct lpfc_dmabuf *buf_ptr;
- if (ctiocb->context_un.ndlp) {
- lpfc_nlp_put(ctiocb->context_un.ndlp);
- ctiocb->context_un.ndlp = NULL;
- }
+ /* I/O job is complete so context is now invalid */
+ ctiocb->context_un.ndlp = NULL;
if (ctiocb->context1) {
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
@@ -345,7 +616,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Save for completion so we can release these resources */
geniocb->context1 = (uint8_t *) inp;
geniocb->context2 = (uint8_t *) outp;
- geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
/* Fill in payload, bp points to frame payload */
icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
@@ -384,16 +654,21 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
+ geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
+ if (!geniocb->context_un.ndlp)
+ goto out;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
geniocb->context_un.ndlp = NULL;
lpfc_nlp_put(ndlp);
- lpfc_sli_release_iocbq(phba, geniocb);
- return 1;
+ goto out;
}
return 0;
+out:
+ lpfc_sli_release_iocbq(phba, geniocb);
+ return 1;
}
/*
@@ -467,7 +742,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
ndlp = lpfc_setup_disc_node(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
@@ -518,7 +793,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
* Don't even bother to send GFF_ID.
*/
ndlp = lpfc_findnode_did(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
(ndlp->nlp_type &
(NLP_FCP_TARGET | NLP_NVME_TARGET))) {
if (fc4_type == FC_TYPE_FCP)
@@ -550,7 +825,6 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
char *str;
if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
@@ -579,12 +853,12 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_DID == Did)
ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
else
ndlp->nlp_flag |= NLP_NVMET_RECOV;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
}
}
@@ -600,7 +874,6 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
uint32_t Did, CTentry;
int Cnt;
struct list_head head;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = NULL;
lpfc_set_disctmo(vport);
@@ -646,9 +919,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
}
@@ -861,8 +1134,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_start(vport);
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -1068,8 +1341,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_start(vport);
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
static void
@@ -1084,7 +1357,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_sli_ct_request *CTrsp;
int did, rc, retry;
uint8_t fbits;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
did = be32_to_cpu(did);
@@ -1150,7 +1423,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->retry, did);
if (rc == 0) {
/* success */
+ free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
}
@@ -1164,7 +1439,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* This is a target port, unregistered port, or the GFF_ID failed */
ndlp = lpfc_setup_disc_node(vport, did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0242 Process x%x GFF "
"NameServer Rsp Data: x%x x%x x%x\n",
@@ -1203,7 +1478,10 @@ out:
}
lpfc_disc_start(vport);
}
+
+ free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
@@ -1217,7 +1495,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp;
int did;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *ns_ndlp = NULL;
uint32_t fc4_data_0, fc4_data_1;
did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
@@ -1227,6 +1506,9 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ /* Preserve the nameserver node to release the reference. */
+ ns_ndlp = cmdiocb->context_un.ndlp;
+
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
@@ -1242,6 +1524,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
"NVME" : " ");
+ /* Lookup the NPort_ID queried in the GFT_ID and find the
+ * driver's local node. It's an error if the driver
+ * doesn't have one.
+ */
ndlp = lpfc_findnode_did(vport, did);
if (ndlp) {
/* The bitmask value for FCP and NVME FCP types is
@@ -1287,6 +1573,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ns_ndlp);
}
static void
@@ -1356,8 +1643,8 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
- cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -1599,8 +1886,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
int rc = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
- || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
rc=1;
goto ns_cmd_exit;
}
@@ -1841,11 +2127,6 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
}
rc=6;
- /* Decrement ndlp reference count to release ndlp reference held
- * for the failed command's callback function.
- */
- lpfc_nlp_put(ndlp);
-
ns_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
@@ -1882,7 +2163,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
uint32_t latt, cmd, err;
latt = lpfc_els_chk_latt(vport);
@@ -1928,10 +2209,13 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
irsp->un.ulpWord[4]);
}
+
+ free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
/* Check for a CT LS_RJT response */
@@ -1959,6 +2243,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
+ return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -1968,12 +2253,21 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
case SLI_MGMT_RPA:
+ /* No retry on Vendor RPA */
+ if (phba->link_flag & LS_CT_VEN_RPA) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_DISCOVERY | LOG_ELS,
+ "6460 VEN FDMI RPA failure\n");
+ phba->link_flag &= ~LS_CT_VEN_RPA;
+ return;
+ }
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
/* Fallback to FDMI-1 */
vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -2004,6 +2298,33 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
break;
+ case SLI_MGMT_RPA:
+ if (vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->cfg_enable_mi &&
+ phba->sli4_hba.pc_sli4_params.mi_ver > LPFC_MIB1_SUPPORT) {
+ /* mi is only for the physical port, no vports */
+ if (phba->link_flag & LS_CT_VEN_RPA) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6449 VEN RPA Success\n");
+ break;
+ }
+
+ if (lpfc_fdmi_cmd(vport, ndlp, cmd,
+ LPFC_FDMI_VENDOR_ATTR_mi) == 0)
+ phba->link_flag |= LS_CT_VEN_RPA;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6458 Send MI FDMI:%x Flag x%x\n",
+ phba->sli4_hba.pc_sli4_params.mi_value,
+ phba->link_flag);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6459 No FDMI VEN MI support - "
+ "RPA Success\n");
+ }
+ break;
}
return;
}
@@ -2033,7 +2354,7 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
return;
ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
/* Check if system hostname changed */
@@ -2974,6 +3295,28 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
return size;
}
+static int
+lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_def *ad)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+ char mibrevision[16];
+
+ ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ memset(ae, 0, 256);
+ sprintf(mibrevision, "ELXE2EM:%04d",
+ phba->sli4_hba.pc_sli4_params.mi_value);
+ strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
+ len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+ size = FOURBYTES + len;
+ ad->AttrLen = cpu_to_be16(size);
+ ad->AttrType = cpu_to_be16(RPRT_VENDOR_MI);
+ return size;
+}
+
/* RHBA attribute jump table */
int (*lpfc_fdmi_hba_action[])
(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
@@ -3025,6 +3368,7 @@ int (*lpfc_fdmi_port_action[])
lpfc_fdmi_smart_attr_port_info, /* bit20 RPRT_SMART_PORT_INFO */
lpfc_fdmi_smart_attr_qos, /* bit21 RPRT_SMART_QOS */
lpfc_fdmi_smart_attr_security, /* bit22 RPRT_SMART_SECURITY */
+ lpfc_fdmi_vendor_attr_mi, /* bit23 RPRT_VENDOR_MI */
};
/**
@@ -3056,7 +3400,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return 0;
cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
@@ -3250,12 +3594,6 @@ port_out:
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
return 0;
- /*
- * Decrement ndlp reference count to release ndlp reference held
- * for the failed command's callback function.
- */
- lpfc_nlp_put(ndlp);
-
fdmi_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
fdmi_cmd_free_bmp:
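The reworked lpfc_ct_unsol_event() above now routes on the CT preamble before the legacy BSG path runs: frames carrying the management-service FsType with the MIB FsSubType go to lpfc_ct_handle_mibreq(), which for now answers every MIB command with an FS_RJT, while everything else still flows to lpfc_bsg_ct_unsol_event(). A compressed sketch of that dispatch; the constant values and handler names are assumptions, not the driver's:

#include <linux/types.h>

/* Assumed values standing in for SLI_CT_MANAGEMENT_SERVICE and
 * SLI_CT_MIB_Subtypes. */
#define CT_FS_TYPE_MGMT		0xFA
#define CT_FS_SUBTYPE_MIB	0x11

struct ct_preamble {
	u8 fs_type;
	u8 fs_subtype;
};

void handle_mib_request(struct ct_preamble *p);		/* hypothetical */
void handle_bsg_ct_event(struct ct_preamble *p);	/* hypothetical */

static void route_unsol_ct(struct ct_preamble *p)
{
	if (p->fs_type == CT_FS_TYPE_MGMT &&
	    p->fs_subtype == CT_FS_SUBTYPE_MIB)
		handle_mib_request(p);	/* rejects with FS_RJT for now */
	else
		handle_bsg_ct_event(p);	/* legacy unsolicited CT path */
}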
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c9a327b13e5c..bc79a017e1a2 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -895,8 +895,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
if (ndlp->nlp_type & NLP_NVME_INITIATOR)
len += scnprintf(buf + len,
size - len, "NVME_INITIATOR ");
- len += scnprintf(buf+len, size-len, "usgmap:%x ",
- ndlp->nlp_usg_map);
len += scnprintf(buf+len, size-len, "refcnt:%x",
kref_read(&ndlp->kref));
if (iocnt) {
@@ -957,13 +955,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
- spin_lock(&phba->hbalock);
+ spin_lock(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
else
nrport = NULL;
- spin_unlock(&phba->hbalock);
+ spin_unlock(&ndlp->lock);
if (!nrport)
continue;
@@ -3341,7 +3339,6 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
break;
case LPFC_PCI_CFG_BROWSE: /* browse all */
goto pcicfg_browse;
- break;
default:
/* illegal count */
len = 0;
@@ -4187,6 +4184,7 @@ lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
/**
* lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
* @pbuffer: The pointer to buffer to copy the read data into.
+ * @len: Length of the buffer.
* @pque: The pointer to the queue to be read.
* @index: The index into the queue entry.
*
@@ -4381,7 +4379,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
goto error_out;
- break;
+
case LPFC_IDIAG_CQ:
/* MBX complete queue */
if (phba->sli4_hba.mbx_cq &&
@@ -4433,7 +4431,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
goto error_out;
- break;
+
case LPFC_IDIAG_MQ:
/* MBX work queue */
if (phba->sli4_hba.mbx_wq &&
@@ -4447,7 +4445,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
goto error_out;
- break;
+
case LPFC_IDIAG_WQ:
/* ELS work queue */
if (phba->sli4_hba.els_wq &&
@@ -4487,9 +4485,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
-
goto error_out;
- break;
+
case LPFC_IDIAG_RQ:
/* HDR queue */
if (phba->sli4_hba.hdr_rq &&
@@ -4514,10 +4511,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
goto error_out;
- break;
default:
goto error_out;
- break;
}
pass_check:
@@ -4766,7 +4761,7 @@ error_out:
* @phba: The pointer to hba structure.
* @pbuffer: The pointer to the buffer to copy the data to.
* @len: The length of bytes to copied.
- * @drbregid: The id to doorbell registers.
+ * @ctlregid: The id to doorbell registers.
*
* Description:
* This routine reads a control register and copies its content to the
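
The two debugfs hunks above are the first users of the finer-grained locking this series introduces: node-local state such as nlp_flag and the nrport pointer is now guarded by the node's own ndlp->lock rather than phba->hbalock (or, throughout lpfc_els.c below, shost->host_lock). A minimal sketch of the discipline, using a cut-down stand-in rather than the driver's real struct lpfc_nodelist:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative stand-in only; the real struct lpfc_nodelist lives in
 * lpfc_disc.h. */
struct demo_node {
	spinlock_t lock;	/* models the new ndlp->lock */
	u32 nlp_flag;
};

static void demo_node_init(struct demo_node *ndlp)
{
	spin_lock_init(&ndlp->lock);
	ndlp->nlp_flag = 0;
}

static void demo_node_set_flag(struct demo_node *ndlp, u32 flag)
{
	/* Only this node serializes; walks over other nodes no longer
	 * contend on an adapter- or host-wide lock. */
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= flag;
	spin_unlock_irq(&ndlp->lock);
}
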
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 482e4a888dae..ea07afcb750a 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -41,6 +41,7 @@ enum lpfc_work_type {
LPFC_EVT_DEV_LOSS,
LPFC_EVT_FASTPATH_MGMT_EVT,
LPFC_EVT_RESET_HBA,
+ LPFC_EVT_RECOVER_PORT
};
/* structure used to queue event to the discovery tasklet */
@@ -80,6 +81,9 @@ struct lpfc_nodelist {
struct list_head nlp_listp;
struct lpfc_name nlp_portname;
struct lpfc_name nlp_nodename;
+
+ spinlock_t lock; /* Node management lock */
+
uint32_t nlp_flag; /* entry flags */
uint32_t nlp_DID; /* FC D_ID of entry */
uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
@@ -115,12 +119,6 @@ struct lpfc_nodelist {
u8 nlp_nvme_info; /* NVME NSLER Support */
#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
- uint16_t nlp_usg_map; /* ndlp management usage bitmap */
-#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
-#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */
-#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */
-#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
-
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct lpfc_hba *phba;
struct fc_rport *rport; /* scsi_transport_fc port structure */
@@ -128,6 +126,7 @@ struct lpfc_nodelist {
struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
struct lpfc_work_evt dev_loss_evt;
+ struct lpfc_work_evt recovery_evt;
struct kref kref;
atomic_t cmd_pending;
uint32_t cmd_qdepth;
@@ -135,13 +134,17 @@ struct lpfc_nodelist {
unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
- uint32_t upcall_flags;
+ uint32_t fc4_xpt_flags;
#define NLP_WAIT_FOR_UNREG 0x1
+#define SCSI_XPT_REGD 0x2
+#define NVME_XPT_REGD 0x4
+
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
uint32_t nlp_defer_did;
};
+
struct lpfc_node_rrq {
struct list_head list;
uint16_t xritag;
@@ -170,7 +173,7 @@ struct lpfc_node_rrq {
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
-#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
+#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
@@ -189,32 +192,6 @@ struct lpfc_node_rrq {
#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
-
-/* ndlp usage management macros */
-#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
- & NLP_USG_NODE_ACT_BIT) \
- && \
- !((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_ACK_BIT))
-#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_NODE_ACT_BIT)
-#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- = NLP_USG_NODE_ACT_BIT)
-#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
- &= ~NLP_USG_NODE_ACT_BIT)
-#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_IACT_REQ_BIT)
-#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_IACT_REQ_BIT)
-#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_REQ_BIT)
-#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_FREE_REQ_BIT)
-#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
- & NLP_USG_FREE_ACK_BIT)
-#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
- |= NLP_USG_FREE_ACK_BIT)
-
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
* when Link Up discovery or Registered State Change Notification (RSCN)
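
With nlp_usg_map and the NLP_CHK/SET/CLR macro family removed, "active" now simply means the node's kref is nonzero: every user takes a reference before touching the node and drops it when done, and a failed get stands in for every old NLP_CHK_NODE_ACT() test. A minimal sketch of that lifetime model, assuming the driver's lpfc_nlp_get()/lpfc_nlp_put() follow the usual kref_get_unless_zero() pattern (the real helpers carry extra tracing and bookkeeping):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_node {
	struct kref kref;
	/* ... node state ... */
};

static void demo_node_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_node, kref));
}

/* Returns the node with a fresh reference, or NULL if it is already
 * on its way out -- the replacement for NLP_CHK_NODE_ACT(). */
static struct demo_node *demo_node_get(struct demo_node *ndlp)
{
	if (!ndlp || !kref_get_unless_zero(&ndlp->kref))
		return NULL;
	return ndlp;
}

static void demo_node_put(struct demo_node *ndlp)
{
	if (ndlp)
		kref_put(&ndlp->kref, demo_node_release);
}
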
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b60945182db8..96c087b8b474 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -300,10 +300,6 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
bpl->tus.w = le32_to_cpu(bpl->tus.w);
}
- /* prevent preparing iocb with NULL ndlp reference */
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto els_iocb_free_pbuf_exit;
elsiocb->context2 = pcmd;
elsiocb->context3 = pbuflist;
elsiocb->retry = retry;
@@ -378,7 +374,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
sp = &phba->fc_fabparam;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
err = 1;
goto fail;
}
@@ -418,10 +414,14 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
* for the callback routine.
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp) {
+ err = 6;
+ goto fail_no_ndlp;
+ }
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- err = 6;
+ err = 7;
goto fail_issue_reg_login;
}
@@ -432,6 +432,7 @@ fail_issue_reg_login:
* for the failed mbox command.
*/
lpfc_nlp_put(ndlp);
+fail_no_ndlp:
mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -471,7 +472,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
!(phba->link_flag & LS_LOOPBACK_MODE) &&
!(vport->fc_flag & FC_PT2PT)) {
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
rc = -ENODEV;
goto fail;
}
@@ -765,14 +766,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(np))
- continue;
if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&np->lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&np->lock);
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
@@ -908,11 +907,6 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
if (!ndlp)
goto fail;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if(!ndlp)
- goto fail;
}
memcpy(&ndlp->nlp_portname, &sp->portName,
@@ -921,9 +915,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sizeof(struct lpfc_name));
/* Set state will put ndlp onto node list if not already done */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -1095,6 +1089,7 @@ stop_rr_fcf_flogi:
/* Do not register VFI if the driver aborted FLOGI */
if (!lpfc_error_lost_link(irsp))
lpfc_issue_reg_vfi(vport);
+
lpfc_nlp_put(ndlp);
goto out;
}
@@ -1156,6 +1151,7 @@ stop_rr_fcf_flogi:
phba->fcf.current_rec.fabric_name[5],
phba->fcf.current_rec.fabric_name[6],
phba->fcf.current_rec.fabric_name[7]);
+
lpfc_nlp_put(ndlp);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
@@ -1187,7 +1183,6 @@ flogifail:
spin_unlock_irq(&phba->hbalock);
lpfc_nlp_put(ndlp);
-
if (!lpfc_error_lost_link(irsp)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -1205,6 +1200,7 @@ flogifail:
}
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -1344,7 +1340,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue FLOGI: opt:x%x",
phba->sli3_options, 0, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto out;
+
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+ if (rc == IOCB_ERROR)
+ lpfc_nlp_put(ndlp);
phba->hba_flag |= HBA_FLOGI_ISSUED;
@@ -1374,11 +1376,11 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_myDID = did;
}
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
- return 0;
+ if (!rc)
+ return 0;
+ out:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
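
This FLOGI hunk establishes the template the rest of the file repeats for PLOGI, PRLI, ADISC, LOGO, SCR, RSCN and RDF: take a node reference into elsiocb->context1 immediately before issuing, drop it again if the issue fails, and free the iocb on any failure path. A condensed sketch of that shape, reusing the demo_node helpers sketched above plus hypothetical demo_* stand-ins for lpfc_sli_issue_iocb() and lpfc_els_free_iocb():

#define IOCB_SUCCESS	0
#define IOCB_ERROR	1

struct demo_hba;			/* opaque in this sketch */

struct demo_iocbq {
	struct demo_node *context1;	/* the reference rides with the I/O */
};

static int demo_issue_iocb(struct demo_hba *phba, struct demo_iocbq *iocb)
{
	return IOCB_SUCCESS;		/* stub: pretend the HBA took it */
}

static void demo_free_iocb(struct demo_hba *phba, struct demo_iocbq *iocb)
{
	iocb->context1 = NULL;		/* detach the node; never put it */
}

static int demo_issue_els(struct demo_hba *phba, struct demo_iocbq *iocb,
			  struct demo_node *ndlp)
{
	iocb->context1 = demo_node_get(ndlp);
	if (!iocb->context1)
		goto node_err;		/* node is already being torn down */

	if (demo_issue_iocb(phba, iocb) == IOCB_ERROR) {
		demo_node_put(ndlp);	/* undo: the I/O never started */
		goto node_err;
	}
	return 0;			/* the completion handler puts it */

node_err:
	demo_free_iocb(phba, iocb);
	return 1;
}
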
/**
@@ -1421,9 +1423,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
icmd = &iocb->iocb;
if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (ndlp->nlp_DID == Fabric_DID))
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ if (ndlp && (ndlp->nlp_DID == Fabric_DID))
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
}
spin_unlock_irq(&phba->hbalock);
@@ -1464,13 +1466,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 0;
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
+
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
@@ -1511,13 +1509,12 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
return 0;
+
+ /* NPIV is only supported in Fabrics. */
+ ndlp->nlp_type |= NLP_FABRIC;
+
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
@@ -1562,7 +1559,7 @@ lpfc_more_plogi(struct lpfc_vport *vport)
}
/**
- * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
+ * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
* @phba: pointer to lpfc hba data structure.
* @prsp: pointer to response IOCB payload.
* @ndlp: pointer to a node-list data structure.
@@ -1597,10 +1594,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *new_ndlp;
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
@@ -1608,8 +1602,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
uint16_t keep_nlp_state;
u32 keep_nlp_fc4_type = 0;
struct lpfc_nvme_rport *keep_nrport = NULL;
- int put_node;
- int put_rport;
unsigned long *active_rrqs_xri_bitmap = NULL;
/* Fabric nodes can have the same WWPN so we don't bother searching
@@ -1627,7 +1619,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
/* return immediately if the WWPN matches ndlp */
- if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
+ if (new_ndlp == ndlp)
return ndlp;
if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -1662,28 +1654,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
phba->active_rrq_pool);
return ndlp;
}
- } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
- rc = memcmp(&ndlp->nlp_portname, name,
- sizeof(struct lpfc_name));
- if (!rc) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- new_ndlp = lpfc_enable_node(vport, new_ndlp,
- NLP_STE_UNUSED_NODE);
- if (!new_ndlp) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- keepDID = new_ndlp->nlp_DID;
- if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
- memcpy(active_rrqs_xri_bitmap,
- new_ndlp->active_rrqs_xri_bitmap,
- phba->cfg_rrq_xri_bitmap_sz);
} else {
keepDID = new_ndlp->nlp_DID;
if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -1711,7 +1681,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
ndlp->active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- spin_lock_irq(shost->host_lock);
+ /* Lock both ndlps */
+ spin_lock_irq(&ndlp->lock);
+ spin_lock_irq(&new_ndlp->lock);
keep_new_nlp_flag = new_ndlp->nlp_flag;
keep_nlp_flag = ndlp->nlp_flag;
new_ndlp->nlp_flag = ndlp->nlp_flag;
@@ -1742,7 +1714,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
else
ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&new_ndlp->lock);
+ spin_unlock_irq(&ndlp->lock);
/* Set nlp_states accordingly */
keep_nlp_state = new_ndlp->nlp_state;
@@ -1761,36 +1734,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
- /* Fix up the rport accordingly */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- if (rdata->pnode == ndlp) {
- /* break the link before dropping the ref */
- ndlp->rport = NULL;
- lpfc_nlp_put(ndlp);
- rdata->pnode = lpfc_nlp_get(new_ndlp);
- new_ndlp->rport = rport;
- }
- new_ndlp->nlp_type = ndlp->nlp_type;
- }
-
- /* Fix up the nvme rport */
- if (ndlp->nrport) {
- ndlp->nrport = NULL;
- lpfc_nlp_put(ndlp);
- }
-
- /* We shall actually free the ndlp with both nlp_DID and
- * nlp_portname fields equals 0 to avoid any ndlp on the
- * nodelist never to be used.
- */
- if (ndlp->nlp_DID == 0) {
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- }
-
/* Two ndlps cannot have the same did on the nodelist.
* Note: for this case, ndlp has a NULL WWPN so setting
* the nlp_fc4_type isn't required.
@@ -1803,10 +1746,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
active_rrqs_xri_bitmap,
phba->cfg_rrq_xri_bitmap_sz);
- if (!NLP_CHK_NODE_ACT(ndlp))
- lpfc_drop_node(vport, ndlp);
- }
- else {
+ } else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3180 PLOGI confirm SWAP: %x %x\n",
new_ndlp->nlp_DID, keepDID);
@@ -1833,29 +1773,16 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(ndlp->nlp_state == NLP_STE_MAPPED_NODE))
keep_nlp_state = NLP_STE_NPR_NODE;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
-
- /* Previous ndlp no longer active with nvme host transport.
- * Remove reference from earlier registration unless the
- * nvme host took care of it.
- */
- if (ndlp->nrport)
- lpfc_nlp_put(ndlp);
ndlp->nrport = keep_nrport;
-
- /* Fix up the rport accordingly */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
- }
}
+
+ /*
+ * If the ndlp is not associated with any rport, we can drop it here;
+ * otherwise let dev_loss_tmo_callbk trigger the DEVICE_RM event.
+ */
+ if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
mempool_free(active_rrqs_xri_bitmap,
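
The swap logic now ends with one disposition rule in place of the old rport/nrport fix-ups: if the node that lost the swap is in NPR state with no rport attached, nothing in the transport still references it and it is removed immediately via DEVICE_RM; if an rport remains, dev_loss_tmo_callbk raises DEVICE_RM when that rport is finally torn down. The rule, sketched with hypothetical demo_* stand-ins:

enum demo_state { DEMO_STE_NPR_NODE = 1 };
enum demo_event { DEMO_EVT_DEVICE_RM = 1 };

struct demo_vport;
struct demo_rport;

struct demo_disc_node {
	struct demo_rport *rport;
	enum demo_state state;
};

void demo_disc_state_machine(struct demo_vport *vport,
			     struct demo_disc_node *ndlp,
			     enum demo_event evt);

static void demo_dispose_after_swap(struct demo_vport *vport,
				    struct demo_disc_node *ndlp)
{
	/* No rport and already in NPR: no transport user is left, so
	 * drop the node now instead of waiting out devloss. */
	if (!ndlp->rport && ndlp->state == DEMO_STE_NPR_NODE)
		demo_disc_state_machine(vport, ndlp, DEMO_EVT_DEVICE_RM);
}
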
@@ -1933,7 +1860,7 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.elsreq64.remoteID);
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+ if (!ndlp || ndlp != rrq->ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2882 RRQ completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
@@ -1966,7 +1893,9 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
if (rrq)
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
/**
@@ -1996,7 +1925,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp, *free_ndlp;
struct lpfc_dmabuf *prsp;
int disc;
@@ -2010,23 +1939,23 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.elsreq64.remoteID);
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
irsp->un.elsreq64.remoteID,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpIoTag);
- goto out;
+ goto out_freeiocb;
}
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -2038,9 +1967,9 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
goto out;
}
@@ -2049,9 +1978,9 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
goto out;
}
@@ -2064,10 +1993,25 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
+
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (lpfc_error_lost_link(irsp))
+ goto check_plogi;
+ else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
+
+ /* As long as this node is not registered with the SCSI or NVMe
+ * transport, it is no longer an active node. Otherwise
+ * devloss handles the final cleanup.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ }
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
@@ -2075,9 +2019,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ NLP_EVT_CMPL_PLOGI);
}
+ check_plogi:
if (disc && vport->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
@@ -2093,7 +2038,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "PLOGI Cmpl PUT: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+out_freeiocb:
+ /* Release the reference on the original I/O request. */
+ free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
return;
}
@@ -2122,7 +2076,6 @@ int
lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost;
struct serv_parm *sp;
struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
@@ -2151,8 +2104,6 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
ndlp->nlp_defer_did = did;
return 0;
}
- if (!NLP_CHK_NODE_ACT(ndlp))
- ndlp = NULL;
}
/* If ndlp is not NULL, we will bump the reference count on it */
@@ -2162,10 +2113,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
@@ -2207,13 +2157,24 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
- ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (ret == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PLOGI: did:x%x refcnt %d",
+ did, kref_read(&ndlp->kref), 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto io_err;
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret) {
+ lpfc_nlp_put(ndlp);
+ goto io_err;
}
return 0;
+
+ io_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -2234,7 +2195,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
char *mode;
@@ -2245,13 +2205,13 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
/* Driver supports multiple FC4 types. Counters matter. */
vport->fc_prli_sent--;
ndlp->fc4_prli_sent--;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PRLI cmpl: status:x%x/x%x did:x%x",
@@ -2301,6 +2261,20 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+
+ /* As long as this node is not registered with the SCSI
+ * or NVMe transport and no other PRLIs are outstanding,
+ * it is no longer an active node. Otherwise devloss
+ * handles the final cleanup.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !ndlp->fc4_prli_sent) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ }
} else {
/* Good status, call state machine. However, if another
* PRLI is outstanding, don't call the state machine
@@ -2313,6 +2287,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
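
The failure legs added to the PLOGI, PRLI and ADISC completions all apply the same back-stop built on the new fc4_xpt_flags: if neither transport has registered the node (and, for PRLI, no other PRLI is still in flight), nobody else will clean it up, so the completion clears NLP_NPR_2B_DISC and removes the node with DEVICE_RM itself. The predicate, condensed (flag values taken from the lpfc_disc.h hunk above):

#include <linux/types.h>

#define SCSI_XPT_REGD	0x2	/* registered with the SCSI transport */
#define NVME_XPT_REGD	0x4	/* registered with the NVMe transport */

/* True when an ELS failure path may remove the node directly rather
 * than deferring the final cleanup to devloss. */
static bool demo_node_removable(u32 fc4_xpt_flags, u32 fc4_prli_sent)
{
	return !(fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
	       !fc4_prli_sent;
}
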
@@ -2341,7 +2316,7 @@ int
lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
struct lpfc_nvme_prli *npr_nvme;
@@ -2476,13 +2451,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
local_nlp_type &= ~NLP_FC4_NVME;
}
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue PRLI: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitPRLI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
/* The vport counters are used for lpfc_scan_finished, but
@@ -2491,15 +2462,18 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
vport->fc_prli_sent++;
ndlp->fc4_prli_sent++;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ spin_unlock_irq(&ndlp->lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PRLI: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto io_err;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto node_err;
/* The driver supports 2 FC4 types. Make sure
@@ -2508,8 +2482,17 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4 &&
local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
goto send_next_prli;
+ else
+ return 0;
- return 0;
+ node_err:
+ lpfc_nlp_put(ndlp);
+ io_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -2650,7 +2633,6 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
int disc;
@@ -2669,10 +2651,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* ADISC completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0104 ADISC completes to NPort x%x "
@@ -2681,9 +2663,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpTimeout, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
goto out;
}
@@ -2692,9 +2674,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_set_disctmo(vport);
}
goto out;
@@ -2705,19 +2687,35 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (lpfc_error_lost_link(irsp))
+ goto check_adisc;
+ else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
+
+ /* As long as this node is not registered with the SCSI or NVMe
+ * transport, it is no longer an active node. Otherwise
+ * devloss handles the final cleanup.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ }
} else
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
+ check_adisc:
/* Check to see if there are more ADISCs to be sent */
if (disc && vport->num_disc_nodes)
lpfc_more_adisc(vport);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -2745,7 +2743,7 @@ int
lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
struct lpfc_iocbq *elsiocb;
@@ -2771,24 +2769,31 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ap->DID = be32_to_cpu(vport->fc_myDID);
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue ADISC: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitADISC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_ADISC_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue ADISC: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -2809,7 +2814,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfcMboxq *mbox;
unsigned long flags;
@@ -2819,9 +2823,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context_un.rsp_iocb = rspiocb;
irsp = &(rspiocb->iocb);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
@@ -2831,8 +2835,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0105 LOGO completes to NPort x%x "
- "Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, vport->num_disc_nodes);
if (lpfc_els_chk_latt(vport)) {
@@ -2840,17 +2845,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- /* Check to see if link went down during discovery */
- if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
- /* NLP_EVT_DEVICE_RM should unregister the RPI
- * which should abort all outstanding IOs.
- */
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_DEVICE_RM);
- skip_recovery = 1;
- goto out;
- }
-
/* The LOGO will not be retried on failure. A LOGO was
* issued to the remote rport and a ACC or RJT or no Answer are
* all acceptable. Note the failure and move forward with
@@ -2872,8 +2866,24 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+ /* The driver sets this flag for an NPIV instance that doesn't want to
+ * log into the remote port.
+ */
+ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
+ /* Presume the node was released. */
+ return;
+ }
+
out:
+ /* The driver is done with the I/O. */
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -2908,9 +2918,9 @@ out:
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
skip_recovery == 0) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irqsave(shost->host_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3187 LOGO completes to NPort x%x: Start "
@@ -2919,8 +2929,21 @@ out:
irsp->un.ulpWord[4], irsp->ulpTimeout,
vport->num_disc_nodes);
lpfc_disc_start(vport);
+ return;
+ }
+
+ /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
+ * driver sends a LOGO to the rport to clean up. For fabric and
+ * initiator ports, clean up the node as long as the node is not
+ * registered with the transport.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
}
- return;
}
/**
@@ -2949,19 +2972,18 @@ int
lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_flag & NLP_LOGO_SND) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 0;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
@@ -2978,30 +3000,37 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd += sizeof(uint32_t);
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue LOGO: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -3024,6 +3053,7 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_nodelist *free_ndlp;
IOCB_t *irsp;
irsp = &rspiocb->iocb;
@@ -3041,7 +3071,11 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
+
+ free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(free_ndlp);
}
/**
@@ -3065,6 +3099,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd, *prsp;
u32 *pdata;
u32 cmd;
+ struct lpfc_nodelist *ndlp = cmdiocb->context1;
irsp = &rspiocb->iocb;
@@ -3143,6 +3178,7 @@ out:
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -3170,6 +3206,7 @@ out:
int
lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@@ -3184,22 +3221,13 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
@@ -3216,22 +3244,26 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
- lpfc_nlp_put(ndlp);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue SCR: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
+ /* Keep the ndlp just in case RDF is being sent */
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -3257,6 +3289,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
int
lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
@@ -3287,24 +3320,14 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
@@ -3319,29 +3342,31 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
event->portid.rscn_fid[2] = nportid & 0x000000FF;
+ phba->fc_stat.elsXmitRSCN++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue RSCN: did:x%x",
ndlp->nlp_DID, 0, 0);
- phba->fc_stat.elsXmitRSCN++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
if (!(vport->fc_flag & FC_PT2PT))
lpfc_nlp_put(ndlp);
-
return 0;
+io_err:
+ lpfc_nlp_put(ndlp);
+node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -3369,6 +3394,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
FARP *fp;
@@ -3386,21 +3412,12 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 1;
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RNID);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return 1;
- }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
@@ -3419,7 +3436,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ondlp = lpfc_findnode_did(vport, nportid);
- if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
+ if (ondlp) {
memcpy(&fp->OportName, &ondlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
@@ -3432,8 +3449,14 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitFARPR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
* lpfc_els_free_iocb routine to trigger the release of
* the node.
@@ -3474,6 +3497,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
struct lpfc_els_rdf_req *prdf;
struct lpfc_nodelist *ndlp;
uint16_t cmdsize;
+ int rc;
cmdsize = sizeof(*prdf);
@@ -3483,10 +3507,6 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp)
return -ENODEV;
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return -ENODEV;
}
/* RDF ELS is not required on an NPIV VN_Port. */
@@ -3497,13 +3517,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RDF);
- if (!elsiocb) {
- /* This will trigger the release of the node just
- * allocated
- */
- lpfc_nlp_put(ndlp);
+ if (!elsiocb)
return -ENOMEM;
- }
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)
@@ -3521,31 +3536,29 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
- "Issue RDF: did:x%x",
- ndlp->nlp_DID, 0, 0);
-
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6444 Xmit RDF to remote NPORT x%x\n",
ndlp->nlp_DID);
elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- /* The additional lpfc_nlp_put will cause the following
- * lpfc_els_free_iocb routine to trigger the rlease of
- * the node.
- */
- lpfc_nlp_put(ndlp);
- lpfc_els_free_iocb(phba, elsiocb);
- return -EIO;
- }
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
- /* An RDF was issued - this put ensures the ndlp is cleaned up
- * when the RDF completes.
- */
- lpfc_nlp_put(ndlp);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue RDF: did:x%x refcnt %d",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
}
/**
@@ -3568,9 +3581,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
if (!(nlp->nlp_flag & NLP_DELAY_TMO))
return;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&nlp->lock);
nlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&nlp->lock);
del_timer_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
@@ -3580,9 +3593,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
}
if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&nlp->lock);
nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&nlp->lock);
if (vport->num_disc_nodes) {
if (vport->port_state < LPFC_VPORT_READY) {
/* Check if there are more ADISCs to be sent */
@@ -3658,20 +3671,19 @@ void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
struct lpfc_vport *vport = ndlp->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
uint32_t cmd, retry;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
cmd = ndlp->nlp_last_elscmd;
ndlp->nlp_last_elscmd = 0;
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return;
}
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* If a discovery event readded nlp_delayfunc after timer
* firing and before processing the timer, cancel the
@@ -3800,7 +3812,6 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -3822,14 +3833,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = *elscmd++;
}
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
did = ndlp->nlp_DID;
else {
/* We should only hit this case for retrying PLOGI */
did = irsp->un.elsreq64.remoteID;
ndlp = lpfc_findnode_did(vport, did);
- if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- && (cmd != ELS_CMD_PLOGI))
+ if (!ndlp && (cmd != ELS_CMD_PLOGI))
return 1;
}
@@ -4059,9 +4069,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
retry = 0;
goto out_retry;
}
@@ -4157,16 +4167,16 @@ out_retry:
}
phba->fc_stat.elsXmitRetry++;
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
+ if (ndlp && delay) {
phba->fc_stat.elsDelayRetry++;
ndlp->nlp_retry = cmdiocb->retry;
/* delay is specified in milliseconds */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(delay));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
if ((cmd == ELS_CMD_PRLI) ||
@@ -4188,7 +4198,7 @@ out_retry:
lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PLOGI:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp) {
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
@@ -4314,27 +4324,10 @@ int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)elsiocb->context1;
- if (ndlp) {
- if (ndlp->nlp_flag & NLP_DEFER_RM) {
- lpfc_nlp_put(ndlp);
+ /* The I/O job is complete. Clear the context1 data. */
+ elsiocb->context1 = NULL;
- /* If the ndlp is not being used by another discovery
- * thread, free it.
- */
- if (!lpfc_nlp_not_used(ndlp)) {
- /* If ndlp is being used by another discovery
- * thread, just clear NLP_DEFER_RM
- */
- ndlp->nlp_flag &= ~NLP_DEFER_RM;
- }
- }
- else
- lpfc_nlp_put(ndlp);
- elsiocb->context1 = NULL;
- }
/* context2 = cmd, context2->next = rsp, context3 = bpl */
if (elsiocb->context2) {
if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
@@ -4409,10 +4402,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0109 ACC to LOGO completes to NPort x%x "
+ "0109 ACC to LOGO completes to NPort x%x refcnt %d"
"Data: x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ndlp->nlp_state, ndlp->nlp_rpi);
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
/* NPort Recovery mode or node is just allocated */
@@ -4434,6 +4427,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* At this point, the driver is done so release the IOCB
*/
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
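
Note the ownership change underpinning the last two hunks: lpfc_els_free_iocb() now only detaches context1 (the NLP_DEFER_RM special case is gone), so every completion handler pairs the free with an explicit lpfc_nlp_put() of the reference taken at submit time, as the LOGO ACC handler just did. The resulting ordering, sketched with the demo_* stand-ins from earlier:

static void demo_els_cmpl(struct demo_hba *phba, struct demo_iocbq *cmdiocb)
{
	struct demo_node *ndlp = cmdiocb->context1;

	/* ... process the response while the reference is still held ... */

	demo_free_iocb(phba, cmdiocb);	/* detaches context1 only */
	demo_node_put(ndlp);		/* pairs with the get at submit */
}
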
/**
@@ -4463,20 +4457,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
+ "0006 rpi x%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- if (NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_nlp_put(ndlp);
- /* This is the end of the default RPI cleanup logic for
- * this ndlp. If no other discovery threads are using
- * this ndlp, free all resources associated with it.
- */
- lpfc_nlp_not_used(ndlp);
- } else {
- lpfc_drop_node(ndlp->vport, ndlp);
- }
+ ndlp);
+ /* This is the end of the default RPI cleanup logic for
+ * this ndlp, and it could get released. Clear NLP_REG_LOGIN_SEND
+ * to prevent any further processing.
+ */
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ lpfc_nlp_put(ndlp);
+ lpfc_nlp_not_used(ndlp);
}
return;
@@ -4525,8 +4516,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* function can have cmdiocb->contest1 (ndlp) field set to NULL.
*/
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+ if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
/* A LS_RJT associated with Default RPI cleanup has its own
* separate code path.
*/
@@ -4535,7 +4525,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* Check to see if link went down during discovery */
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
+ if (!ndlp || lpfc_els_chk_latt(vport)) {
if (mbox) {
mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
if (mp) {
@@ -4544,8 +4534,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
mempool_free(mbox, phba->mbox_mem_pool);
}
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+ if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
if (lpfc_nlp_not_used(ndlp)) {
ndlp = NULL;
/* Indicate the node has already released,
@@ -4570,32 +4559,38 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
if (mbox) {
- if ((rspiocb->iocb.ulpStatus == 0)
- && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ if ((rspiocb->iocb.ulpStatus == 0) &&
+ (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
- (!(vport->fc_flag & FC_PT2PT)) &&
- (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
- ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY,
- "0314 PLOGI recov DID x%x "
- "Data: x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_state,
- ndlp->nlp_rpi, ndlp->nlp_flag);
- mp = mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt,
- mp->phys);
- kfree(mp);
+ (!(vport->fc_flag & FC_PT2PT))) {
+ if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "0314 PLOGI recov "
+ "DID x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID,
+ ndlp->nlp_state,
+ ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ mp = mbox->ctx_buf;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt,
+ mp->phys);
+ kfree(mp);
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto out;
}
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
}
/* Increment reference count to ndlp to hold the
* reference to ndlp for the callback function.
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto out;
+
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
@@ -4657,12 +4652,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
- spin_lock_irq(shost->host_lock);
+ if (ndlp && shost) {
+ spin_lock_irq(&ndlp->lock);
if (mbox)
ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* If the node is not being used by another discovery thread,
* and we are sending a reject, we are done with it.
@@ -4676,10 +4671,11 @@ out:
* the routine lpfc_els_free_iocb.
*/
cmdiocb->context1 = NULL;
-
}
+ /* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
return;
}
@@ -4713,7 +4709,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
LPFC_MBOXQ_t *mbox)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
@@ -4732,9 +4727,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 1;
}
@@ -4838,23 +4833,40 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
}
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
} else {
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
+ /* Xmit ELS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
+ "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+ "RPI: x%x, fc_flag x%x\n",
+ rc, elsiocb->iotag, elsiocb->sli4_xritag,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi, vport->fc_flag);
return 0;
+
+io_err:
+ lpfc_nlp_put(ndlp);
+node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -4884,13 +4896,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
LPFC_MBOXQ_t *mbox)
{
+ int rc;
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
- int rc;
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4925,13 +4937,21 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
phba->fc_stat.elsXmitLSRJT++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -4995,16 +5015,18 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ap->DID = be32_to_cpu(vport->fc_myDID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC ADISC: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -5015,6 +5037,12 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag);
return 0;
+
+io_err:
+ lpfc_nlp_put(ndlp);
+node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5162,18 +5190,25 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC PRLI: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+			      "Issue ACC PRLI: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5262,18 +5297,26 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC RNID: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC RNID: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
+
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5369,18 +5412,25 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
- "Issue ACC ECHO: did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
+ ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -5411,14 +5461,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
lpfc_issue_els_adisc(vport, ndlp, 0);
@@ -5469,8 +5517,6 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS PLOGIs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
@@ -5947,7 +5993,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
lpfc_max_els_tries, rdp_context->ndlp,
rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
- lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_rdp_context;
@@ -6021,18 +6066,24 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
bpl->tus.w = le32_to_cpu(bpl->tus.w);
phba->fc_stat.elsXmitACC++;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_rdp_context;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
- kfree(rdp_context);
+ goto free_rdp_context;
- return;
error:
cmdsize = 2 * sizeof(uint32_t);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
- lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_rdp_context;
@@ -6047,11 +6098,23 @@ error:
phba->fc_stat.elsXmitLSRJT++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_rdp_context;
+ }
- if (rc == IOCB_ERROR)
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
+
free_rdp_context:
+ /* This reference put is for the original unsolicited RDP. If the
+ * iocb prep failed, there is no reference to remove.
+ */
+ lpfc_nlp_put(ndlp);
kfree(rdp_context);
}
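Also worth noting in the lpfc_els_rdp_cmpl rework: success and failure paths alike now reach free_rdp_context, which owns the single put for the reference taken when the unsolicited RDP arrived. The single-exit ownership idea in miniature, again using the demo_* stand-ins from the earlier sketches (demo_send_acc() is hypothetical):

struct demo_ctx {
	struct demo_node *node;	/* reference taken at receive time */
};

static int demo_send_acc(struct demo_ctx *ctx);	/* nonzero on error */

/* However the response attempt ends, exactly one path drops the
 * receive-time reference and frees the context.
 */
static void demo_rdp_cmpl(struct demo_ctx *ctx)
{
	if (demo_send_acc(ctx))
		goto free_ctx;

	/* ... success-only bookkeeping ... */

free_ctx:
	kref_put(&ctx->node->kref, demo_node_release);
	kfree(ctx);
}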
@@ -6155,6 +6218,11 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
cmd = &cmdiocb->iocb;
rdp_context->ndlp = lpfc_nlp_get(ndlp);
+ if (!rdp_context->ndlp) {
+ kfree(rdp_context);
+ rjt_err = LSRJT_UNABLE_TPC;
+ goto error;
+ }
rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
rdp_context->rx_id = cmd->ulpContext;
rdp_context->cmpl = lpfc_els_rdp_cmpl;
@@ -6249,10 +6317,19 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lcb_res->lcb_duration = lcb_context->duration;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
+
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- lpfc_els_free_iocb(phba, elsiocb);
+ if (!rc)
+ goto out;
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ out:
kfree(lcb_context);
return;
@@ -6279,9 +6356,17 @@ error:
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto free_lcb_context;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
free_lcb_context:
kfree(lcb_context);
}
@@ -6381,7 +6466,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint8_t *lp;
struct fc_lcb_request_frame *beacon;
struct lpfc_lcb_context *lcb_context;
- uint8_t state, rjt_err;
+ u8 state, rjt_err = 0;
struct ls_rjt stat;
pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
@@ -6427,15 +6512,22 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
lcb_context->rx_id = cmdiocb->iocb.ulpContext;
lcb_context->ndlp = lpfc_nlp_get(ndlp);
+ if (!lcb_context->ndlp) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ goto rjt_free;
+ }
+
if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
"0193 failed to send mail box");
- kfree(lcb_context);
lpfc_nlp_put(ndlp);
rjt_err = LSRJT_UNABLE_TPC;
- goto rjt;
+ goto rjt_free;
}
return 0;
+
+rjt_free:
+ kfree(lcb_context);
rjt:
memset(&stat, 0, sizeof(stat));
stat.un.b.lsRjtRsnCode = rjt_err;
@@ -6578,8 +6670,7 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
/* Move all affected nodes by pending RSCNs to NPR state. */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+ if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
@@ -6914,8 +7005,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
vport->num_disc_nodes = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer. Need to
* know how many gidfts were issued. If none, then just
* flush the RSCN. Otherwise, the outstanding requests
@@ -6933,13 +7023,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
} else {
/* Nameserver login in question. Revalidate. */
if (ndlp) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_PLOGI_ISSUE);
- if (!ndlp) {
- lpfc_els_flush_rscn(vport);
- return 0;
- }
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
} else {
ndlp = lpfc_nlp_init(vport, NameServer_DID);
if (!ndlp) {
@@ -7283,6 +7368,7 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
static void
lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ int rc = 0;
MAILBOX_t *mb;
IOCB_t *icmd;
struct RLS_RSP *rls_rsp;
@@ -7344,8 +7430,19 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
- lpfc_els_free_iocb(phba, elsiocb);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
+ return;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
}
/**
@@ -7386,6 +7483,8 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
cmdiocb->iocb.ulpContext)); /* rx_id */
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto node_err;
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -7396,6 +7495,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* command.
*/
lpfc_nlp_put(ndlp);
+node_err:
mempool_free(mbox, phba->mbox_mem_pool);
}
reject_out:
@@ -7433,6 +7533,7 @@ static int
lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct ls_rjt stat;
struct RTV_RSP *rtv_rsp;
@@ -7482,8 +7583,17 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 0;
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ }
return 0;
reject_out:
@@ -7523,7 +7633,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp != rrq->ndlp)
ndlp = rrq->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return 1;
/* If ndlp is not NULL, we will bump the reference count on it */
@@ -7552,13 +7662,20 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
did, rrq->xritag, rrq->rxid);
elsiocb->context_un.rrq = rrq;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
- ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
- if (ret == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -7611,6 +7728,7 @@ static int
lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd, *oldcmd;
RPL_RSP rpl_rsp;
@@ -7652,12 +7770,20 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
ndlp->nlp_rpi);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -7994,7 +8120,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
else {
struct lpfc_nodelist *ndlp;
ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
remote_ID = ndlp->nlp_DID;
}
list_add_tail(&piocb->dlist, &abort_list);
@@ -8011,7 +8137,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
@@ -8111,7 +8237,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
@@ -8221,7 +8347,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
uint32_t *pcmd;
ndlp = cmdiocbp->context1;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
return;
if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
@@ -8447,7 +8573,6 @@ static void
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
{
- struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
uint32_t *payload, payload_len;
@@ -8497,20 +8622,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
ndlp->nlp_type |= NLP_FABRIC;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto dropit;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- newnode = 1;
- if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
- ndlp->nlp_type |= NLP_FABRIC;
} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* This is similar to the new node path */
- ndlp = lpfc_nlp_get(ndlp);
- if (!ndlp)
- goto dropit;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
}
@@ -8521,17 +8633,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Do not process any unsolicited ELS commands
* if the ndlp is in DEV_LOSS
*/
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (newnode)
lpfc_nlp_put(ndlp);
goto dropit;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto dropit;
elsiocb->vport = vport;
if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
@@ -8540,9 +8653,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0112 ELS command x%x received from NPORT x%x "
- "Data: x%x x%x x%x x%x\n",
- cmd, did, vport->port_state, vport->fc_flag,
- vport->fc_myDID, vport->fc_prevDID);
+ "refcnt %d Data: x%x x%x x%x x%x\n",
+ cmd, did, kref_read(&ndlp->kref), vport->port_state,
+ vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
/* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
@@ -8593,9 +8706,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@@ -8622,7 +8735,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_LOGO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8664,7 +8778,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRSCN++;
lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_ADISC:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8742,7 +8857,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvLIRR++;
lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RLS:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8752,7 +8868,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRLS++;
lpfc_els_rcv_rls(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RPL:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8762,7 +8879,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRPL++;
lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RNID:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8772,7 +8890,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRNID++;
lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RTV:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8781,7 +8900,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRTV++;
lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_RRQ:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8791,7 +8911,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRRQ++;
lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_ECHO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8801,7 +8922,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvECHO++;
lpfc_els_rcv_echo(vport, elsiocb, ndlp);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
case ELS_CMD_REC:
/* receive this due to exchange closed */
@@ -8832,7 +8954,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"0115 Unknown ELS command x%x "
"received from NPORT x%x\n", cmd, did);
if (newnode)
- lpfc_nlp_put(ndlp);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
break;
}
@@ -8843,9 +8966,14 @@ lsrjt:
stat.un.b.lsRjtRsnCode = rjt_err;
stat.un.b.lsRjtRsnCodeExp = rjt_exp;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
- NULL);
+ NULL);
+ /* Remove the reference from above for new nodes. */
+ if (newnode)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
}
+ /* Release the reference on this elsiocb, not the ndlp. */
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = NULL;
@@ -8987,13 +9115,9 @@ lpfc_start_fdmi(struct lpfc_vport *vport)
return;
}
}
- if (!NLP_CHK_NODE_ACT(ndlp))
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (ndlp) {
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
- }
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
/**
@@ -9023,9 +9147,9 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_DISC_DELAYED) {
spin_unlock_irq(shost->host_lock);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "3334 Delay fc port discovery for %d seconds\n",
- phba->fc_ratov);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "3334 Delay fc port discovery for %d secs\n",
+ phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
return;
@@ -9045,19 +9169,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
"0251 NameServer login: no memory\n");
return;
}
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp) {
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- lpfc_disc_start(vport);
- return;
- }
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0348 NameServer login: node freed\n");
- return;
- }
}
+
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -9177,8 +9290,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
}
- } else
+ } else {
lpfc_do_scr_ns_plogi(phba, vport);
+ }
}
mbox_err_exit:
/* Now, we decrement the ndlp reference count held for this
@@ -9211,6 +9325,11 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_reg_vpi(vport, mbox);
mbox->vport = vport;
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto mbox_err_exit;
+ }
+
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
@@ -9283,7 +9402,6 @@ void
lpfc_retry_pport_discovery(struct lpfc_hba *phba)
{
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
/* Cancel the all vports retry delay retry timers */
lpfc_cancel_all_vport_retry_delay_timer(phba);
@@ -9293,11 +9411,10 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
if (!ndlp)
return;
- shost = lpfc_shost_from_vport(phba->pport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
return;
@@ -9419,13 +9536,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (np->nlp_state != NLP_STE_NPR_NODE) ||
+ if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
- spin_lock_irq(shost->host_lock);
+			spin_lock_irq(&np->lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+			spin_unlock_irq(&np->lock);
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
@@ -9448,6 +9564,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* to update the MAC address.
*/
lpfc_register_new_vport(phba, vport, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
goto out;
}
@@ -9457,16 +9574,22 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);
+
+ /* The FDISC completed successfully. Move the fabric ndlp to
+ * UNMAPPED state and register with the transport.
+ */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
goto out;
+
fdisc_failed:
if (vport->fc_vport &&
(vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
/* Cancel discovery timer */
lpfc_can_disctmo(vport);
- lpfc_nlp_put(ndlp);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -9559,16 +9682,25 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue FDISC: did:x%x",
did, 0, 0);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto err_out;
+
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0256 Issue FDISC: Cannot send IOCB\n");
- return 1;
+ lpfc_nlp_put(ndlp);
+ goto err_out;
}
+
lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
return 0;
+
+ err_out:
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0256 Issue FDISC: Cannot send IOCB\n");
+ return 1;
}
/**
@@ -9600,18 +9732,14 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"LOGO npiv cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
- lpfc_els_free_iocb(phba, cmdiocb);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
-
- /* Trigger the release of the ndlp after logo */
- lpfc_nlp_put(ndlp);
-
/* NPIV LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2928 NPIV LOGO completes to NPort x%x "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ irsp->ulpTimeout, vport->num_disc_nodes,
+ kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags);
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
spin_lock_irq(shost->host_lock);
@@ -9620,6 +9748,11 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
}
+
+ /* Safe to release resources now. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
}
/**
@@ -9641,7 +9774,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ int rc = 0;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
@@ -9667,18 +9800,25 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, 0);
elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
- IOCB_ERROR) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(shost->host_lock);
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
- }
+ spin_unlock_irq(&ndlp->lock);
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto node_err;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR)
+ goto io_err;
return 0;
+
+ io_err:
+ lpfc_nlp_put(ndlp);
+ node_err:
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
}
/**
@@ -9764,8 +9904,6 @@ repeat:
goto repeat;
}
}
-
- return;
}
/**
@@ -10046,8 +10184,10 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
- if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
+ lpfc_nlp_put(sglq_entry->ndlp);
sglq_entry->ndlp = NULL;
+ }
}
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -10090,9 +10230,13 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
sglq_entry->state = SGL_FREED;
spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_set_rrq_active(phba, ndlp,
- sglq_entry->sli4_lxritag,
- rxid, 1);
+
+ if (ndlp) {
+ lpfc_set_rrq_active(phba, ndlp,
+ sglq_entry->sli4_lxritag,
+ rxid, 1);
+ lpfc_nlp_put(ndlp);
+ }
/* Check if TXQ queue needs to be serviced */
if (pring && !list_empty(&pring->txq))
@@ -10155,10 +10299,10 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
* The rport is not responding. Remove the FCP-2 flag to prevent
* an ADISC in the follow-up recovery code.
*/
- spin_lock_irqsave(shost->host_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag |= NLP_ISSUE_LOGO;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_unreg_rpi(vport, ndlp);
}
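Before the diff moves on to lpfc_hbadisc.c, note the lock conversion threaded through all of the hunks above: per-node fields such as nlp_flag are now serialized by ndlp->lock rather than the host-wide shost->host_lock. The narrowed critical section looks roughly like this, assuming the lock is spin_lock_init()'d when the node is allocated:

#include <linux/spinlock.h>

struct demo_locked_node {
	spinlock_t lock;	/* assumed spin_lock_init()'d at alloc */
	unsigned int nlp_flag;
};

#define DEMO_NLP_ISSUE_LOGO 0x1

/* Two CPUs touching different nodes no longer contend on a single
 * Scsi_Host lock; each update serializes only against users of the
 * same node.
 */
static void demo_mark_logo(struct demo_locked_node *node)
{
	unsigned long iflags;

	spin_lock_irqsave(&node->lock, iflags);
	node->nlp_flag |= DEMO_NLP_ISSUE_LOGO;
	spin_unlock_irqrestore(&node->lock, iflags);
}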
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bb02fd8bc2dd..2b6b5fc671fe 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -73,34 +73,67 @@ static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void
-lpfc_terminate_rport_io(struct fc_rport *rport)
+/* The source of a terminate rport I/O is either a dev_loss_tmo
+ * event or a call to fc_remove_host. While the rport should be
+ * valid during these downcalls, the transport can call twice
+ * in a single event. This routine provides some protection
+ * as the NDLP isn't really free, just released to the pool.
+ */
+static int
+lpfc_rport_invalid(struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
- struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+
+ if (!rport) {
+ pr_err("**** %s: NULL rport, exit.\n", __func__);
+ return -EINVAL;
+ }
rdata = rport->dd_data;
+ if (!rdata) {
+ pr_err("**** %s: NULL dd_data on rport %p SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
+
ndlp = rdata->pnode;
+ if (!rdata->pnode) {
+ pr_err("**** %s: NULL ndlp on rport %p SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
- printk(KERN_ERR "Cannot find remote node"
- " to terminate I/O Data x%x\n",
- rport->port_id);
- return;
+ if (!ndlp->vport) {
+ pr_err("**** %s: Null vport on ndlp %p, DID x%x rport %p "
+ "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
+ rport->scsi_target_id);
+ return -EINVAL;
}
+ return 0;
+}
+
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport;
- phba = ndlp->phba;
+ if (lpfc_rport_invalid(rport))
+ return;
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
- "rport terminate: sid:x%x did:x%x flg:x%x",
- ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ rdata = rport->dd_data;
+ ndlp = rdata->pnode;
+ vport = ndlp->vport;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport terminate: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
if (ndlp->nlp_sid != NLP_NO_SID) {
- lpfc_sli_abort_iocb(ndlp->vport,
- &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ lpfc_sli_abort_iocb(vport,
+ &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
}
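lpfc_rport_invalid() centralizes the rport/dd_data/pnode/vport checks that each transport downcall used to open-code, which matters because the transport can invoke a callback twice for one event. The caller shape it enables, sketched with demo_transport_callbk() as a hypothetical downcall in the same file:

/* Validate first, then dereference: once lpfc_rport_invalid()
 * returns 0, rdata->pnode and pnode->vport are known non-NULL.
 */
static void demo_transport_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;

	if (lpfc_rport_invalid(rport))
		return;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	/* ... ndlp and ndlp->vport are safe to use from here ... */
}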
@@ -110,19 +143,14 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
+ struct lpfc_nodelist *ndlp;
struct lpfc_vport *vport;
- struct Scsi_Host *shost;
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
- int put_node;
- int put_rport;
unsigned long iflags;
- rdata = rport->dd_data;
- ndlp = rdata->pnode;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
+ if (!ndlp)
return;
vport = ndlp->vport;
@@ -133,22 +161,24 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+ "3181 dev_loss_callbk x%06x, rport %p flg x%x "
+ "load_flag x%x refcnt %d\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
+ /* Don't schedule a worker thread event if the vport is going down.
+ * The teardown process cleans up the node via lpfc_drop_node.
*/
if (vport->load_flag & FC_UNLOADING) {
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
+ ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
+
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+
+ /* Remove the node reference from remote_port_add now.
+ * The driver will not call remote_port_delete.
+ */
+ lpfc_nlp_put(ndlp);
return;
}
@@ -165,20 +195,28 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!list_empty(&evtp->evt_listp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "6790 rport name %llx dev_loss_evt pending",
+ "6790 rport name %llx dev_loss_evt pending\n",
rport->port_name);
return;
}
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+ /*
+ * The backend does not expect any more calls associated with this
+ * rport. Remove the association between rport and ndlp.
+ */
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
+ ndlp->rport = NULL;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
/* We need to hold the node by incrementing the reference
* count until this queued work is done
*/
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
spin_lock_irqsave(&phba->hbalock, iflags);
if (evtp->evt_arg1) {
@@ -204,70 +242,34 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct Scsi_Host *shost;
uint8_t *name;
- int put_node;
int warn_on = 0;
int fcf_inuse = 0;
unsigned long iflags;
- rport = ndlp->rport;
vport = ndlp->vport;
- shost = lpfc_shost_from_vport(vport);
+ name = (uint8_t *)&ndlp->nlp_portname;
+ phba = vport->phba;
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(shost->host_lock, iflags);
-
- if (!rport)
- return fcf_inuse;
-
- name = (uint8_t *) &ndlp->nlp_portname;
- phba = vport->phba;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
fcf_inuse = lpfc_fcf_inuse(phba);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport devlosstmo:did:x%x type:x%x id:x%x",
- ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+ "rport devlosstmo:did:x%x type:x%x id:x%x",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
-
- /*
- * lpfc_nlp_remove if reached with dangling rport drops the
- * reference. To make sure that does not happen clear rport
- * pointer in ndlp before lpfc_nlp_put.
- */
- rdata = rport->dd_data;
-
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
- */
- if (vport->load_flag & FC_UNLOADING) {
- if (ndlp->nlp_sid != NLP_NO_SID) {
- /* flush the target */
- lpfc_sli_abort_iocb(vport,
- &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- }
- put_node = rdata->pnode != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
-
- return fcf_inuse;
- }
+ "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
+ /* If the driver is recovering the rport, ignore devloss. */
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0284 Devloss timeout Ignored on "
@@ -279,15 +281,11 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
- put_node = rdata->pnode != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
+ /* Fabric nodes are done. */
+ if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
-
- if (ndlp->nlp_type & NLP_FABRIC)
return fcf_inuse;
+ }
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
@@ -315,11 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
- !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
- (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
return fcf_inuse;
@@ -552,6 +546,15 @@ lpfc_work_list_done(struct lpfc_hba *phba)
fcf_inuse,
nlp_did);
break;
+ case LPFC_EVT_RECOVER_PORT:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ /* decrement the node reference count held for
+ * this queued work
+ */
+ lpfc_nlp_put(ndlp);
+ break;
case LPFC_EVT_ONLINE:
if (phba->link_state < LPFC_LINK_DOWN)
*(int *) (evtp->evt_arg1) = lpfc_online(phba);
@@ -811,13 +814,13 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct lpfc_nodelist *ndlp, *next_ndlp;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
+
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
- ((vport->port_type == LPFC_NPIV_PORT) &&
- (ndlp->nlp_DID == NameServer_DID)))
+ ((vport->port_type == LPFC_NPIV_PORT) &&
+ ((ndlp->nlp_DID == NameServer_DID) ||
+ (ndlp->nlp_DID == FDMI_DID))))
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
@@ -983,8 +986,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
+
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -1032,9 +1034,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
- if (vport->fc_flag & FC_LBIT)
- lpfc_linkup_cleanup_nodes(vport);
-
+ lpfc_linkup_cleanup_nodes(vport);
}
static int
@@ -3196,7 +3196,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
- phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@@ -3579,16 +3579,15 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
+ "0002 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
@@ -3602,9 +3601,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* there is another reg login in
* process.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* We cannot leave the RPI registered because
@@ -4076,8 +4075,16 @@ out:
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- /* If no other thread is using the ndlp, free it */
- lpfc_nlp_not_used(ndlp);
+ /* If the node is not registered with the scsi or nvme
+ * transport, remove the fabric node. The failed reg_login
+ * is terminal.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_not_used(ndlp);
+ }
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/*
@@ -4100,10 +4107,10 @@ out:
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
- "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
+ "0003 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
@@ -4124,7 +4131,9 @@ out:
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, 0);
- lpfc_issue_els_rdf(vport, 0);
+ if (!phba->cfg_enable_mi ||
+ phba->sli4_hba.pc_sli4_params.mi_ver < LPFC_MIB3_SUPPORT)
+ lpfc_issue_els_rdf(vport, 0);
}
vport->fc_ns_retry = 0;
@@ -4154,6 +4163,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_rport_data *rdata;
struct fc_rport_identifiers rport_ids;
struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
return;
@@ -4164,47 +4174,46 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport_ids.port_id = ndlp->nlp_DID;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
- /*
- * We leave our node pointer in rport->dd_data when we unregister a
- * FCP target port. But fc_remote_port_add zeros the space to which
- * rport->dd_data points. So, if we're reusing a previously
- * registered port, drop the reference that we took the last time we
- * registered the port.
- */
- rport = ndlp->rport;
- if (rport) {
- rdata = rport->dd_data;
- /* break the link before dropping the ref */
- ndlp->rport = NULL;
- if (rdata) {
- if (rdata->pnode == ndlp)
- lpfc_nlp_put(ndlp);
- rdata->pnode = NULL;
- }
- /* drop reference for earlier registeration */
- put_device(&rport->dev);
- }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport add: did:x%x flg:x%x type x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ "rport add: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
/* Don't add the remote port if unloading. */
if (vport->load_flag & FC_UNLOADING)
return;
+ /*
+ * Disassociate any older association between this ndlp and rport
+ */
+ if (ndlp->rport) {
+ rdata = ndlp->rport->dd_data;
+ rdata->pnode = NULL;
+ }
+
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
- if (!rport || !get_device(&rport->dev)) {
+ if (!rport) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
"Warning: fc_remote_port_add failed\n");
return;
}
- /* initialize static port data */
+ /* Successful port add. Complete initializing node data */
rport->maxframe_size = ndlp->nlp_maxframe;
rport->supported_classes = ndlp->nlp_class_sup;
rdata = rport->dd_data;
rdata->pnode = lpfc_nlp_get(ndlp);
+ if (!rdata->pnode) {
+ dev_warn(&phba->pcidev->dev,
+ "Warning - node ref failed. Unreg rport\n");
+ fc_remote_port_delete(rport);
+ ndlp->rport = NULL;
+ return;
+ }
+
+ spin_lock_irqsave(&ndlp->lock, flags);
+ ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, flags);
if (ndlp->nlp_type & NLP_FCP_TARGET)
rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
@@ -4221,13 +4230,14 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 rport register x%06x, rport x%px role x%x\n",
- ndlp->nlp_DID, rport, rport_ids.roles);
+ "3183 %s rport x%px DID x%x, role x%x\n",
+ __func__, rport, rport->port_id, rport->roles);
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
+
return;
}
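The register path now holds exactly one node reference on behalf of rdata->pnode and records the binding in fc4_xpt_flags; the unregister hunk that follows returns that reference alongside fc_remote_port_delete(). The pairing in miniature, with simplified flags and the usual demo_* caveat:

#define DEMO_SCSI_XPT_REGD 0x1

struct demo_xpt_node {
	struct kref kref;
	unsigned int xpt_flags;
};

static void demo_xpt_node_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_xpt_node, kref));
}

/* One reference per transport registration ... */
static int demo_xpt_register(struct demo_xpt_node *node)
{
	if (!kref_get_unless_zero(&node->kref))
		return -ENODEV;	/* node already tearing down */
	node->xpt_flags |= DEMO_SCSI_XPT_REGD;
	return 0;
}

/* ... returned exactly once at unregister. */
static void demo_xpt_unregister(struct demo_xpt_node *node)
{
	node->xpt_flags &= ~DEMO_SCSI_XPT_REGD;
	kref_put(&node->kref, demo_xpt_node_release);
}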
@@ -4245,12 +4255,12 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3184 rport unregister x%06x, rport x%px\n",
- ndlp->nlp_DID, rport);
+ "3184 rport unregister x%06x, rport x%px "
+ "xptflg x%x\n",
+ ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags);
fc_remote_port_delete(rport);
-
- return;
+ lpfc_nlp_put(ndlp);
}
static void
@@ -4296,8 +4306,6 @@ static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
ndlp->nlp_type |= NLP_FC_NODE;
@@ -4391,9 +4399,9 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
}
@@ -4426,6 +4434,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
+ int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
char name1[16], name2[16];
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -4438,6 +4447,12 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"node statechg did:x%x old:%d ste:%d",
ndlp->nlp_DID, old_state, state);
+ if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
+ state != NLP_STE_UNUSED_NODE) {
+ ndlp->nlp_flag &= ~NLP_DROPPED;
+ lpfc_nlp_get(ndlp);
+ }
+
if (old_state == NLP_STE_NPR_NODE &&
state != NLP_STE_NPR_NODE)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -4485,15 +4500,6 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
NLP_STE_UNUSED_NODE);
}
-static void
-lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
- lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
- NLP_STE_UNUSED_NODE);
-}
/**
* lpfc_initialize_node - Initialize all fields of node object
* @vport: Pointer to Virtual Port object.
@@ -4515,128 +4521,19 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
+ INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
+
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
ndlp->nlp_fc4_type = NLP_FC4_NONE;
kref_init(&ndlp->kref);
- NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}
-struct lpfc_nodelist *
-lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- int state)
-{
- struct lpfc_hba *phba = vport->phba;
- uint32_t did, flag;
- unsigned long flags;
- unsigned long *active_rrqs_xri_bitmap = NULL;
- int rpi = LPFC_RPI_ALLOC_ERROR;
- uint32_t defer_did = 0;
-
- if (!ndlp)
- return NULL;
-
- if (phba->sli_rev == LPFC_SLI_REV4) {
- if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
- rpi = lpfc_sli4_alloc_rpi(vport->phba);
- else
- rpi = ndlp->nlp_rpi;
-
- if (rpi == LPFC_RPI_ALLOC_ERROR) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0359 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d FAILED RPI "
- " ALLOC\n",
- __func__,
- (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- return NULL;
- }
- }
-
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* The ndlp should not be in memory free mode */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0277 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- goto free_rpi;
- }
- /* The ndlp should not already be in active mode */
- if (NLP_CHK_NODE_ACT(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0278 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- goto free_rpi;
- }
-
- /* First preserve the orginal DID, xri_bitmap and some flags */
- did = ndlp->nlp_DID;
- flag = (ndlp->nlp_flag & NLP_UNREG_INP);
- if (flag & NLP_UNREG_INP)
- defer_did = ndlp->nlp_defer_did;
- if (phba->sli_rev == LPFC_SLI_REV4)
- active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
-
- /* Zero ndlp except of ndlp linked list pointer */
- memset((((char *)ndlp) + sizeof (struct list_head)), 0,
- sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
-
- /* Next reinitialize and restore saved objects */
- lpfc_initialize_node(vport, ndlp, did);
- ndlp->nlp_flag |= flag;
- if (flag & NLP_UNREG_INP)
- ndlp->nlp_defer_did = defer_did;
- if (phba->sli_rev == LPFC_SLI_REV4)
- ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
-
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0008 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- }
-
-
- if (state != NLP_STE_UNUSED_NODE)
- lpfc_nlp_set_state(vport, ndlp, state);
- else
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0013 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px STATE=UNUSED\n",
- ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
-
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
- "node enable: did:x%x",
- ndlp->nlp_DID, 0, 0);
- return ndlp;
-
-free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_sli4_free_rpi(vport->phba, rpi);
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- }
- return NULL;
-}
-
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4650,6 +4547,7 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ ndlp->nlp_flag |= NLP_DROPPED;
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
@@ -4908,8 +4806,14 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
{
unsigned long iflags;
+ /* Driver always gets a reference on the mailbox job
+ * in support of async jobs.
+ */
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ return;
+
if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -4917,20 +4821,15 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) &&
(kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
} else {
if (vport->load_flag & FC_UNLOADING) {
if (phba->sli_rev == LPFC_SLI_REV4) {
- spin_lock_irqsave(&vport->phba->ndlp_lock,
- iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock,
- iflags);
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
}
- lpfc_nlp_get(ndlp);
}
- mbox->ctx_ndlp = ndlp;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
@@ -4988,6 +4887,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (!mbox->ctx_ndlp) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+
if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
/*
* accept PLOGIs after unreg_rpi_cmpl
@@ -5011,6 +4915,29 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
}
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "1444 Failed to allocate mempool "
+ "unreg_rpi UNREG x%x, "
+ "DID x%x, flag x%x, "
+ "ndlp x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp);
+
+ /* Because mempool_alloc failed, we
+ * will issue a LOGO here and keep the rpi alive if
+ * not unloading.
+ */
+ if (!(vport->load_flag & FC_UNLOADING)) {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
+
+ return 1;
}
lpfc_no_rpi(phba, ndlp);
out:
@@ -5131,11 +5058,9 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
- unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5143,22 +5068,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
- if (NLP_CHK_FREE_REQ(ndlp)) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0280 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- lpfc_dequeue_node(vport, ndlp);
- } else {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0281 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- lpfc_disable_node(vport, ndlp);
- }
-
+ lpfc_dequeue_node(vport, ndlp);
/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
@@ -5205,108 +5115,22 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_els_abort(phba, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ list_del_init(&ndlp->recovery_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- if (!lpfc_unreg_rpi(vport, ndlp)) {
- /* Clean up unregistered and non freed rpis */
- if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
- !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
- lpfc_sli4_free_rpi(vport->phba,
- ndlp->nlp_rpi);
- spin_lock_irqsave(&vport->phba->ndlp_lock,
- iflags);
- ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock,
- iflags);
- }
- }
return 0;
}
-/*
- * Check to see if we can free the nlp back to the freelist.
- * If we are in the middle of using the nlp in the discovery state
- * machine, defer the free till we reach the end of the state machine.
- */
-static void
-lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
- LPFC_MBOXQ_t *mbox;
- int rc;
-
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
- !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
- phba->sli_rev != LPFC_SLI_REV4) {
- /* For this case we need to cleanup the default rpi
- * allocated by the firmware.
- */
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_NODE | LOG_DISCOVERY,
- "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
- "ref %d map:x%x ndlp x%px\n",
- ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
- != NULL) {
- rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
- (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- else {
- mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
- mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
- mbox->vport = vport;
- mbox->ctx_ndlp = ndlp;
- rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- }
- }
- }
- lpfc_cleanup_node(vport, ndlp);
-
- /*
- * ndlp->rport must be set to NULL before it reaches here
- * i.e. break rport/node link before doing lpfc_nlp_put for
- * registered rport and then drop the reference of rport.
- */
- if (ndlp->rport) {
- /*
- * extra lpfc_nlp_put dropped the reference of ndlp
- * for registered rport so need to cleanup rport
- */
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0940 removed node x%px DID x%x "
- "rpi %d rport not null x%px\n",
- ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
- ndlp->rport);
- rport = ndlp->rport;
- rdata = rport->dd_data;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- put_device(&rport->dev);
- }
-}
-
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
@@ -5373,8 +5197,8 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
if (lpfc_matchdid(vport, ndlp, did)) {
data1 = (((uint32_t)ndlp->nlp_state << 24) |
((uint32_t)ndlp->nlp_xri << 16) |
- ((uint32_t)ndlp->nlp_type << 8) |
- ((uint32_t)ndlp->nlp_usg_map & 0xff));
+ ((uint32_t)ndlp->nlp_type << 8)
+ );
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
"Data: x%px x%x x%x x%x x%x x%px\n",
@@ -5442,7 +5266,6 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
ndlp = lpfc_findnode_did(vport, did);
@@ -5463,28 +5286,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
- return ndlp;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- if (vport->phba->nvmet_support)
- return NULL;
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
- "0014 Could not enable ndlp\n");
- return NULL;
- }
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6454 Setup Enabled Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, vport->fc_flag);
-
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp;
}
@@ -5526,9 +5330,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6456 Skip Setup RSCN Node x%x "
@@ -5562,9 +5366,9 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
return ndlp;
}
@@ -5802,7 +5606,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
icmd = &iocb->iocb;
if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
}
spin_unlock_irq(&phba->hbalock);
@@ -5821,8 +5625,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
lpfc_free_tx(phba, ndlp);
@@ -5910,8 +5712,6 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -5963,7 +5763,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
"NameServer login\n");
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
lpfc_els_abort(phba, ndlp);
/* ReStart discovery */
@@ -6136,10 +5936,10 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
- "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ "0004 rpi:%x DID:%x flg:%x %d x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
* 0xfffffa (FDMI well known port).
@@ -6168,10 +5968,6 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
uint16_t *rpi = param;
- /* check for active node */
- if (!NLP_CHK_NODE_ACT(ndlp))
- return 0;
-
return ndlp->nlp_rpi == *rpi;
}
@@ -6320,16 +6116,17 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+ spin_lock_init(&ndlp->lock);
+
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0007 Init New ndlp x%px, rpi:x%x DID:%x "
- "flg:x%x refcnt:%d map:x%x\n",
+ "flg:x%x refcnt:%d\n",
ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag, kref_read(&ndlp->kref),
- ndlp->nlp_usg_map);
+ ndlp->nlp_flag, kref_read(&ndlp->kref));
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6354,39 +6151,37 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
static void
lpfc_nlp_release(struct kref *kref)
{
- struct lpfc_hba *phba;
- unsigned long flags;
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
kref);
+ struct lpfc_vport *vport = ndlp->vport;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node release: did:x%x flg:x%x type:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 %s: ndlp:x%px did %x "
- "usgmap:x%x refcnt:%d rpi:%x\n",
- __func__,
- (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref), ndlp->nlp_rpi);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0279 %s: ndlp:%p did %x refcnt:%d rpi:%x\n",
+ __func__, ndlp, ndlp->nlp_DID,
+ kref_read(&ndlp->kref), ndlp->nlp_rpi);
/* remove ndlp from action. */
- lpfc_nlp_remove(ndlp->vport, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_cleanup_node(vport, ndlp);
- /* clear the ndlp active flag for all release cases */
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- NLP_CLR_NODE_ACT(ndlp);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* Clear Node key fields to give other threads notice
+ * that this node memory is not valid anymore.
+ */
+ ndlp->vport = NULL;
+ ndlp->nlp_state = NLP_STE_FREED_NODE;
+ ndlp->nlp_flag = 0;
+ ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- kfree(ndlp->lat_data);
- if (phba->sli_rev == LPFC_SLI_REV4)
- mempool_free(ndlp->active_rrqs_xri_bitmap,
- ndlp->phba->active_rrq_pool);
- mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
- }
+ kfree(ndlp->lat_data);
+ if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
+ mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
/* This routine bumps the reference count for a ndlp structure to ensure
@@ -6396,7 +6191,6 @@ lpfc_nlp_release(struct kref *kref)
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
unsigned long flags;
if (ndlp) {
@@ -6404,94 +6198,43 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
"node get: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
+
		/* The usage check prevents incrementing the reference
		 * count of an ndlp that is already in the process of
		 * being released.
		 */
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
+ if (!kref_get_unless_zero(&ndlp->kref)) {
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0276 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
+ "0276 %s: ndlp:x%px refcnt:%d\n",
+ __func__, (void *)ndlp, kref_read(&ndlp->kref));
return NULL;
- } else
- kref_get(&ndlp->kref);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ }
+ spin_unlock_irqrestore(&ndlp->lock, flags);
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
}
+
return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
- * count goes to 0, this indicates the the associated nodelist should be
- * freed. Returning 1 indicates the ndlp resource has been released; on the
- * other hand, returning 0 indicates the ndlp resource has not been released
- * yet.
+ * count goes to 0, this indicates the associated nodelist should be freed.
*/
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
- unsigned long flags;
-
- if (!ndlp)
- return 1;
-
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* Check the ndlp memory free acknowledge flag to avoid the
- * possible race condition that kref_put got invoked again
- * after previous one has done ndlp memory free.
- */
- if (NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0274 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
- return 1;
- }
- /* Check the ndlp inactivate log flag to avoid the possible
- * race condition that kref_put got invoked again after ndlp
- * is already in inactivating state.
- */
- if (NLP_CHK_IACT_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0275 %s: ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- __func__, (void *)ndlp, ndlp->nlp_usg_map,
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
- return 1;
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
}
- /* For last put, mark the ndlp usage flags to make sure no
- * other kref_get and kref_put on the same ndlp shall get
- * in between the process when the final kref_put has been
- * invoked on this ndlp.
- */
- if (kref_read(&ndlp->kref) == 1) {
- /* Indicate ndlp is put to inactive state. */
- NLP_SET_IACT_REQ(ndlp);
- /* Acknowledge ndlp memory free has been seen. */
- if (NLP_CHK_FREE_REQ(ndlp))
- NLP_SET_FREE_ACK(ndlp);
- }
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- /* Note, the kref_put returns 1 when decrementing a reference
- * count that was 1, it invokes the release callback function,
- * but it still left the reference count as 1 (not actually
- * performs the last decrementation). Otherwise, it actually
- * decrements the reference count and returns 0.
- */
- return kref_put(&ndlp->kref, lpfc_nlp_release);
+
+ return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
/* This routine free's the specified nodelist if it is not in use
@@ -6551,7 +6294,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
}
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+ if (ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
ret = 1;
spin_unlock_irq(shost->host_lock);
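The hbadisc.c changes above retire the old nlp_usg_map/NLP_CHK_* state bits in favor of plain kref semantics: lpfc_nlp_get() now refuses to take a reference once the count has hit zero, and the final lpfc_nlp_put() funnels all teardown through lpfc_nlp_release(). A minimal sketch of that lifetime pattern follows; only the kref API itself is real, the node/node_release names are illustrative:

#include <linux/kref.h>
#include <linux/slab.h>

struct node {
	struct kref kref;
	/* payload ... */
};

static void node_release(struct kref *kref)
{
	struct node *n = container_of(kref, struct node, kref);

	kfree(n);			/* last reference gone: free */
}

static struct node *node_get(struct node *n)
{
	/* kref_get_unless_zero() fails once the count has dropped to
	 * zero, i.e. while another thread is inside node_release().
	 */
	if (!n || !kref_get_unless_zero(&n->kref))
		return NULL;
	return n;
}

static int node_put(struct node *n)
{
	/* Returns 1 when this put invoked node_release(). */
	return n ? kref_put(&n->kref, node_release) : 0;
}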
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index c20034b3101c..42682d95af52 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1380,6 +1380,9 @@ struct lpfc_fdmi_reg_hba {
struct lpfc_fdmi_reg_port_list rpl;
};
+/******** MI MIB ********/
+#define SLI_CT_MIB_Subtypes 0x11
+
/*
* Register HBA Attributes (RHAT)
*/
@@ -1465,7 +1468,7 @@ struct lpfc_fdmi_reg_portattr {
#define LPFC_FDMI2_HBA_ATTR 0x0002efff
/*
- * Port Attrubute Types
+ * Port Attribute Types
*/
#define RPRT_SUPPORTED_FC4_TYPES 0x1 /* 32 byte binary array */
#define RPRT_SUPPORTED_SPEED 0x2 /* 32-bit unsigned int */
@@ -1483,6 +1486,7 @@ struct lpfc_fdmi_reg_portattr {
#define RPRT_PORT_STATE 0x101 /* 32-bit unsigned int */
#define RPRT_DISC_PORT 0x102 /* 32-bit unsigned int */
#define RPRT_PORT_ID 0x103 /* 32-bit unsigned int */
+#define RPRT_VENDOR_MI 0xf047 /* vendor ascii string */
#define RPRT_SMART_SERVICE 0xf100 /* 4 to 256 byte ASCII string */
#define RPRT_SMART_GUID 0xf101 /* 8 byte WWNN + 8 byte WWPN */
#define RPRT_SMART_VERSION 0xf102 /* 4 to 256 byte ASCII string */
@@ -1515,6 +1519,7 @@ struct lpfc_fdmi_reg_portattr {
#define LPFC_FDMI_SMART_ATTR_port_info 0x00100000 /* Vendor specific */
#define LPFC_FDMI_SMART_ATTR_qos 0x00200000 /* Vendor specific */
#define LPFC_FDMI_SMART_ATTR_security 0x00400000 /* Vendor specific */
+#define LPFC_FDMI_VENDOR_ATTR_mi 0x00800000 /* Vendor specific */
/* Bit mask for FDMI-1 defined PORT attributes */
#define LPFC_FDMI1_PORT_ATTR 0x0000003f
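Each RPRT_* attribute type above has a matching LPFC_FDMI_* mask bit, and registration walks the configured mask bit by bit. A standalone sketch of that dispatch shape, assuming a simple per-bit loop (the driver's actual FDMI loop differs in detail):

#include <stdint.h>
#include <stdio.h>

#define RPRT_VENDOR_MI			0xf047
#define LPFC_FDMI_VENDOR_ATTR_mi	0x00800000

static void walk_attrs(uint32_t mask)
{
	uint32_t bit;

	for (bit = 1; mask; bit <<= 1) {
		if (!(mask & bit))
			continue;
		mask &= ~bit;
		if (bit == LPFC_FDMI_VENDOR_ATTR_mi)
			printf("register vendor MI attr (type 0x%x)\n",
			       RPRT_VENDOR_MI);
		/* ... other LPFC_FDMI_* bits dispatch similarly ... */
	}
}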
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 12e4e76233e6..541b9aef6bfe 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -3506,8 +3506,14 @@ struct lpfc_sli4_parameters {
#define cfg_max_tow_xri_MASK 0x0000ffff
#define cfg_max_tow_xri_WORD word20
- uint32_t word21; /* RESERVED */
- uint32_t word22; /* RESERVED */
+ uint32_t word21;
+#define cfg_mib_bde_cnt_SHIFT 16
+#define cfg_mib_bde_cnt_MASK 0x000000ff
+#define cfg_mib_bde_cnt_WORD word21
+#define cfg_mi_ver_SHIFT 0
+#define cfg_mi_ver_MASK 0x0000ffff
+#define cfg_mi_ver_WORD word21
+ uint32_t mib_size;
uint32_t word23; /* RESERVED */
uint32_t word24;
@@ -4380,9 +4386,11 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
-#define wqe_nvme_SHIFT 4
-#define wqe_nvme_MASK 0x00000001
-#define wqe_nvme_WORD word10
+#define wqe_xchg_SHIFT 4
+#define wqe_xchg_MASK 0x00000001
+#define wqe_xchg_WORD word10
+#define LPFC_SCSI_XCHG 0x0
+#define LPFC_NVME_XCHG 0x1
#define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10
@@ -4880,6 +4888,8 @@ struct lpfc_grp_hdr {
#define NVME_READ_CMD 0x0
#define FCP_COMMAND_DATA_OUT 0x1
#define NVME_WRITE_CMD 0x1
+#define COMMAND_DATA_IN 0x0
+#define COMMAND_DATA_OUT 0x1
#define FCP_COMMAND_TRECEIVE 0x2
#define FCP_COMMAND_TRSP 0x3
#define FCP_COMMAND_TSEND 0x7
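The new word21 fields (cfg_mi_ver, cfg_mib_bde_cnt) and the wqe_xchg rename both follow the driver's SHIFT/MASK/WORD accessor convention. A self-contained sketch of how extraction works against those definitions; the macro body mirrors the driver's bf_get() helper but is reproduced here only for illustration:

#include <stdint.h>
#include <stdio.h>

struct params {
	uint32_t word21;
};

#define cfg_mi_ver_SHIFT	0
#define cfg_mi_ver_MASK		0x0000ffff
#define cfg_mi_ver_WORD		word21
#define cfg_mib_bde_cnt_SHIFT	16
#define cfg_mib_bde_cnt_MASK	0x000000ff
#define cfg_mib_bde_cnt_WORD	word21

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct params p = { .word21 = (4u << 16) | 0x0002 };

	/* mi_ver lives in bits 15:0, mib_bde_cnt in bits 23:16 */
	printf("mi_ver=%u bde_cnt=%u\n",
	       (unsigned)bf_get(cfg_mi_ver, &p),
	       (unsigned)bf_get(cfg_mib_bde_cnt, &p));
	return 0;
}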
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ca25e54bb782..ac67f420ec26 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2844,28 +2844,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- continue;
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- /* Trigger the release of the ndlp memory */
- lpfc_nlp_put(ndlp);
- continue;
- }
- spin_lock_irq(&phba->ndlp_lock);
- if (NLP_CHK_FREE_REQ(ndlp)) {
- /* The ndlp should not be in memory free mode already */
- spin_unlock_irq(&phba->ndlp_lock);
- continue;
- } else
- /* Indicate request for freeing ndlp memory */
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
-
if (vport->port_type != LPFC_PHYSICAL_PORT &&
ndlp->nlp_DID == Fabric_DID) {
/* Just free up ndlp with Fabric_DID for vports */
@@ -2873,20 +2851,23 @@ lpfc_cleanup(struct lpfc_vport *vport)
continue;
}
- /* take care of nodes in unused state before the state
- * machine taking action.
- */
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ if (ndlp->nlp_DID == Fabric_Cntl_DID &&
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
lpfc_nlp_put(ndlp);
continue;
}
- if (ndlp->nlp_type & NLP_FABRIC)
+ /* Fabric Ports not in UNMAPPED state are cleaned up in the
+ * DEVICE_RM event.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RM);
+ if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
}
/* At this point, ALL ndlp's should be gone
@@ -2901,12 +2882,13 @@ lpfc_cleanup(struct lpfc_vport *vport)
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0282 did:x%x ndlp:x%px "
- "usgmap:x%x refcnt:%d\n",
- ndlp->nlp_DID, (void *)ndlp,
- ndlp->nlp_usg_map,
- kref_read(&ndlp->kref));
+ LOG_TRACE_EVENT,
+ "0282 did:x%x ndlp:x%px "
+ "refcnt:%d xflags x%x nflag x%x\n",
+ ndlp->nlp_DID, (void *)ndlp,
+ kref_read(&ndlp->kref),
+ ndlp->fc4_xpt_flags,
+ ndlp->nlp_flag);
}
break;
}
@@ -3080,7 +3062,6 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport **vports;
int i, rpi;
- unsigned long flags;
if (phba->sli_rev != LPFC_SLI_REV4)
return;
@@ -3096,22 +3077,18 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
rpi = lpfc_sli4_alloc_rpi(phba);
if (rpi == LPFC_RPI_ALLOC_ERROR) {
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- NLP_CLR_NODE_ACT(ndlp);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* TODO print log? */
continue;
}
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"0009 Assign RPI x%x to ndlp x%px "
- "DID:x%06x flg:x%x map:x%x\n",
+ "DID:x%06x flg:x%x\n",
ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, ndlp->nlp_usg_map);
+ ndlp->nlp_flag);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3510,8 +3487,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if ((!NLP_CHK_NODE_ACT(ndlp)) ||
- ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
/* Driver must assume RPI is invalid for
* any unused or inactive node.
*/
@@ -3519,33 +3495,42 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
continue;
}
- if (ndlp->nlp_type & NLP_FABRIC) {
- lpfc_disc_state_machine(vports[i], ndlp,
- NULL, NLP_EVT_DEVICE_RECOVERY);
- lpfc_disc_state_machine(vports[i], ndlp,
- NULL, NLP_EVT_DEVICE_RM);
- }
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* Whenever an SLI4 port goes offline, free the
* RPI. Get a new RPI when the adapter port
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ lpfc_printf_vlog(vports[i], KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"0011 Free RPI x%x on "
- "ndlp:x%px did x%x "
- "usgmap:x%x\n",
+ "ndlp: %p did x%x\n",
ndlp->nlp_rpi, ndlp,
- ndlp->nlp_DID,
- ndlp->nlp_usg_map);
+ ndlp->nlp_DID);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
+
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ lpfc_disc_state_machine(vports[i], ndlp,
+ NULL, NLP_EVT_DEVICE_RECOVERY);
+
+				/* Don't remove the node unless it
+				 * has been unregistered with the
+				 * transport; otherwise let dev_loss
+				 * take care of the node.
+				 */
+ if (!(ndlp->fc4_xpt_flags &
+ (NVME_XPT_REGD | SCSI_XPT_REGD)))
+ lpfc_disc_state_machine
+ (vports[i], ndlp,
+ NULL,
+ NLP_EVT_DEVICE_RM);
+ }
}
}
}
@@ -4343,16 +4328,13 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Seed physical port template */
memcpy(template, &lpfc_template, sizeof(*template));
- if (use_no_reset_hba) {
+ if (use_no_reset_hba)
/* template is for a no reset SCSI Host */
- template->max_sectors = 0xffff;
template->eh_host_reset_handler = NULL;
- }
/* Template for all vports this physical port creates */
memcpy(&phba->vport_template, &lpfc_template,
sizeof(*template));
- phba->vport_template.max_sectors = 0xffff;
phba->vport_template.shost_attrs = lpfc_vport_attrs;
phba->vport_template.eh_bus_reset_handler = NULL;
phba->vport_template.eh_host_reset_handler = NULL;
@@ -5607,11 +5589,6 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp)
- return 0;
}
if ((phba->pport->port_state < LPFC_FLOGI) &&
(phba->pport->port_state != LPFC_VPORT_FAILED))
@@ -5667,7 +5644,6 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
int rc;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
int active_vlink_present;
struct lpfc_vport **vports;
int i;
@@ -5848,10 +5824,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
*/
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
@@ -5958,18 +5933,21 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
/* First, declare the async event has been handled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~ASYNC_EVENT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
/* Now, handle all the async events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
+ iflags);
+
/* Process the asynchronous event */
switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
case LPFC_TRAILER_CODE_LINK:
@@ -6001,9 +5979,12 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
&cq_event->cqe.mcqe_cmpl));
break;
}
+
/* Free the completion event processed to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
}
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
/**
@@ -6295,9 +6276,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
atomic_set(&phba->dbg_log_dmping, 0);
spin_lock_init(&phba->hbalock);
- /* Initialize ndlp management spinlock */
- spin_lock_init(&phba->ndlp_lock);
-
/* Initialize port_list spinlock */
spin_lock_init(&phba->port_list_lock);
INIT_LIST_HEAD(&phba->port_list);
@@ -6630,6 +6608,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
+ spin_lock_init(&phba->sli4_hba.asynce_list_lock);
+ spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
/*
* Initialize driver internal slow-path work queues
@@ -6641,8 +6621,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
/* Asynchronous event CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
/* Slow-path XRI aborted CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Receive queue CQ Event work queue list */
@@ -7196,7 +7174,6 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
"1431 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
return 0;
}
@@ -10174,26 +10151,28 @@ lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
- LIST_HEAD(cqelist);
- struct lpfc_cq_event *cqe;
+ LIST_HEAD(cq_event_list);
+ struct lpfc_cq_event *cq_event;
unsigned long iflags;
/* Retrieve all the pending WCQEs from pending WCQE lists */
- spin_lock_irqsave(&phba->hbalock, iflags);
- /* Pending FCP XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
- &cqelist);
+
/* Pending ELS XRI abort events */
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
- &cqelist);
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
+
	/* Pending async events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
- &cqelist);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
- while (!list_empty(&cqelist)) {
- list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
- lpfc_sli4_cq_event_release(phba, cqe);
+ while (!list_empty(&cq_event_list)) {
+ list_remove_head(&cq_event_list, cq_event,
+ struct lpfc_cq_event, list);
+ lpfc_sli4_cq_event_release(phba, cq_event);
}
}
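The hunks above split the old monolithic hbalock into dedicated list locks (asynce_list_lock, els_xri_abrt_list_lock) and drop the lock while each event is processed. A sketch of that drain pattern with illustrative names; the lpfc code uses its driver-local list_remove_head() helper where this uses list_first_entry()/list_del_init():

#include <linux/list.h>
#include <linux/spinlock.h>

struct event {
	struct list_head list;
};

static void drain_events(struct list_head *queue, spinlock_t *lock,
			 void (*handle)(struct event *))
{
	struct event *ev;
	unsigned long iflags;

	spin_lock_irqsave(lock, iflags);
	while (!list_empty(queue)) {
		ev = list_first_entry(queue, struct event, list);
		list_del_init(&ev->list);
		spin_unlock_irqrestore(lock, iflags);

		handle(ev);	/* lock dropped: handler may block */

		spin_lock_irqsave(lock, iflags);
	}
	spin_unlock_irqrestore(lock, iflags);
}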
@@ -12310,6 +12289,21 @@ fcponly:
else
phba->nsler = 0;
+ /* Save PB info for use during HBA setup */
+ sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
+ sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
+ sli4_params->mib_size = mbx_sli4_parameters->mib_size;
+ sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
+
+ /* Next we check for Vendor MIB support */
+ if (sli4_params->mi_ver && phba->cfg_enable_mi)
+ phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
+ sli4_params->mi_ver, phba->cfg_enable_mi,
+ sli4_params->mi_value, sli4_params->mib_bde_cnt,
+ sli4_params->mib_size);
return 0;
}
@@ -12515,10 +12509,11 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Remove FC host and then SCSI host with the physical port */
+ /* Remove FC host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
+ /* Clean up all nodes, mailboxes and IOs. */
lpfc_cleanup(vport);
/*
@@ -12581,8 +12576,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev_d: pointer to device
*
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) to device with SLI-3 interface spec. When
@@ -12600,10 +12594,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one_s3(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -12617,16 +12611,12 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
/* Disable interrupt from device */
lpfc_sli_disable_intr(phba);
- /* Save device state to PCI config space */
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
/**
* lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
- * @pdev: pointer to PCI device
+ * @dev_d: pointer to device
*
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) to device with SLI-3 interface spec. When PM
@@ -12643,10 +12633,10 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
 * 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one_s3(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
@@ -12654,19 +12644,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0452 PCI device Power Management resume.\n");
- /* Restore device state from PCI config space */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- /*
- * As the new kernel behavior of pci_restore_state() API call clears
- * device saved_state flag, need to save the restored state again.
- */
- pci_save_state(pdev);
-
- if (pdev->is_busmaster)
- pci_set_master(pdev);
-
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
@@ -13358,7 +13335,6 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
- /* Free the HBA sysfs attributes */
lpfc_free_sysfs_attr(vport);
/* Release all the vports against this physical port */
@@ -13371,7 +13347,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Remove FC host and then SCSI host with the physical port */
+ /* Remove FC host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
@@ -13423,8 +13399,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev_d: pointer to device
*
* This routine is called from the kernel's PCI subsystem to support system
* Power Management (PM) to device with SLI-4 interface spec. When PM invokes
@@ -13442,10 +13417,10 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one_s4(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -13460,16 +13435,12 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
lpfc_sli4_disable_intr(phba);
lpfc_sli4_queue_destroy(phba);
- /* Save device state to PCI config space */
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
/**
* lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
- * @pdev: pointer to PCI device
+ * @dev_d: pointer to device
*
* This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
@@ -13486,10 +13457,10 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
 * 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one_s4(struct device *dev_d)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev_d);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
@@ -13497,19 +13468,6 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0292 PCI device Power Management resume.\n");
- /* Restore device state from PCI config space */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- /*
- * As the new kernel behavior of pci_restore_state() API call clears
- * device saved_state flag, need to save the restored state again.
- */
- pci_save_state(pdev);
-
- if (pdev->is_busmaster)
- pci_set_master(pdev);
-
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
@@ -13825,8 +13783,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/**
* lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
- * @pdev: pointer to PCI device
- * @msg: power management message
+ * @dev: pointer to device
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it dispatches
@@ -13837,19 +13794,19 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
* 0 - driver suspended the device
* Error otherwise
**/
-static int
-lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+static int __maybe_unused
+lpfc_pci_suspend_one(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
int rc = -ENODEV;
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
- rc = lpfc_pci_suspend_one_s3(pdev, msg);
+ rc = lpfc_pci_suspend_one_s3(dev);
break;
case LPFC_PCI_DEV_OC:
- rc = lpfc_pci_suspend_one_s4(pdev, msg);
+ rc = lpfc_pci_suspend_one_s4(dev);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -13862,7 +13819,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
/**
* lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
- * @pdev: pointer to PCI device
+ * @dev: pointer to device
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it dispatches
@@ -13873,19 +13830,19 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
 * 0 - driver resumed the device
* Error otherwise
**/
-static int
-lpfc_pci_resume_one(struct pci_dev *pdev)
+static int __maybe_unused
+lpfc_pci_resume_one(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = dev_get_drvdata(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
int rc = -ENODEV;
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
- rc = lpfc_pci_resume_one_s3(pdev);
+ rc = lpfc_pci_resume_one_s3(dev);
break;
case LPFC_PCI_DEV_OC:
- rc = lpfc_pci_resume_one_s4(pdev);
+ rc = lpfc_pci_resume_one_s4(dev);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -14065,14 +14022,17 @@ static const struct pci_error_handlers lpfc_err_handler = {
.resume = lpfc_io_resume,
};
+static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
+ lpfc_pci_suspend_one,
+ lpfc_pci_resume_one);
+
static struct pci_driver lpfc_driver = {
.name = LPFC_DRIVER_NAME,
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
.shutdown = lpfc_pci_remove_one,
- .suspend = lpfc_pci_suspend_one,
- .resume = lpfc_pci_resume_one,
+ .driver.pm = &lpfc_pci_pm_ops_one,
.err_handler = &lpfc_err_handler,
};
@@ -14124,7 +14084,7 @@ lpfc_init(void)
fc_release_transport(lpfc_transport_template);
goto unregister;
}
- lpfc_nvme_cmd_template();
+ lpfc_wqe_cmd_template();
lpfc_nvmet_cmd_template();
/* Initialize in case vector mapping is needed */
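The lpfc_init.c conversion above moves the driver from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops. With driver.pm in place, the PCI core saves/restores config space and handles the D-state transitions itself, which is why the explicit pci_save_state()/pci_set_power_state() calls could be deleted from the callbacks. Generic shape of the conversion, with hypothetical "foo" names:

#include <linux/pci.h>
#include <linux/pm.h>

struct foo_hba { int brd_no; };		/* placeholder drvdata */

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct foo_hba *hba = dev_get_drvdata(dev);

	(void)hba;	/* quiesce hardware, disable interrupts ... */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct foo_hba *hba = dev_get_drvdata(dev);

	(void)hba;	/* restart worker thread, re-enable interrupts ... */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.driver.pm	= &foo_pm_ops,	/* replaces .suspend/.resume */
};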
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 27ff67e9edae..be54fbf5146f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -46,6 +46,7 @@
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
#define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
+#define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
@@ -111,8 +112,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
pool->current_count++;
}
- phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
- sizeof(LPFC_MBOXQ_t));
+ phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
+ sizeof(LPFC_MBOXQ_t));
if (!phba->mbox_mem_pool)
goto fail_free_mbuf_pool;
@@ -588,8 +589,6 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
* Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
 * pool along with a non-DMA-mapped container for it.
*
- * Notes: Not interrupt-safe. Must be called with no locks held.
- *
* Returns:
* pointer to HBQ on success
* NULL on failure
@@ -599,7 +598,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
struct rqb_dmabuf *dma_buf;
- dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
+ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
if (!dma_buf)
return NULL;
@@ -722,7 +721,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6409 Cannot post to HRQ %d: %x %x %x "
"DRQ %x %x\n",
@@ -732,6 +730,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
rqb_entry->hrq->entry_count,
rqb_entry->drq->host_index,
rqb_entry->drq->hba_index);
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
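A recurring shape in the lpfc_nportdisc.c hunks below (and in lpfc_set_unreg_login_mbx_cmpl() earlier) is: take a node reference for the mailbox context before issuing, bail out cleanly if the node is already dying, and rely on the completion handler to drop the reference. A condensed sketch using the driver's real identifiers; queue_unreg_login() itself is illustrative, assumes the caller already built the UNREG_LOGIN mailbox, and compiles only inside the driver:

static int queue_unreg_login(struct lpfc_hba *phba,
			     struct lpfc_nodelist *ndlp,
			     LPFC_MBOXQ_t *mbox)
{
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		/* Node already in release: don't issue the job. */
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENODEV;
	}

	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
	    MBX_NOT_FINISHED) {
		lpfc_nlp_put(ndlp);	/* cmpl won't run: drop our ref */
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	return 0;	/* completion handler does the lpfc_nlp_put() */
}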
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 92d6e7b98770..1ac855640fc5 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -247,7 +247,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&iocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
@@ -357,7 +357,6 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* Complete the unreg rpi mbx request, and update flags.
* This will also restart any deferred events.
*/
- lpfc_nlp_get(ndlp);
lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
if (!piocb) {
@@ -365,7 +364,7 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
"4578 PLOGI ACC fail\n");
if (mbox)
mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
+ return;
}
rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
@@ -376,15 +375,12 @@ lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(mbox, phba->mbox_mem_pool);
}
kfree(piocb);
-out:
- lpfc_nlp_put(ndlp);
}
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint64_t nlp_portwwn = 0;
@@ -588,7 +584,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
link_mbox->vport = vport;
- link_mbox->ctx_ndlp = ndlp;
+ link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!link_mbox->ctx_ndlp)
+ goto out;
+
link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
@@ -617,9 +616,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* command issued in lpfc_cmpl_els_acc().
*/
login_mbox->vport = vport;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/*
* If there is an outstanding PLOGI issued, abort it before
@@ -648,9 +647,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* this ELS request. The only way to do this is
* to register, then unregister the RPI.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
@@ -739,7 +738,6 @@ static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
@@ -821,9 +819,9 @@ out:
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -843,9 +841,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
* PLOGIs during LOGO storms from a device.
*/
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (els_cmd == ELS_CMD_PRLO)
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
@@ -890,9 +888,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
@@ -908,9 +906,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
@@ -918,9 +916,9 @@ out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* The driver has to wait until the ACC completes before it continues
* processing the LOGO. The action will resume in
* lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
@@ -1036,12 +1034,10 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 0;
}
@@ -1050,16 +1046,16 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
(ndlp->nlp_type & NLP_FCP_TARGET)))) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return 1;
}
}
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
@@ -1104,7 +1100,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
pmb->vport = vport;
- pmb->ctx_ndlp = ndlp;
+ pmb->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!pmb->ctx_ndlp) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
@@ -1192,12 +1192,11 @@ static uint32_t
lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
@@ -1258,9 +1257,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(vport->num_disc_nodes)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
@@ -1310,7 +1309,6 @@ static uint32_t
lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1325,9 +1323,9 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -1342,7 +1340,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
@@ -1488,7 +1485,11 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
}
+
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ goto out;
+
mbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED) {
@@ -1537,9 +1538,6 @@ out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DEFER_RM;
- spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
@@ -1573,12 +1571,10 @@ static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding PLOGI */
@@ -1595,7 +1591,6 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -1609,9 +1604,9 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
@@ -1620,7 +1615,6 @@ static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
@@ -1631,9 +1625,9 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
if (vport->num_disc_nodes)
lpfc_more_adisc(vport);
}
@@ -1704,7 +1698,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1722,9 +1715,9 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
@@ -1766,12 +1759,10 @@ static uint32_t
lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding ADISC */
@@ -1788,7 +1779,6 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -1802,9 +1792,9 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -1907,7 +1897,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* software abort if any GID_FT is outstanding */
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+ if (ns_ndlp)
lpfc_els_abort(phba, ns_ndlp);
}
@@ -1946,7 +1936,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->u.mb;
@@ -1973,9 +1962,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_issue_els_logo(vport, ndlp, 0);
@@ -2058,12 +2047,10 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
lpfc_drop_node(vport, ndlp);
@@ -2077,8 +2064,6 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
@@ -2087,7 +2072,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
/* If we are a target we won't immediately transition into PRLI,
* so if REG_LOGIN already completed we don't need to ignore it.
@@ -2097,7 +2082,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2168,7 +2153,6 @@ static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_hba *phba = vport->phba;
IOCB_t *irsp;
@@ -2290,9 +2274,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
out:
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_TARGET_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
@@ -2344,12 +2328,10 @@ static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
} else {
/* software abort outstanding PLOGI */
@@ -2383,7 +2365,6 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/* Don't do anything that will mess up processing of the
@@ -2397,9 +2378,9 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2436,12 +2417,11 @@ static uint32_t
lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
@@ -2478,13 +2458,11 @@ static uint32_t
lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2578,14 +2556,12 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
@@ -2656,14 +2632,12 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
void *arg,
uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2672,7 +2646,6 @@ static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Ignore PLOGI if we have an outstanding LOGO */
@@ -2680,9 +2653,9 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
/* send PLOGI immediately, move to PLOGI issue state */
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
@@ -2698,7 +2671,6 @@ static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
@@ -2709,10 +2681,10 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
lpfc_issue_els_adisc(vport, ndlp, 0);
} else {
@@ -2766,27 +2738,26 @@ static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
} else {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
}
return ndlp->nlp_state;
}
@@ -2797,16 +2768,12 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DEFER_RM;
- spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
@@ -2893,12 +2860,10 @@ static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
lpfc_drop_node(vport, ndlp);
@@ -2909,8 +2874,6 @@ static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
@@ -2918,10 +2881,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&ndlp->lock);
return ndlp->nlp_state;
}
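
The hunks above all apply one mechanical rule: every read-modify-write of ndlp->nlp_flag that used to serialize on the Scsi_Host-wide host_lock now serializes on the node's own ndlp->lock, shrinking the critical section to the single node being updated. A minimal user-space sketch of that rule, with a pthread mutex standing in for the kernel spinlock and illustrative names throughout:

#include <pthread.h>

#define NLP_DELAY_TMO 0x1                /* stand-in flag bit */

struct node {
        pthread_mutex_t lock;            /* was: shared host-wide lock */
        unsigned int nlp_flag;
};

static void node_set_flag(struct node *ndlp, unsigned int bit)
{
        pthread_mutex_lock(&ndlp->lock); /* per-node, not host-wide */
        ndlp->nlp_flag |= bit;
        pthread_mutex_unlock(&ndlp->lock);
}

int main(void)
{
        struct node n = { PTHREAD_MUTEX_INITIALIZER, 0 };

        node_set_flag(&n, NLP_DELAY_TMO);
        return 0;
}

The payoff is that state-machine events on different nodes no longer contend on one host-wide lock.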
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 0c39ed50998c..1cb82fa6a60e 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -62,180 +62,9 @@ lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
static struct nvme_fc_port_template lpfc_nvme_template;
-static union lpfc_wqe128 lpfc_iread_cmd_template;
-static union lpfc_wqe128 lpfc_iwrite_cmd_template;
-static union lpfc_wqe128 lpfc_icmnd_cmd_template;
-
-/* Setup WQE templates for NVME IOs */
-void
-lpfc_nvme_cmd_template(void)
-{
- union lpfc_wqe128 *wqe;
-
- /* IREAD template */
- wqe = &lpfc_iread_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - cmd_buff_len, payload_offset_len is zero */
-
- /* Word 4 - total_xfer_len is variable */
-
- /* Word 5 - is zero */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
- bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
- bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
-
- /* Word 11 - pbde is variable */
- bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
- bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
-
- /* Word 12 - is zero */
-
- /* Word 13, 14, 15 - PBDE is variable */
-
- /* IWRITE template */
- wqe = &lpfc_iwrite_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - cmd_buff_len, payload_offset_len is zero */
-
- /* Word 4 - total_xfer_len is variable */
-
- /* Word 5 - initial_xfer_len is variable */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
- bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
- bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
-
- /* Word 11 - pbde is variable */
- bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
- bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
-
- /* Word 12 - is zero */
-
- /* Word 13, 14, 15 - PBDE is variable */
-
- /* ICMND template */
- wqe = &lpfc_icmnd_cmd_template;
- memset(wqe, 0, sizeof(union lpfc_wqe128));
-
- /* Word 0, 1, 2 - BDE is variable */
-
- /* Word 3 - payload_offset_len is variable */
-
- /* Word 4, 5 - is zero */
-
- /* Word 6 - ctxt_tag, xri_tag is variable */
-
- /* Word 7 */
- bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
- bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
- bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
- bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
-
- /* Word 8 - abort_tag is variable */
-
- /* Word 9 - reqtag is variable */
-
- /* Word 10 - dbde, wqes is variable */
- bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
- bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
- bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
-
- /* Word 11 */
- bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
- bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
-
- /* Word 12, 13, 14, 15 - is zero */
-}
-
-/**
- * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-void
-lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the IO associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
/**
* lpfc_nvme_create_queue -
* @pnvme_lport: Transport localport that LS is to be issued from
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
* @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
* @qsize: Size of the queue in bytes
* @handle: An opaque driver handle used in follow-up calls.
@@ -357,39 +186,47 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
struct lpfc_nvme_rport *rport = remoteport->private;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
+ u32 fc4_xpt_flags;
ndlp = rport->ndlp;
- if (!ndlp)
+ if (!ndlp) {
+ pr_err("**** %s: NULL ndlp on rport %p remoteport %p\n",
+ __func__, rport, remoteport);
goto rport_err;
+ }
vport = ndlp->vport;
- if (!vport)
+ if (!vport) {
+ pr_err("**** %s: Null vport on ndlp %p, ste x%x rport %p\n",
+ __func__, ndlp, ndlp->nlp_state, rport);
goto rport_err;
+ }
+
+ fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;
/* Remove this rport from the lport's list - memory is owned by the
* transport. Remove the ndlp reference for the NVME transport before
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport x%px\n",
+ "6146 remoteport delete of remoteport %p\n",
remoteport);
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
/* The register rebind might have occurred before the delete
* downcall. Guard against this race.
*/
- if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
- ndlp->nrport = NULL;
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
+ ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);
- /* Remove original register reference. The host transport
- * won't reference this rport/remoteport any further.
- */
- lpfc_nlp_put(ndlp);
- } else {
- spin_unlock_irq(&vport->phba->hbalock);
- }
+ spin_unlock_irq(&ndlp->lock);
+
+ /* On a devloss timeout event, one more put is executed provided the
+ * NVME and SCSI rport unregister requests are complete. If the vport
+ * is unloading, this extra put is executed by lpfc_drop_node.
+ */
+ if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
rport_err:
return;
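
The reworked exit path above only hands the node to the discovery state machine for removal once neither transport registration remains. A stand-alone sketch of that gating (flag values are illustrative, not the real lpfc definitions):

#include <stdio.h>

#define NVME_XPT_REGD 0x1                /* illustrative values */
#define SCSI_XPT_REGD 0x2

int main(void)
{
        unsigned int mask = NVME_XPT_REGD | SCSI_XPT_REGD;
        unsigned int fc4_xpt_flags = SCSI_XPT_REGD; /* SCSI still bound */

        if (!(fc4_xpt_flags & mask))
                printf("both gone: send NLP_EVT_DEVICE_RM\n");
        else
                printf("defer: a transport still holds the node\n");
        return 0;
}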
@@ -567,6 +404,13 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Save for completion so we can release these resources */
genwqe->context1 = lpfc_nlp_get(ndlp);
+ if (!genwqe->context1) {
+ dev_warn(&phba->pcidev->dev,
+ "Warning: Failed node ref, not sending LS_REQ\n");
+ lpfc_sli_release_iocbq(phba, genwqe);
+ return 1;
+ }
+
genwqe->context2 = (uint8_t *)pnvme_lsreq;
/* Fill in payload, bp points to frame payload */
@@ -654,6 +498,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
"Data: x%x x%x rc x%x\n",
ndlp->nlp_DID, genwqe->iotag,
vport->port_state, rc);
+ lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, genwqe);
return 1;
}
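
Both additions follow the same discipline: lpfc_nlp_get() can now fail, returning NULL once the node's refcount has already dropped to zero, so a caller must check the result before issuing work against the node and must drop the reference itself on any later error path. A toy single-threaded model of that get/put pattern (hypothetical names; the kernel implements this with kref):

#include <stdio.h>

struct node { int refs; };

static struct node *node_get(struct node *n)
{
        if (n->refs <= 0)
                return NULL;             /* already being torn down */
        n->refs++;
        return n;
}

static void node_put(struct node *n)
{
        if (--n->refs == 0)
                printf("node freed\n");
}

int main(void)
{
        struct node n = { .refs = 1 };

        if (!node_get(&n))               /* take a ref before issuing */
                return 1;                /* bail instead of dereferencing */
        /* ...submission fails... */
        node_put(&n);                    /* error path drops its ref */
        node_put(&n);                    /* final put frees the node */
        return 0;
}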
@@ -695,7 +540,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int ret;
uint16_t ntype, nstate;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
"LS Req\n",
@@ -787,7 +632,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/**
* lpfc_nvme_ls_req - Issue an NVME Link Service request
* @pnvme_lport: Transport localport that LS is to be issued from.
- * @nvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_rport: Transport remoteport that LS is to be sent to.
* @pnvme_lsreq: the transport nvme_ls_req structure for the LS
*
* Driver registers this routine to handle any link service request
@@ -881,7 +726,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock(&pring->ring_lock);
if (foundit)
- lpfc_sli_issue_abort_iotag(phba, pring, wqe);
+ lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
spin_unlock_irq(&phba->hbalock);
if (foundit)
@@ -940,7 +785,6 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
{
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
- struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp;
int ret;
@@ -948,7 +792,6 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
if (unlikely(!lport))
return;
vport = lport->vport;
- phba = vport->phba;
if (vport->load_flag & FC_UNLOADING)
return;
@@ -1134,7 +977,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* transport is still transitioning.
*/
ndlp = lpfc_ncmd->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6062 Ignoring NVME cmpl. No ndlp\n");
goto out_err;
@@ -1292,7 +1135,7 @@ out_err:
/**
* lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
* @vport: pointer to a host virtual N_Port data structure
- * @lpfcn_cmd: Pointer to lpfc scsi command
+ * @lpfc_ncmd: Pointer to lpfc scsi command
* @pnode: pointer to a node-list data structure
* @cstat: pointer to the control status structure
*
@@ -1316,9 +1159,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
- if (!NLP_CHK_NODE_ACT(pnode))
- return -EINVAL;
-
/*
* There are three possibilities here - use scatter-gather segment, use
* the single mapping, or neither.
@@ -1390,6 +1230,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 9 */
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+ /* Word 10 */
+ bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);
+
/* Words 13 14 15 are for PBDE support */
pwqeq->vport = vport;
@@ -1400,7 +1243,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/**
* lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
* @vport: pointer to a host virtual N_Port data structure
- * @lpfcn_cmd: Pointer to lpfc scsi command
+ * @lpfc_ncmd: Pointer to lpfc scsi command
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -1557,7 +1400,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
le32_to_cpu(first_data_sgl->sge_len);
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
- /* wqe_pbde is 1 in template */
+
+ /* Word 11 */
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
} else {
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
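
With the PBDE toggle moved out of the WQE template, Word 11 is now set per-IO through the driver's bf_set() bitfield accessors. A stand-alone sketch of the mask-and-shift mechanics such accessors wrap (the shift and mask here are illustrative, not the real lpfc field layout):

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout: bit 0 of word 11 is the PBDE enable. */
#define WQE_PBDE_SHIFT 0
#define WQE_PBDE_MASK  0x1u

static void wqe_set_pbde(uint32_t *word11, uint32_t val)
{
        *word11 &= ~(WQE_PBDE_MASK << WQE_PBDE_SHIFT);      /* clear */
        *word11 |= (val & WQE_PBDE_MASK) << WQE_PBDE_SHIFT; /* set */
}

int main(void)
{
        uint32_t word11 = 0;

        wqe_set_pbde(&word11, 1);
        printf("word11 = 0x%08x\n", word11);  /* 0x00000001 */
        return 0;
}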
@@ -1582,16 +1427,14 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
/**
* lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
* @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver.
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
- * data structure to the rport
- indicated in @lpfc_nvme_rport.
+ * data structure to the rport indicated in @lpfc_nvme_rport.
*
* Return value:
* 0 - Success
@@ -1670,7 +1513,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* transport is still transitioning.
*/
ndlp = rport->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
"6053 Busy IO, ndlp not ready: rport x%px "
"ndlp x%px, DID x%06x\n",
@@ -1688,7 +1531,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x Flg x%x\n",
pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type,
- ndlp->upcall_flags);
+ ndlp->fc4_xpt_flags);
atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -EBUSY;
goto out_fail;
@@ -1839,7 +1682,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
- * @rspiocb: Pointer to response iocb object.
+ * @abts_cmpl: Pointer to wcqe complete object.
*
* This is the callback function for any NVME FCP IO that was aborted.
*
@@ -1865,11 +1708,10 @@ lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
- * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
* @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver.
*
* Driver registers this routine as its nvme request io abort handler. This
* routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
@@ -1890,7 +1732,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_io_buf *lpfc_nbuf;
- struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
@@ -2001,42 +1842,23 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
goto out_unlock;
}
- abts_buf = __lpfc_sli_get_iocbq(phba);
- if (!abts_buf) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "6136 No available abort wqes. Skipping "
- "Abts req for nvme_fcreq x%px xri x%x\n",
- pnvme_fcreq, nvmereq_wqe->sli4_xritag);
- goto out_unlock;
- }
-
- /* Ready - mark outstanding as aborted by driver. */
- nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
+ lpfc_nvme_abort_fcreq_cmpl);
- lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abts_buf->iocb_flag |= LPFC_IO_NVME;
- abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
- abts_buf->vport = vport;
- abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
- ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
- if (ret_val) {
+ if (ret_val != WQE_SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
- lpfc_sli_release_iocbq(phba, abts_buf);
return;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x on reqtag x%x\n",
- nvmereq_wqe->sli4_xritag,
- abts_buf->iotag);
+ "ox_id x%x\n",
+ nvmereq_wqe->sli4_xritag);
return;
out_unlock:
@@ -2072,9 +1894,8 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
-/**
+/*
* lpfc_get_nvme_buf - Get an nvme buffer from io_buf_list of the HBA
- * @phba: The HBA for which this call is being executed.
*
* This routine removes an nvme buffer from head of @hdwq io_buf_list
* and returns to caller.
@@ -2174,7 +1995,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
/**
* lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
- * @pvport - the lpfc_vport instance requesting a localport.
+ * @vport: the lpfc_vport instance requesting a localport.
*
* This routine is invoked to create an nvme localport instance to bind
* to the nvme_fc_transport. It is called once during driver load
@@ -2280,6 +2101,8 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
int ret, i, pending = 0;
struct lpfc_sli_ring *pring;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli4_hdw_queue *qp;
+ int abts_scsi, abts_nvme;
/* Host transport has to clean up and confirm requiring an indefinite
* wait. Print a message if a 10 second wait expires and renew the
@@ -2290,17 +2113,23 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
pending = 0;
+ abts_scsi = 0;
+ abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].io_wq->pring;
+ qp = &phba->sli4_hba.hdwq[i];
+ pring = qp->io_wq->pring;
if (!pring)
continue;
- if (pring->txcmplq_cnt)
- pending += pring->txcmplq_cnt;
+ pending += pring->txcmplq_cnt;
+ abts_scsi += qp->abts_scsi_io_bufs;
+ abts_nvme += qp->abts_nvme_io_bufs;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6176 Lport x%px Localport x%px wait "
- "timed out. Pending %d. Renewing.\n",
- lport, vport->localport, pending);
+ "timed out. Pending %d [%d:%d]. "
+ "Renewing.\n",
+ lport, vport->localport, pending,
+ abts_scsi, abts_nvme);
continue;
}
break;
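
The reworked wait loop above now also tallies outstanding SCSI and NVME aborts per hardware queue each time the ten-second wait expires. A minimal user-space sketch of its renew-on-timeout shape (an illustrative stand-in for wait_for_completion_timeout()):

#include <stdbool.h>
#include <stdio.h>

/* Pretend wait: returns true once the unregister has completed. */
static bool wait_unreg_done(int *attempts_left)
{
        return --*attempts_left <= 0;
}

int main(void)
{
        int attempts_left = 3;

        while (1) {
                if (!wait_unreg_done(&attempts_left)) {
                        printf("wait timed out, IO pending, renewing\n");
                        continue;        /* re-arm the same wait */
                }
                break;                   /* completion fired */
        }
        return 0;
}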
@@ -2313,7 +2142,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
- * @pnvme: pointer to lpfc nvme data structure.
+ * @vport: pointer to a host virtual N_Port data structure
*
* This routine is invoked to destroy all lports bound to the phba.
* The lport memory was allocated by the nvme fc transport and is
@@ -2454,14 +2283,18 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
else
rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
if (oldrport) {
prev_ndlp = oldrport->ndlp;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
} else {
- spin_unlock_irq(&vport->phba->hbalock);
- lpfc_nlp_get(ndlp);
+ spin_unlock_irq(&ndlp->lock);
+ if (!lpfc_nlp_get(ndlp)) {
+ dev_warn(&vport->phba->pcidev->dev,
+ "Warning - No node ref - exit register\n");
+ return 0;
+ }
}
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
@@ -2473,9 +2306,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Guard against an unregister/reregister
* race that leaves the WAIT flag set.
*/
- spin_lock_irq(&vport->phba->hbalock);
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
+ spin_unlock_irq(&ndlp->lock);
rport = remote_port->private;
if (oldrport) {
@@ -2483,10 +2317,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* before dropping the ndlp ref from
* register.
*/
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nrport = NULL;
- ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- spin_unlock_irq(&vport->phba->hbalock);
+ ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+ spin_unlock_irq(&ndlp->lock);
rport->ndlp = NULL;
rport->remoteport = NULL;
@@ -2495,8 +2329,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* reference would cause a premature cleanup.
*/
if (prev_ndlp && prev_ndlp != ndlp) {
- if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
- (!prev_ndlp->nrport))
+ if (!prev_ndlp->nrport)
lpfc_nlp_put(prev_ndlp);
}
}
@@ -2505,9 +2338,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport->remoteport = remote_port;
rport->lport = lport;
rport->ndlp = ndlp;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
ndlp->nrport = rport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Bind lport x%px to remoteport x%px "
@@ -2532,7 +2365,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
#endif
}
-/**
+/*
* lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
*
* If the ndlp represents an NVME Target, that we are logged into,
@@ -2546,11 +2379,11 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *nrport;
struct nvme_fc_remote_port *remoteport = NULL;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
nrport = lpfc_ndlp_get_nrport(ndlp);
if (nrport)
remoteport = nrport->remoteport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6170 Rescan NPort DID x%06x type x%x "
@@ -2613,20 +2446,21 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
- spin_lock_irq(&vport->phba->hbalock);
+ spin_lock_irq(&ndlp->lock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
remoteport = rport->remoteport;
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&ndlp->lock);
if (!remoteport)
goto input_err;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6033 Unreg nvme remoteport x%px, portname x%llx, "
- "port_id x%06x, portstate x%x port type x%x\n",
+ "port_id x%06x, portstate x%x port type x%x "
+ "refcnt %d\n",
remoteport, remoteport->port_name,
remoteport->port_id, remoteport->port_state,
- ndlp->nlp_type);
+ ndlp->nlp_type, kref_read(&ndlp->kref));
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
@@ -2636,7 +2470,9 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
- ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
+ spin_lock_irq(&vport->phba->hbalock);
+ ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
+ spin_unlock_irq(&vport->phba->hbalock);
/* Don't let the host nvme transport keep sending keep-alives
* on this remoteport. Vport is unloading, no recovery. The
@@ -2647,8 +2483,15 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
+
+ /* The driver no longer knows if the nrport memory is valid
+ * because the controller teardown process has begun and
+ * is asynchronous. Break the binding in the ndlp. Also
+ * remove the register ndlp reference to setup node release.
+ */
+ ndlp->nrport = NULL;
+ lpfc_nlp_put(ndlp);
if (ret != 0) {
- lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6167 NVME unregister failed %d "
"port_state x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 4a4c3f780e1f..69a5a844c69c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -38,7 +38,7 @@
#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"
#define lpfc_ndlp_get_nrport(ndlp) \
- ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
+ ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \
? NULL : ndlp->nrport)
struct lpfc_nvme_qhandle {
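
With the wait flag relocated into fc4_xpt_flags, the macro still hides the unregister-pending window by returning NULL. A usage sketch mirroring its callers in lpfc_nvme.c, where nrport is only sampled under the new per-node lock:

struct lpfc_nvme_rport *nrport;
struct nvme_fc_remote_port *remoteport = NULL;

spin_lock_irq(&ndlp->lock);              /* per-node lock, see above */
nrport = lpfc_ndlp_get_nrport(ndlp);     /* NULL while unreg pending */
if (nrport)
        remoteport = nrport->remoteport;
spin_unlock_irq(&ndlp->lock);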
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index d4ade7cdb93a..a71df8788fff 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -105,7 +105,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 9 - reqtag, rcvoxid is variable */
/* Word 10 - wqes, xc is variable */
- bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
@@ -153,7 +153,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 10 - xc is variable */
bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
- bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
@@ -195,7 +195,7 @@ lpfc_nvmet_cmd_template(void)
/* Word 10 wqes, xc is variable */
bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
- bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
+ bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
@@ -371,8 +371,7 @@ finish:
/**
* lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
* @phba: HBA buffer is associated with
- * @ctxp: context to clean up
- * @mp: Buffer to free
+ * @ctx_buf: ctx buffer to clean up and repost
*
* Description: Frees the given DMA buffer in the appropriate way given by
* reposting it to its associated RQ so it can be reused.
@@ -1291,10 +1290,10 @@ lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/**
* lpfc_nvmet_ls_req - Issue an Link Service request
- * @targetport - pointer to target instance registered with nvmet transport.
- * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
+ * @targetport: pointer to target instance registered with nvmet transport.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
* Driver sets this value to the ndlp pointer.
- * @pnvme_lsreq - the transport nvme_ls_req structure for the LS
+ * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
*
* Driver registers this routine to handle any link service request
* from the nvme_fc transport to a remote nvme-aware port.
@@ -1336,9 +1335,9 @@ lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
/**
* lpfc_nvmet_ls_abort - Abort a prior NVME LS request
* @targetport: Transport targetport, that LS was issued from.
- * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv.
+ * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
* Driver sets this value to the ndlp pointer.
- * @pnvme_lsreq - the transport nvme_ls_req structure for LS to be aborted
+ * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
*
* Driver registers this routine to abort an NVME LS request that is
* in progress (from the transport's perspective).
@@ -1807,7 +1806,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
lpfc_set_rrq_active(phba, ndlp,
@@ -2597,7 +2596,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -2717,7 +2716,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -3249,7 +3248,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
if (tgtp)
@@ -3328,6 +3327,46 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
return 1;
}
+/**
+ * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniquely identifies the local exchange resource.
+ * @opt: Option bits -
+ * bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+static void
+lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (opt & INHIBIT_ABORT)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ /* Abort specified xri tag, with the mask deliberately zeroed */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* Abort the I/O associated with this outstanding exchange ID. */
+ wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+ /* iotag for the wqe completion. */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
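
A caller-side sketch for the routine above, matching how the solicited-abort path later in this patch uses it; opt bit 0 (INHIBIT_ABORT) suppresses the on-the-wire ABTS and only reclaims the exchange:

/* Mark the outstanding WQE aborted, then build the abort WQE against
 * its exchange; opt bit 0 suppresses the ABTS on the link. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);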
+
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_async_xchg_ctx *ctxp,
@@ -3347,7 +3386,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
@@ -3423,7 +3462,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
@@ -3596,8 +3635,8 @@ out:
/**
* lpfc_nvmet_invalidate_host
*
- * @phba - pointer to the driver instance bound to an adapter port.
- * @ndlp - pointer to an lpfc_nodelist type
+ * @phba: pointer to the driver instance bound to an adapter port.
+ * @ndlp: pointer to an lpfc_nodelist type
*
* This routine upcalls the nvmet transport to invalidate an NVME
* host to which this target instance had active connections.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 983eeb0e3d07..3b989f720937 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -313,7 +313,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
/**
* lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
* @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
+ * @num_to_alloc: The requested number of buffers to allocate.
*
* This routine allocates a scsi buffer for device with SLI-3 interface spec,
* the scsi buffer contains all the necessary information needed to initiate
@@ -497,6 +497,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
* lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
+ * @idx: index into hdwq
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* FCP or NVME aborted xri.
@@ -579,6 +580,8 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
/**
* lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* and returns to caller.
@@ -618,6 +621,8 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/**
* lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @hdwq io_buf_list
* and returns to caller.
@@ -633,7 +638,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_sli4_hdw_queue *qp;
struct sli4_sge *sgl;
- IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
uint32_t cpu, idx;
@@ -703,24 +707,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
- /*
- * Since the IOCB for the FCP I/O is built into this
- * lpfc_io_buf, initialize it with all known data now.
- */
- iocb = &lpfc_cmd->cur_iocbq.iocb;
- iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- /* setting the BLP size to 2 * sizeof BDE may not be correct.
- * We are setting the bpl to point to out sgl. An sgl's
- * entries are 16 bytes, a bpl entries are 12 bytes.
- */
- iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
- iocb->ulpBdeCount = 1;
- iocb->ulpLe = 1;
- iocb->ulpClass = CLASS3;
-
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
@@ -730,6 +716,8 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/**
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* and returns to caller.
@@ -818,6 +806,25 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
}
/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
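
The helper copies the FCP command into the IOCB one 32-bit word at a time, byte-swapping on little-endian hosts so the payload goes out big-endian. A stand-alone demo of the word-wise conversion (user-space; htonl() stands in for cpu_to_be32()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
        uint8_t src[32], dst[32];
        size_t i;

        for (i = 0; i < sizeof(src); i++)
                src[i] = (uint8_t)i;            /* 00 01 02 03 ... */

        for (i = 0; i < sizeof(src); i += 4) {
                uint32_t w;

                memcpy(&w, src + i, 4);
                w = htonl(w);                   /* byte-swap on LE hosts */
                memcpy(dst + i, &w, 4);
        }

        /* On a little-endian host the first word prints 03 02 01 00 */
        for (i = 0; i < 4; i++)
                printf("%02x ", dst[i]);
        printf("\n");
        return 0;
}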
+
+/**
* lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
* @phba: The Hba for which this call is being executed.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -953,6 +960,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
return 0;
}
@@ -976,7 +984,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* @sc: The SCSI command to examine
* @reftag: (out) BlockGuard reference tag for transmitted data
* @apptag: (out) BlockGuard application tag for transmitted data
- * @new_guard (in) Value to replace CRC with if needed
+ * @new_guard: (in) Value to replace CRC with if needed
*
* Returns BG_ERR_* bit mask or 0 if request ignored
**/
@@ -1381,8 +1389,8 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* the specified SCSI command.
* @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
- * @txopt: (out) BlockGuard operation for transmitted data
- * @rxopt: (out) BlockGuard operation for received data
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
*
* Returns: zero on success; non-zero if tx and/or rx op cannot be determined
*
@@ -1461,8 +1469,8 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* the specified SCSI command in order to force a guard tag error.
* @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
- * @txopt: (out) BlockGuard operation for transmitted data
- * @rxopt: (out) BlockGuard operation for received data
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
*
* Returns: zero on success; non-zero if tx and/or rx op cannot be determined
*
@@ -1533,7 +1541,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* @phba: The Hba for which this call is being executed.
* @sc: pointer to scsi command we're working on
* @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
+ * @datasegcnt: number of segments of data that have been dma mapped
*
* This function sets up BPL buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -1920,7 +1928,8 @@ out:
* @phba: The Hba for which this call is being executed.
* @sc: pointer to scsi command we're working on
* @sgl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
+ * @datasegcnt: number of segments of data that have been dma mapped
+ * @lpfc_cmd: lpfc scsi command object pointer.
*
* This function sets up SGL buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -2094,6 +2103,7 @@ out:
* @sgl: pointer to buffer list for protection groups
* @datacnt: number of segments of data that have been dma mapped
* @protcnt: number of segment of protection data that have been dma mapped
+ * @lpfc_cmd: lpfc scsi command object pointer.
*
* This function sets up SGL buffer list for protection groups of
* type LPFC_PG_TYPE_DIF
@@ -2881,6 +2891,150 @@ out:
}
}
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA. In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ * 0 - No error found
+ * 1 - BlockGuard error found
+ * -1 - Internal error (bad profile, etc.)
+ */
+static int
+lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ int ret = 0;
+ u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
+ u32 bghm = 0;
+ u32 bgstat = 0;
+ u64 failing_sector = 0;
+
+ if (status == CQE_STATUS_DI_ERROR) {
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+ bgstat |= BGS_GUARD_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
+ bgstat |= BGS_APPTAG_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
+ bgstat |= BGS_REFTAG_ERR_MASK;
+
+ /* Check to see if there was any good data before the error */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
+ bghm = wcqe->total_data_placed;
+ }
+
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!bgstat)
+ bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
+ }
+
+ if (lpfc_bgs_get_guard_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
+ phba->bg_guard_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9059 BLKGRD: Guard Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_reftag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
+
+ phba->bg_reftag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9060 BLKGRD: Ref Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_apptag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
+ SAM_STAT_CHECK_CONDITION;
+
+ phba->bg_apptag_err_cnt++;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9062 BLKGRD: App Tag error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+ }
+
+ if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+ /*
+ * setup sense data descriptor 0 per SPC-4 as an information
+ * field, and put the failing LBA in it.
+ * This code assumes there was also a guard/app/ref tag error
+ * indication.
+ */
+ cmd->sense_buffer[7] = 0xc; /* Additional sense length */
+ cmd->sense_buffer[8] = 0; /* Information descriptor type */
+ cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
+ cmd->sense_buffer[10] = 0x80; /* Validity bit */
+
+ /* bghm is an "on the wire" FC frame-based count */
+ switch (scsi_get_prot_op(cmd)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ bghm /= cmd->device->sector_size;
+ break;
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ bghm /= (cmd->device->sector_size +
+ sizeof(struct scsi_dif_tuple));
+ break;
+ }
+
+ failing_sector = scsi_get_lba(cmd);
+ failing_sector += bghm;
+
+ /* Descriptor Information */
+ put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
+ }
+
+ if (!ret) {
+ /* No error was reported - problem in FW? */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+ "9068 BLKGRD: Unknown error in cmd"
+ " 0x%x lba 0x%llx blk cnt 0x%x "
+ "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+ (unsigned long long)scsi_get_lba(cmd),
+ blk_rq_sectors(cmd->request), bgstat, bghm);
+
+ /* Calculate what type of error it was */
+ lpfc_calc_bg_err(phba, lpfc_cmd);
+ }
+ return ret;
+}
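
A worked example for the bghm conversion in the high-water-mark branch, assuming 512-byte logical blocks and 8-byte DIF tuples: on PASS operations the tuple travels with each block, so 5200 good wire bytes put the failing sector 10 blocks (5200 / 520) past scsi_get_lba(); on host-side INSERT/STRIP only data bytes cross the wire, so 5120 bytes likewise give 10 blocks. As a snippet:

#include <stdio.h>

int main(void)
{
        unsigned int sector = 512, tuple = 8;

        /* PASS ops: DIF tuples travel with the data on the wire. */
        printf("pass:  %u blocks\n", 5200 / (sector + tuple)); /* 10 */

        /* INSERT/STRIP ops: only data bytes cross the wire. */
        printf("strip: %u blocks\n", 5120 / sector);           /* 10 */
        return 0;
}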
/*
* This function checks for BlockGuard errors detected by
@@ -3050,7 +3204,9 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
struct sli4_sge *first_data_sgl;
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ struct lpfc_vport *vport = phba->pport;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
dma_addr_t physaddr;
uint32_t num_bde = 0;
uint32_t dma_len;
@@ -3191,13 +3347,16 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &wqe->words[13];
bde->addrLow = first_data_sgl->addr_lo;
bde->addrHigh = first_data_sgl->addr_hi;
bde->tus.f.bdeSize =
le32_to_cpu(first_data_sgl->sge_len);
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
+
+ } else {
+ memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
}
} else {
sgl += 1;
@@ -3209,11 +3368,15 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
phba->cfg_enable_pbde) {
bde = (struct ulp_bde64 *)
- &(iocb_cmd->unsli3.sli3Words[5]);
+ &wqe->words[13];
memset(bde, 0, (sizeof(uint32_t) * 3));
}
}
+ /* Word 11 */
+ if (phba->cfg_enable_pbde)
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
+
/*
* Finish initializing those IOCB fields that are dependent on the
* scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
@@ -3221,12 +3384,23 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* all iocb memory resources are reused.
*/
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
-
- /*
- * Due to difference in data length between DIF/non-DIF paths,
- * we need to set word 4 of IOCB here
- */
- iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+ /* Set first-burst provided it was successfully negotiated */
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ vport->cfg_first_burst_size &&
+ scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ u32 init_len, total_len;
+
+ total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ init_len = min(total_len, vport->cfg_first_burst_size);
+
+ /* Word 4 & 5 */
+ wqe->fcp_iwrite.initial_xfer_len = init_len;
+ wqe->fcp_iwrite.total_xfer_len = total_len;
+ } else {
+ /* Word 4 */
+ wqe->fcp_iwrite.total_xfer_len =
+ be32_to_cpu(fcp_cmnd->fcpDl);
+ }
/*
* If the OAS driver feature is enabled and the lun is enabled for
@@ -3237,6 +3411,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->priority;
+
+ /* Word 10 */
+ bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
+
+ if (lpfc_cmd->cur_iocbq.priority)
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (lpfc_cmd->cur_iocbq.priority << 1));
+ else
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (phba->cfg_XLanePriority << 1));
}
return 0;
@@ -3262,7 +3447,8 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t num_sge = 0;
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
@@ -3394,28 +3580,50 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
- /*
- * Due to difference in data length between DIF/non-DIF paths,
- * we need to set word 4 of IOCB here
- */
- iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+ /* Set first-burst provided it was successfully negotiated */
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ vport->cfg_first_burst_size &&
+ scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ u32 init_len, total_len;
- /*
- * For First burst, we may need to adjust the initial transfer
- * length for DIF
- */
- if (iocb_cmd->un.fcpi.fcpi_XRdy &&
- (fcpdl < vport->cfg_first_burst_size))
- iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+ total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+ init_len = min(total_len, vport->cfg_first_burst_size);
+
+ /* Word 4 & 5 */
+ wqe->fcp_iwrite.initial_xfer_len = init_len;
+ wqe->fcp_iwrite.total_xfer_len = total_len;
+ } else {
+ /* Word 4 */
+ wqe->fcp_iwrite.total_xfer_len =
+ be32_to_cpu(fcp_cmnd->fcpDl);
+ }
/*
* If the OAS driver feature is enabled and the lun is enabled for
* OAS, set the oas iocb related flags.
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
- scsi_cmnd->device->hostdata)->oas_enabled)
+ scsi_cmnd->device->hostdata)->oas_enabled) {
lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ /* Word 10 */
+ bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->generic.wqe_com,
+ (phba->cfg_XLanePriority << 1));
+ }
+
+ /* Word 7. DIF Flags */
+ if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+ else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+ else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+
+ lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
+ LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
+
return 0;
err:
if (lpfc_cmd->seg_cnt)
@@ -3475,6 +3683,26 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
}
/**
+ * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
+ * buffer
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ * @tmo: Timeout value for the IO
+ *
+ * This routine initializes the IOCB/WQE data structure from the scsi command
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
+{
+ return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
+}
+
+/**
* lpfc_send_scsi_error_event - Posts an event when there is SCSI error
* @phba: Pointer to hba context object.
* @vport: Pointer to vport object.
@@ -3486,17 +3714,16 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
**/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
- struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+ struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
- uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
struct lpfc_fast_path_event *fast_path_evt = NULL;
struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
unsigned long flags;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ if (!pnode)
return;
/* If there is queuefull or busy condition send a scsi event */
@@ -3606,13 +3833,11 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
**/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_iocbq *rsp_iocb)
+ uint32_t fcpi_parm)
{
- struct lpfc_hba *phba = vport->phba;
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
- uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
@@ -3747,13 +3972,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
*/
} else if (fcpi_parm) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "9029 FCP %s Check Error xri x%x Data: "
+ "9029 FCP %s Check Error Data: "
"x%x x%x x%x x%x x%x\n",
((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
"Read" : "Write"),
- ((phba->sli_rev == LPFC_SLI_REV4) ?
- lpfc_cmd->cur_iocbq.sli4_xritag :
- rsp_iocb->iocb.ulpContext),
fcpDl, be32_to_cpu(fcprsp->rspResId),
fcpi_parm, cmnd->cmnd[0], scsi_status);
@@ -3780,7 +4002,336 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
out:
cmnd->result = host_status << 16 | scsi_status;
- lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
+ lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
+}
+
+/**
+ * lpfc_fcp_io_cmd_wqe_cmpl - Complete an FCP IO
+ * @phba: The hba for which this call is being executed.
+ * @pwqeIn: The command WQE for the scsi cmnd.
+ * @wcqe: Pointer to the completion WCQE for the scsi cmnd.
+ *
+ * This routine assigns the scsi command result by examining the completion
+ * WCQE status field. It also handles the QUEUE FULL condition by ramping
+ * down the device queue depth.
+ **/
+static void
+lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_io_buf *lpfc_cmd =
+ (struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_vport *vport = pwqeIn->vport;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct scsi_cmnd *cmd;
+ unsigned long flags;
+ struct lpfc_fast_path_event *fast_path_evt;
+ struct Scsi_Host *shost;
+ u32 logit = LOG_FCP;
+ u32 status, idx;
+ unsigned long iflags = 0;
+
+ /* Sanity check on return of outstanding command */
+ if (!lpfc_cmd) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "9032 Null lpfc_cmd pointer. No "
+ "release, skip completion\n");
+ return;
+ }
+
+ rdata = lpfc_cmd->rdata;
+ ndlp = rdata->pnode;
+
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ /* TOREMOVE - currently this flag is checked during
+ * the release of lpfc_iocbq. Remove once we move
+ * to lpfc_wqe_job construct.
+ *
+ * This needs to be done outside buf_lock
+ */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
+
+ /* Sanity check on return of outstanding command */
+ cmd = lpfc_cmd->pCmd;
+ if (!cmd || !phba) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "9042 I/O completion: Not an active IO\n");
+ spin_unlock(&lpfc_cmd->buf_lock);
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return;
+ }
+ idx = lpfc_cmd->cur_iocbq.hba_wqidx;
+ if (phba->sli4_hba.hdwq)
+ phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+#endif
+ shost = cmd->device->host;
+
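+ /* Map the WCQE completion status and parameter into the legacy
+ * IOCB status/result fields used by the common logic below.
+ */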
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
+ lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
+
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->prot_data_type) {
+ struct scsi_dif_tuple *src = NULL;
+
+ src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+ /*
+ * Used to restore any changes to protection
+ * data for error injection.
+ */
+ switch (lpfc_cmd->prot_data_type) {
+ case LPFC_INJERR_REFTAG:
+ src->ref_tag =
+ lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_APPTAG:
+ src->app_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_GUARD:
+ src->guard_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ default:
+ break;
+ }
+
+ lpfc_cmd->prot_data = 0;
+ lpfc_cmd->prot_data_type = 0;
+ lpfc_cmd->prot_data_segment = NULL;
+ }
+#endif
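+ /* Normalize driver-detected errors and log any abnormal completion. */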
+ if (unlikely(lpfc_cmd->status)) {
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+ (lpfc_cmd->result & IOERR_DRVR_MASK))
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ else if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+ if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+ !lpfc_cmd->fcp_rsp->rspStatus3 &&
+ (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+ !(vport->cfg_log_verbose & LOG_FCP_UNDER))
+ logit = 0;
+ else
+ logit = LOG_FCP | LOG_FCP_UNDER;
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9034 FCP cmd x%x failed <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter, wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iotag);
+ }
+
+ switch (lpfc_cmd->status) {
+ case IOSTAT_SUCCESS:
+ cmd->result = DID_OK << 16;
+ break;
+ case IOSTAT_FCP_RSP_ERROR:
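+ /* A WCQE carries no fcpi_parm; derive the residual as the
+ * requested transfer length minus the bytes the adapter placed.
+ */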
+ lpfc_handle_fcp_err(vport, lpfc_cmd,
+ pwqeIn->wqe.fcp_iread.total_xfer_len -
+ wcqe->total_data_placed);
+ break;
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ break;
+ fast_path_evt->un.fabric_evt.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.fabric_evt.subcategory =
+ (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+ if (ndlp) {
+ memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+ &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+ &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+ fast_path_evt->vport = vport;
+ fast_path_evt->work_evt.evt =
+ LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp,
+ &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9035 Fabric/Node busy FCP cmd x%x failed"
+ " <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter,
+ wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+ break;
+ case IOSTAT_REMOTE_STOP:
+ if (ndlp) {
+ /* This I/O was aborted by the target, we don't
+ * know the rxid and because we did not send the
+ * ABTS we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ lpfc_cmd->cur_iocbq.sli4_lxritag,
+ 0, 0);
+ }
+ fallthrough;
+ case IOSTAT_LOCAL_REJECT:
+ if (lpfc_cmd->result & IOERR_DRVR_MASK)
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = DID_NO_CONNECT << 16;
+ break;
+ }
+ if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+ lpfc_cmd->result == IOERR_NO_RESOURCES ||
+ lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
+ cmd->result = DID_REQUEUE << 16;
+ break;
+ }
+ if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+ lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+ status == CQE_STATUS_DI_ERROR) {
+ if (scsi_get_prot_op(cmd) !=
+ SCSI_PROT_NORMAL) {
+ /*
+ * This is a response for a BG enabled
+ * cmd. Parse BG error
+ */
+ lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
+ wcqe);
+ break;
+ }
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9040 non-zero BGSTAT on unprotected cmd\n");
+ }
+ lpfc_printf_vlog(vport, KERN_WARNING, logit,
+ "9036 Local Reject FCP cmd x%x failed"
+ " <%d/%lld> "
+ "status: x%x result: x%x "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x\n",
+ cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ vport->fc_myDID,
+ (ndlp) ? ndlp->nlp_DID : 0,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ wcqe->parameter,
+ wcqe->total_data_placed,
+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+ fallthrough;
+ default:
+ if (lpfc_cmd->status >= IOSTAT_CNT)
+ lpfc_cmd->status = IOSTAT_DEFAULT;
+ cmd->result = DID_ERROR << 16;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "9037 FCP Completion Error: xri %x "
+ "status x%x result x%x [x%x] "
+ "placed x%x\n",
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ lpfc_cmd->status, lpfc_cmd->result,
+ wcqe->parameter,
+ wcqe->total_data_placed);
+ }
+ if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+ u32 *lp = (u32 *)cmd->sense_buffer;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "9039 Iodone <%d/%llu> cmd x%p, error "
+ "x%x SNS x%x x%x Data: x%x x%x\n",
+ cmd->device->id, cmd->device->lun, cmd,
+ cmd->result, *lp, *(lp + 3), cmd->retries,
+ scsi_get_resid(cmd));
+ }
+
+ lpfc_update_stats(vport, lpfc_cmd);
+
+ if (vport->cfg_max_scsicmpl_time &&
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (ndlp) {
+ if (ndlp->cmd_qdepth >
+ atomic_read(&ndlp->cmd_pending) &&
+ (atomic_read(&ndlp->cmd_pending) >
+ LPFC_MIN_TGT_QDEPTH) &&
+ (cmd->cmnd[0] == READ_10 ||
+ cmd->cmnd[0] == WRITE_10))
+ ndlp->cmd_qdepth =
+ atomic_read(&ndlp->cmd_pending);
+
+ ndlp->last_change_time = jiffies;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->ts_cmd_start) {
+ lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
+ lpfc_cmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_cmd);
+ }
+#endif
+ lpfc_cmd->pCmd = NULL;
+ spin_unlock(&lpfc_cmd->buf_lock);
+
+ /* The sdev is not guaranteed to be valid post scsi_done upcall. */
+ cmd->scsi_done(cmd);
+
+ /*
+ * If there is an abort thread waiting for command completion
+ * wake up the thread.
+ */
+ spin_lock(&lpfc_cmd->buf_lock);
+ lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (lpfc_cmd->waitq)
+ wake_up(lpfc_cmd->waitq);
+ spin_unlock(&lpfc_cmd->buf_lock);
+
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
@@ -3903,7 +4454,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
/* Call FCP RSP handler to determine result */
- lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
+ lpfc_handle_fcp_err(vport, lpfc_cmd,
+ pIocbOut->iocb.un.fcpi.fcpi_parm);
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
@@ -3916,7 +4468,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
fast_path_evt->un.fabric_evt.subcategory =
(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
- if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode) {
memcpy(&fast_path_evt->un.fabric_evt.wwpn,
&pnode->nlp_portname,
sizeof(struct lpfc_name));
@@ -3971,7 +4523,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
&& (phba->sli_rev == LPFC_SLI_REV4)
- && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+ && pnode) {
/* This IO was aborted by the target, we don't
* know the rxid and because we did not send the
* ABTS we cannot generate an RRQ.
@@ -3986,8 +4538,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
}
- if (!pnode || !NLP_CHK_NODE_ACT(pnode)
- || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
SAM_STAT_BUSY;
} else
@@ -4009,7 +4560,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
- if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode) {
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
@@ -4053,72 +4604,30 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
/**
- * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
- * @data: A pointer to the immediate command data portion of the IOCB.
- * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
+ * @vport: Pointer to the vport object for which the I/O is executed
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ * @tmo: timeout value for the IO
*
- * The routine copies the entire FCP command from @fcp_cmnd to @data while
- * byte swapping the data to big endian format for transmission on the wire.
- **/
-static void
-lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
-{
- int i, j;
- for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
- i += sizeof(uint32_t), j++) {
- ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
- }
-}
-
-/**
- * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
- * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: The scsi command which needs to send.
- * @pnode: Pointer to lpfc_nodelist.
+ * Based on the data-direction of the command, initialize IOCB
+ * in the I/O buffer. Fill in the IOCB fields which are independent
+ * of the scsi buffer.
*
- * This routine initializes fcp_cmnd and iocb data structure from scsi command
- * to transfer for device with SLI3 interface spec.
+ * RETURNS 0 - SUCCESS
**/
-static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_nodelist *pnode)
+static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
{
- struct lpfc_hba *phba = vport->phba;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
- IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
- struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
- struct lpfc_sli4_hdw_queue *hdwq = NULL;
+ struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
int datadir = scsi_cmnd->sc_data_direction;
- int idx;
- uint8_t *ptr;
- bool sli4;
- uint32_t fcpdl;
-
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
- return;
-
- lpfc_cmd->fcp_rsp->rspSnsLen = 0;
- /* clear task management bits */
- lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
-
- int_to_scsilun(lpfc_cmd->pCmd->device->lun,
- &lpfc_cmd->fcp_cmnd->fcp_lun);
+ u32 fcpdl;
- ptr = &fcp_cmnd->fcpCdb[0];
- memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
- if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
- ptr += scsi_cmnd->cmd_len;
- memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
- }
-
- fcp_cmnd->fcpCntl1 = SIMPLE_Q;
-
- sli4 = (phba->sli_rev == LPFC_SLI_REV4);
piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
- idx = lpfc_cmd->hdwq_no;
- if (phba->sli4_hba.hdwq)
- hdwq = &phba->sli4_hba.hdwq[idx];
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -4132,42 +4641,31 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
iocb_cmd->ulpPU = PARM_READ_CHECK;
if (vport->cfg_first_burst_size &&
(pnode->nlp_flag & NLP_FIRSTBURST)) {
+ u32 xrdy_len;
+
fcpdl = scsi_bufflen(scsi_cmnd);
- if (fcpdl < vport->cfg_first_burst_size)
- piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
- else
- piocbq->iocb.un.fcpi.fcpi_XRdy =
- vport->cfg_first_burst_size;
+ xrdy_len = min(fcpdl,
+ vport->cfg_first_burst_size);
+ piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
- if (hdwq)
- hdwq->scsi_cstat.output_requests++;
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
- if (hdwq)
- hdwq->scsi_cstat.input_requests++;
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
- if (hdwq)
- hdwq->scsi_cstat.control_requests++;
}
- if (phba->sli_rev == 3 &&
- !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
- lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
+
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
- if (sli4)
- piocbq->iocb.ulpContext =
- phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
else
@@ -4175,9 +4673,159 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
piocbq->context1 = lpfc_cmd;
- piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
- piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+ if (!piocbq->iocb_cmpl)
+ piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ piocbq->iocb.ulpTimeout = tmo;
piocbq->vport = vport;
+ return 0;
+}
+
+/**
+ * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
+ * @vport: Pointer to the vport object for which the I/O is executed
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ * @tmo: timeout value for the IO
+ *
+ * Based on the data-direction of the command, copy the WQE template
+ * to the I/O buffer WQE. Fill in the WQE fields which are independent
+ * of the scsi buffer.
+ *
+ * RETURNS 0 - SUCCESS
+ **/
+static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ uint8_t tmo)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct lpfc_sli4_hdw_queue *hdwq = NULL;
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+ u16 idx = lpfc_cmd->hdwq_no;
+ int datadir = scsi_cmnd->sc_data_direction;
+
+ hdwq = &phba->sli4_hba.hdwq[idx];
+
+ /* Zero the full 128-byte WQE before applying the template */
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither.
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ if (datadir == DMA_TO_DEVICE) {
+ /* From the iwrite template, initialize words 7 - 11 */
+ memcpy(&wqe->words[7],
+ &lpfc_iwrite_cmd_template.words[7],
+ sizeof(uint32_t) * 5);
+
+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
+ if (hdwq)
+ hdwq->scsi_cstat.output_requests++;
+ } else {
+ /* From the iread template, initialize words 7 - 11 */
+ memcpy(&wqe->words[7],
+ &lpfc_iread_cmd_template.words[7],
+ sizeof(uint32_t) * 5);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
+
+ fcp_cmnd->fcpCntl3 = READ_DATA;
+ if (hdwq)
+ hdwq->scsi_cstat.input_requests++;
+ }
+ } else {
+ /* From the icmnd template, initialize words 4 - 11 */
+ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
+ sizeof(uint32_t) * 8);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
+
+ fcp_cmnd->fcpCntl3 = 0;
+ if (hdwq)
+ hdwq->scsi_cstat.control_requests++;
+ }
+
+ /*
+ * Finish initializing those WQE fields that are independent
+ * of the request_buffer
+ */
+
+ /* Word 3 */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+
+ /* Word 7 */
+ if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+ bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+
+ bf_set(wqe_class, &wqe->generic.wqe_com,
+ (pnode->nlp_fcp_info & 0x0f));
+
+ /* Word 8 */
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ pwqeq->vport = vport;
+ pwqeq->context1 = lpfc_cmd;
+ pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
+ pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
+
+ return 0;
+}
+
+/**
+ * lpfc_scsi_prep_cmnd - Wrapper func to convert a scsi cmnd to an FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes the fcp_cmnd and iocb data structures from the
+ * scsi command and then calls the SLI-3 or SLI-4 specific prep routine.
+ **/
+static int
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ u8 *ptr;
+
+ if (!pnode)
+ return 0;
+
+ lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+ /* clear task management bits */
+ lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
+
+ int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+ &lpfc_cmd->fcp_cmnd->fcp_lun);
+
+ ptr = &fcp_cmnd->fcpCdb[0];
+ memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+ ptr += scsi_cmnd->cmd_len;
+ memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+ }
+
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+
+ lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
+
+ return 0;
}
/**
@@ -4206,8 +4854,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
- ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
return 0;
piocbq = &(lpfc_cmd->cur_iocbq);
@@ -4264,7 +4911,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
- phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
@@ -4272,19 +4918,20 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
+ phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
+ phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1418 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
@@ -4466,12 +5113,11 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
/**
* lpfc_poll_timeout - Restart polling timer
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer construct where lpfc_hba data structure pointer is obtained.
*
* This routine restarts fcp_poll timer, when FCP ring polling is enable
* and FCP Ring interrupt is disable.
**/
-
void lpfc_poll_timeout(struct timer_list *t)
{
struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
@@ -4487,8 +5133,8 @@ void lpfc_poll_timeout(struct timer_list *t)
/**
* lpfc_queuecommand - scsi_host_template queuecommand entry point
+ * @shost: kernel scsi host pointer.
* @cmnd: Pointer to scsi_cmnd data structure.
- * @done: Pointer to done routine.
*
* Driver registers this routine to scsi midlayer to submit a @cmd to process.
* This routine prepares an IOCB from scsi command and provides to firmware.
@@ -4544,7 +5190,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
* Catch race where our node has transitioned, but the
* transport is still transitioning.
*/
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
goto out_tgt_busy;
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
@@ -4594,8 +5240,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
+ lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+ err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ if (err)
+ goto out_host_busy_release_buf;
+
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport,
@@ -4631,14 +5282,15 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
goto out_host_busy_free_buf;
}
- lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
- err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
- &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+ /* Issue I/O to adapter */
+ err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
+ &lpfc_cmd->cur_iocbq,
+ SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
lpfc_cmd->ts_cmd_start = start;
@@ -4650,24 +5302,30 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "3376 FCP could not issue IOCB err %x"
- "FCP cmd x%x <%d/%llu> "
- "sid: x%x did: x%x oxid: x%x "
- "Data: x%x x%x x%x x%x\n",
- err, cmnd->cmnd[0],
- cmnd->device ? cmnd->device->id : 0xffff,
- cmnd->device ? cmnd->device->lun : (u64) -1,
- vport->fc_myDID, ndlp->nlp_DID,
- phba->sli_rev == LPFC_SLI_REV4 ?
- lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
- lpfc_cmd->cur_iocbq.iocb.ulpContext,
- lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
- lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)
- (cmnd->request->timeout / 1000));
+ "3376 FCP could not issue IOCB err %x "
+ "FCP cmd x%x <%d/%llu> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : (u64)-1,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
+ lpfc_cmd->cur_iocbq.iocb.ulpContext,
+ lpfc_cmd->cur_iocbq.iotag,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ bf_get(wqe_tmo,
+ &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
+ lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+ (uint32_t)
+ (cmnd->request->timeout / 1000));
goto out_host_busy_free_buf;
}
+
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
@@ -4696,6 +5354,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
}
}
+ out_host_busy_release_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -4729,11 +5388,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocb;
- struct lpfc_iocbq *abtsiocb;
struct lpfc_io_buf *lpfc_cmd;
- IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4 = NULL;
+ struct lpfc_sli_ring *pring = NULL;
int ret_val;
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -4810,64 +5468,22 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto wait_for_cmpl;
}
- abtsiocb = __lpfc_sli_get_iocbq(phba);
- if (abtsiocb == NULL) {
- ret = FAILED;
- goto out_unlock_ring;
- }
-
- /* Indicate the IO is being aborted by the driver. */
- iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- /*
- * The scsi command can not be in txq and it is in flight because the
- * pCmd is still pointig at the SCSI command we have to abort. There
- * is no need to search the txcmplq. Just send an abort to the FW.
- */
-
- cmd = &iocb->iocb;
- icmd = &abtsiocb->iocb;
- icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
- icmd->un.acxri.abortContextTag = cmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
- else
- icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-
- icmd->ulpLe = 1;
- icmd->ulpClass = cmd->ulpClass;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->hba_wqidx = iocb->hba_wqidx;
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocb->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
-
- if (lpfc_is_link_up(phba))
- icmd->ulpCommand = CMD_ABORT_XRI_CN;
- else
- icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-
- abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- abtsiocb->vport = vport;
lpfc_cmd->waitq = &waitq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- /* Note: both hbalock and ring_lock must be set here */
- ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
- abtsiocb, 0);
spin_unlock(&pring_s4->ring_lock);
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+ lpfc_sli4_abort_fcp_cmpl);
} else {
- ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
- abtsiocb, 0);
+ pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ lpfc_sli_abort_fcp_cmpl);
}
- if (ret_val == IOCB_ERROR) {
+ if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
- iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
@@ -4881,7 +5497,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
- /* Wait for abort to complete */
+ /*
+ * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
+ * for abort to complete.
+ */
wait_event_timeout(waitq,
(lpfc_cmd->pCmd != cmnd),
msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
@@ -5016,7 +5635,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
- * @rdata: Pointer to remote port local data
+ * @cmnd: Pointer to scsi_cmnd data structure.
* @tgt_id: Target ID of remote device.
* @lun_id: Lun number for the TMF
* @task_mgmt_cmd: type of TMF to send
@@ -5043,7 +5662,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
int status;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
- if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+ if (!rdata || !rdata->pnode)
return FAILED;
pnode = rdata->pnode;
@@ -5147,7 +5766,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
*/
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies)) {
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ if (!pnode)
return FAILED;
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
@@ -5157,8 +5776,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
return FAILED;
pnode = rdata->pnode;
}
- if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
- (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
return FAILED;
return SUCCESS;
}
@@ -5320,10 +5938,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irq(&pnode->lock);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irq(&pnode->lock);
}
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
@@ -5402,8 +6020,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
match = 0;
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
+
if (vport->phba->cfg_fcp2_no_tgt_reset &&
(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
continue;
@@ -5665,10 +6282,11 @@ lpfc_slave_destroy(struct scsi_device *sdev)
/**
* lpfc_create_device_data - creates and initializes device data structure for OAS
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun on target
+ * @pri: Priority
* @atomic_create: Flag to indicate if memory should be allocated using the
* GFP_ATOMIC flag or not.
*
@@ -5718,7 +6336,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_delete_device_data - frees a device data structure for OAS
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @lun_info: Pointer to device data structure to free.
*
* This routine frees the previously allocated device data structure passed.
@@ -5741,7 +6359,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba,
/**
* __lpfc_get_device_data - returns the device data for the specified lun
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @list: Point to list to search.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
@@ -5783,7 +6401,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
/**
* lpfc_find_next_oas_lun - searches for the next oas lun
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @starting_lun: Pointer to the lun to start searching for
@@ -5791,6 +6409,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
* @found_target_wwpn: Pointer to the found lun's target wwpn information
* @found_lun: Pointer to the found lun.
* @found_lun_status: Pointer to status of the found lun.
+ * @found_lun_pri: Pointer to priority of the found lun.
*
* This routine searches the luns list for the specified lun
* or the first lun for the vport/target. If the vport wwpn contains
@@ -5885,10 +6504,11 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_enable_oas_lun - enables a lun for OAS operations
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun
+ * @pri: Priority
*
* This routine enables a lun for oas operations. The routines does so by
* doing the following :
@@ -5945,10 +6565,11 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/**
* lpfc_disable_oas_lun - disables a lun for OAS operations
- * @pha: Pointer to host bus adapter structure.
+ * @phba: Pointer to host bus adapter structure.
* @vport_wwpn: Pointer to vport's wwpn information
* @target_wwpn: Pointer to target's wwpn information
* @lun: Lun
+ * @pri: Priority
*
* This routine disables a lun for oas operations. The routines does so by
* doing the following :
@@ -6029,7 +6650,7 @@ struct scsi_host_template lpfc_template_nvme = {
.sg_tablesize = 1,
.cmd_per_lun = 1,
.shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.track_queue_depth = 0,
};
@@ -6054,7 +6675,7 @@ struct scsi_host_template lpfc_template = {
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e158cd77d387..95caad764fb7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -90,12 +90,138 @@ static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
struct lpfc_queue *cq,
struct lpfc_cqe *cqe);
+union lpfc_wqe128 lpfc_iread_cmd_template;
+union lpfc_wqe128 lpfc_iwrite_cmd_template;
+union lpfc_wqe128 lpfc_icmnd_cmd_template;
+
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
return &iocbq->iocb;
}
+/* Setup WQE templates for IOs */
+void lpfc_wqe_cmd_template(void)
+{
+ union lpfc_wqe128 *wqe;
+
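+ /* Only the invariant words are set in these templates; per-I/O
+ * fields (BDEs, transfer lengths, tags) are filled at issue time.
+ */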
+ /* IREAD template */
+ wqe = &lpfc_iread_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - cmd_buff_len, payload_offset_len is zero */
+
+ /* Word 4 - total_xfer_len is variable */
+
+ /* Word 5 - is zero */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
+ bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
+
+ /* Word 11 - pbde is variable */
+ bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
+ bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
+
+ /* Word 12 - is zero */
+
+ /* Word 13, 14, 15 - PBDE is variable */
+
+ /* IWRITE template */
+ wqe = &lpfc_iwrite_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - cmd_buff_len, payload_offset_len is zero */
+
+ /* Word 4 - total_xfer_len is variable */
+
+ /* Word 5 - initial_xfer_len is variable */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
+ bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+
+ /* Word 11 - pbde is variable */
+ bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
+ bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
+
+ /* Word 12 - is zero */
+
+ /* Word 13, 14, 15 - PBDE is variable */
+
+ /* ICMND template */
+ wqe = &lpfc_icmnd_cmd_template;
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* Word 0, 1, 2 - BDE is variable */
+
+ /* Word 3 - payload_offset_len is variable */
+
+ /* Word 4, 5 - is zero */
+
+ /* Word 6 - ctxt_tag, xri_tag is variable */
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
+ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
+ bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 - abort_tag is variable */
+
+ /* Word 9 - reqtag is variable */
+
+ /* Word 10 - dbde, wqes is variable */
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
+ bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
+
+ /* Word 12, 13, 14, 15 - is zero */
+}
+
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
* lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
@@ -150,6 +276,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
/* sanity check on queue memory */
if (unlikely(!q))
return -ENOMEM;
+
temp_wqe = lpfc_sli4_qe(q, q->host_index);
/* If the host has not yet processed the next entry then we are done */
@@ -860,7 +987,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
{
struct lpfc_nodelist *ndlp = NULL;
- if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ if (rrq->vport)
ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
/* The target DID could have been swapped (cable swap)
@@ -1061,12 +1188,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
goto out;
}
- /*
- * set the active bit even if there is no mem available.
- */
- if (NLP_CHK_FREE_REQ(ndlp))
- goto out;
-
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
@@ -1289,6 +1410,11 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
(sglq->state != SGL_XRI_ABORTED)) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
+
+ /* Check if we can get a reference on ndlp */
+ if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
+ sglq->ndlp = NULL;
+
list_add(&sglq->list,
&phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
@@ -2449,10 +2575,10 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
- spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
@@ -2529,9 +2655,10 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x %px\n",
+ "on NPort x%x Data: x%x x%x %px x%x x%x\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
+ ndlp->nlp_flag, ndlp->nlp_defer_did,
+ ndlp, vport->load_flag, kref_read(&ndlp->kref));
if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
(ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
@@ -2541,8 +2668,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
- if (vport->load_flag & FC_UNLOADING)
- lpfc_nlp_put(ndlp);
+
+ /* The unreg_login mailbox is complete and had a
+ * reference that has to be released. The PLOGI
+ * got its own ref.
+ */
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
@@ -2566,7 +2697,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*
* This function is the unreg rpi mailbox completion handler. It
* frees the memory resources associated with the completed mailbox
- * command. An additional refrenece is put on the ndlp to prevent
+ * command. An additional reference is put on the ndlp to prevent
* lpfc_nlp_release from freeing the rpi bit in the bitmask before
* the unreg mailbox command completes; this routine puts the
* reference back.
@@ -2586,16 +2717,15 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
LPFC_SLI_INTF_IF_TYPE_2)) {
if (ndlp) {
lpfc_printf_vlog(
- vport, KERN_INFO, LOG_MBOX | LOG_SLI,
+ vport, KERN_INFO, LOG_MBOX | LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x defer x%x flg x%x "
- "map:%x %px\n",
+ "%px\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_defer_did,
ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ ndlp);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- lpfc_nlp_put(ndlp);
/* Check to see if there are any deferred
* events to process
@@ -2618,6 +2748,8 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
+
+ lpfc_nlp_put(ndlp);
}
}
}
@@ -2852,7 +2984,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
/* validate the source of the LS is logged in */
ndlp = lpfc_findnode_did(phba->pport, sid);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ if (!ndlp ||
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
@@ -4077,7 +4209,7 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
spin_lock_irq(&phba->hbalock);
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
} else {
spin_lock_irq(&phba->hbalock);
@@ -4086,7 +4218,7 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
@@ -7248,12 +7380,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct rqb_dmabuf *rqb_buffer;
LIST_HEAD(rqb_buf_list);
- spin_lock_irqsave(&phba->hbalock, flags);
rqbp = hrq->rqbp;
for (i = 0; i < count; i++) {
+ spin_lock_irqsave(&phba->hbalock, flags);
/* IF RQ is already full, don't bother */
- if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
break;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
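+ /* Allocate with the lock dropped; it is retaken below to post the buffers. */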
rqb_buffer = rqbp->rqb_alloc_buffer(phba);
if (!rqb_buffer)
break;
@@ -7262,6 +7398,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rqb_buffer->idx = idx;
list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
}
+
+ spin_lock_irqsave(&phba->hbalock, flags);
while (!list_empty(&rqb_buf_list)) {
list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
hbuf.list);
@@ -9189,7 +9327,6 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
"1420 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
return 0;
}
@@ -10072,7 +10209,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
"2014 Invalid command 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
- break;
}
if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
@@ -10094,6 +10230,96 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
/**
+ * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue the iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
+ * version to send an iocb command to an HBA with SLI-3 interface spec.
+ *
+ * This function takes the hbalock before invoking the lockless version.
+ * The function will return success after it successfully submits the iocb to
+ * firmware or after adding it to the txq.
+ **/
+static int
+__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ unsigned long iflags;
+ int rc;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return rc;
+}
+
+/**
+ * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue wqe on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
+ * a wqe command to an HBA with SLI-4 interface spec.
+ *
+ * This function is a lockless version. The function will return success
+ * after it successfully submits the wqe to firmware or after adding it to
+ * the txq.
+ **/
+static int
+__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ int rc;
+ struct lpfc_io_buf *lpfc_cmd =
+ (struct lpfc_io_buf *)piocb->context1;
+ union lpfc_wqe128 *wqe = &piocb->wqe;
+ struct sli4_sge *sgl;
+
+ /* 128 byte wqe support here */
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+
+ if (phba->fcp_embed_io) {
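+ /* Embed the FCP_CMND payload in the WQE itself as an
+ * immediate BDE rather than referencing it via the first SGE.
+ */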
+ struct fcp_cmnd *fcp_cmnd;
+ u32 *ptr;
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+ /* Word 0-2 - FCP_CMND */
+ wqe->generic.bde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_IMMED;
+ wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
+ wqe->generic.bde.addrHigh = 0;
+ wqe->generic.bde.addrLow = 88; /* Word 22 */
+
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
+
+ /* Word 22-29 FCP CMND Payload */
+ ptr = &wqe->words[22];
+ memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+ } else {
+ /* Word 0-2 - Inline BDE */
+ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ wqe->generic.bde.addrHigh = sgl->addr_hi;
+ wqe->generic.bde.addrLow = sgl->addr_lo;
+
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
+ }
+
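+ /* Post the fully built WQE to this I/O's hardware queue. */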
+ rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
+ return rc;
+}
+
+/**
* __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
* @ring_number: SLI ring number to issue iocb on.
@@ -10159,9 +10385,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
}
- } else if (piocb->iocb_flag & LPFC_IO_FCP)
+ } else if (piocb->iocb_flag & LPFC_IO_FCP) {
/* These IO's already have an XRI and a mapped sgl. */
sglq = NULL;
+ }
else {
/*
* This is a continuation of a command (CX), so this
@@ -10189,6 +10416,25 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return 0;
}
+/**
+ * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
+ *
+ * This routine wraps the actual fcp i/o function, issuing a WQE for SLI-4
+ * or an IOCB for SLI-3 via the function pointer from the lpfc_hba struct.
+ *
+ * Return codes:
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
+ **/
+int
+lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
+}
+
/*
* __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
*
@@ -10224,17 +10470,18 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
case LPFC_PCI_DEV_LP:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+ phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
break;
case LPFC_PCI_DEV_OC:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+ phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1419 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
- break;
}
phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
return 0;
@@ -10364,6 +10611,32 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+static void
+lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+ struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+ }
+
+ /* Incrementing the reference count until the queued work is done. */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (!evtp->evt_arg1) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+ }
+ evtp->evt = LPFC_EVT_RECOVER_PORT;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ lpfc_worker_wake_up(phba);
+}
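
The matching consumer lives in the worker thread, outside this hunk. A
sketch of the case that the dispatcher (lpfc_work_list_done() in
lpfc_hbadisc.c) would need, assuming its usual switch on evtp->evt; the
exact upstream handling may differ:

	case LPFC_EVT_RECOVER_PORT:
		ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
		lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
		/* Drop the reference taken in lpfc_sli_post_recovery_event */
		lpfc_nlp_put(ndlp);
		break;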
+
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to iocb object.
@@ -10397,7 +10670,7 @@ lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
if (!vport)
goto err_exit;
ndlp = lpfc_findnode_rpi(vport, rpi);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp)
goto err_exit;
if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
@@ -10427,17 +10700,15 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp,
struct sli4_wcqe_xri_aborted *axri)
{
- struct lpfc_vport *vport;
uint32_t ext_status = 0;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ if (!ndlp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3115 Node Context not found, driver "
"ignoring abts err event\n");
return;
}
- vport = ndlp->vport;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
@@ -10454,7 +10725,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
ext_status = axri->parameter & IOERR_PARAM_MASK;
if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
- lpfc_sli_abts_recover_port(vport, ndlp);
+ lpfc_sli_post_recovery_event(phba, ndlp);
}
/**
@@ -10908,7 +11179,8 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
&pring->txcmplq, list) {
if (iocb->vport != vport)
continue;
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
pring->flag = prev_pring_flag;
}
@@ -10935,7 +11207,8 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
&pring->txcmplq, list) {
if (iocb->vport != vport)
continue;
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ NULL);
}
pring->flag = prev_pring_flag;
}
@@ -11324,35 +11597,37 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
+ lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
else
lpfc_els_free_iocb(phba, cmdiocb);
- return;
}
/**
- * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
+ * @cmpl: completion function.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the callback function will be changed for those commands
+ * so that nothing happens when they finish. This function is called with
+ * hbalock held and no ring_lock held (SLI4). The function returns
+ * IOCB_ABORTING when the command iocb is itself an abort request.
*
- * This function issues an abort iocb for the provided command iocb down to
- * the port. Other than the case the outstanding command iocb is an abort
- * request, this function issues abort out unconditionally. This function is
- * called with hbalock held. The function returns 0 when it fails due to
- * memory allocation failure or when the command iocb is an abort request.
- * The hbalock is asserted held in the code path calling this routine.
**/
-static int
-lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb)
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb, void *cmpl)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
- int retval;
+ int retval = IOCB_ERROR;
unsigned long iflags;
struct lpfc_nodelist *ndlp;
@@ -11365,12 +11640,33 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
- return 0;
+ return IOCB_ABORTING;
+
+ if (!pring) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ return retval;
+ }
+
+ /*
+ * If we're unloading, don't abort iocb on the ELS ring, but change
+ * the callback so that nothing happens when it finishes.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ pring->ringno == LPFC_ELS_RING) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ return retval;
+ }
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
- return 0;
+ return IOCB_NORESOURCE;
/* This signals the response to set the correct status
* before calling the completion handler
@@ -11382,7 +11678,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->un.acxri.abortContextTag = icmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4) {
iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
- iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ if (pring->ringno == LPFC_ELS_RING)
+ iabt->un.acxri.abortContextTag = cmdiocb->iotag;
} else {
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
if (pring->ringno == LPFC_ELS_RING) {
@@ -11395,8 +11692,10 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
+ abtsiocbp->iocb_flag |= LPFC_IO_FCP;
abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+ }
if (cmdiocb->iocb_flag & LPFC_IO_FOF)
abtsiocbp->iocb_flag |= LPFC_IO_FOF;
@@ -11405,20 +11704,16 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
else
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
- abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ if (cmpl)
+ abtsiocbp->iocb_cmpl = cmpl;
+ else
+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
abtsiocbp->vport = vport;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0339 Abort xri x%x, original iotag x%x, "
- "abort cmd iotag x%x\n",
- iabt->un.acxri.abortIoTag,
- iabt->un.acxri.abortContextTag,
- abtsiocbp->iotag);
-
if (phba->sli_rev == LPFC_SLI_REV4) {
pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
if (unlikely(pring == NULL))
- return 0;
+ goto abort_iotag_exit;
/* Note: both hbalock and ring_lock need to be set here */
spin_lock_irqsave(&pring->ring_lock, iflags);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
@@ -11429,76 +11724,20 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp, 0);
}
- if (retval)
- __lpfc_sli_release_iocbq(phba, abtsiocbp);
-
- /*
- * Caller to this routine should check for IOCB_ERROR
- * and handle it properly. This routine no longer removes
- * iocb off txcmplq and call compl in case of IOCB_ERROR.
- */
- return retval;
-}
-
-/**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- * @cmdiocb: Pointer to driver command iocb object.
- *
- * This function issues an abort iocb for the provided command iocb. In case
- * of unloading, the abort iocb will not be issued to commands on the ELS
- * ring. Instead, the callback function shall be changed to those commands
- * so that nothing happens when them finishes. This function is called with
- * hbalock held. The function returns 0 when the command iocb is an abort
- * request.
- **/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *cmdiocb)
-{
- struct lpfc_vport *vport = cmdiocb->vport;
- int retval = IOCB_ERROR;
- IOCB_t *icmd = NULL;
-
- lockdep_assert_held(&phba->hbalock);
-
- /*
- * There are certain command types we don't want to abort. And we
- * don't want to abort commands that are already in the process of
- * being aborted.
- */
- icmd = &cmdiocb->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
- return 0;
+abort_iotag_exit:
- if (!pring) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
- }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0339 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x retval x%x\n",
+ iabt->un.acxri.abortIoTag,
+ iabt->un.acxri.abortContextTag,
+ abtsiocbp->iotag, retval);
- /*
- * If we're unloading, don't abort iocb on the ELS ring, but change
- * the callback so that nothing happens when it finishes.
- */
- if ((vport->load_flag & FC_UNLOADING) &&
- (pring->ringno == LPFC_ELS_RING)) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
+ if (retval) {
+ cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ __lpfc_sli_release_iocbq(phba, abtsiocbp);
}
- /* Now, we try to issue the abort to the cmdiocb out */
- retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
-
-abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
* and handle it properly. This routine no longer removes
@@ -11643,6 +11882,33 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
}
/**
+ * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @wcqe: Pointer to the completed WCQE.
+ *
+ * This function is called when an aborted FCP iocb completes. It is
+ * called by the ring event handler with no lock held and frees the
+ * abort iocb. It is used by SLI-4 adapters.
+ **/
+void
+lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3017 ABORT_XRI_CN completing on rpi x%x "
+ "original iotag x%x, abort cmd iotag x%x "
+ "status 0x%x, reason 0x%x\n",
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ cmdiocb->iocb.un.acxri.abortIoTag,
+ cmdiocb->iotag,
+ (bf_get(lpfc_wcqe_c_status, wcqe)
+ & LPFC_IOCB_STATUS_MASK),
+ wcqe->parameter);
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+/**
* lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
@@ -11695,10 +11961,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
- struct lpfc_iocbq *abtsiocb;
- struct lpfc_sli_ring *pring_s4;
- IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
+ unsigned long iflags;
int i;
/* all I/Os are in process of being flushed */
@@ -11712,62 +11976,12 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abort_cmd) != 0)
continue;
- /*
- * If the iocbq is already being aborted, don't take a second
- * action, but do count it.
- */
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- continue;
-
- /* issue ABTS for this IOCB based on iotag */
- abtsiocb = lpfc_sli_get_iocbq(phba);
- if (abtsiocb == NULL) {
- errcnt++;
- continue;
- }
-
- /* indicate the IO is being aborted by the driver. */
- iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
-
- cmd = &iocbq->iocb;
- abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
- abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
- else
- abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
- abtsiocb->iocb.ulpLe = 1;
- abtsiocb->iocb.ulpClass = cmd->ulpClass;
- abtsiocb->vport = vport;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->hba_wqidx = iocbq->hba_wqidx;
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocbq->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
-
- if (lpfc_is_link_up(phba))
- abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
- else
- abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
-
- /* Setup callback routine and issue the command. */
- abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
- if (!pring_s4)
- continue;
- ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
- abtsiocb, 0);
- } else
- ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
- abtsiocb, 0);
- if (ret_val == IOCB_ERROR) {
- lpfc_sli_release_iocbq(phba, abtsiocb);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
+ lpfc_sli_abort_fcp_cmpl);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (ret_val != IOCB_SUCCESS)
errcnt++;
- continue;
- }
}
return errcnt;
@@ -13062,23 +13276,30 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
/* First, declare the els xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
/* Now, handle all the els xri abort events */
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
/* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
/* Notify aborted XRI for ELS work queue */
lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+
/* Free the event processed back to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
}
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
}
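
Two locking changes are worth noting in this rework: the aborted-XRI work
queue now has its own spinlock, els_xri_abrt_list_lock, instead of
contending on hbalock, and that lock is dropped around each call to
lpfc_sli4_els_xri_aborted() so the per-event handler never runs under the
list lock. The producer in lpfc_sli4_sp_handle_abort_xri_wcqe() below takes
the same lock when queueing.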
/**
@@ -13289,9 +13510,13 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
if (!cq_event)
return false;
- spin_lock_irqsave(&phba->hbalock, iflags);
+
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
+
/* Set the async event flag */
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag |= ASYNC_EVENT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -13375,6 +13600,12 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
pmbox->un.varWords[0], pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
pmb->ctx_buf = mp;
+
+ /* No reference taken here. This is a default
+ * RPI reg/immediate unreg cycle. The reference was
+ * taken in the reg rpi path and is released when
+ * this mailbox completes.
+ */
pmb->ctx_ndlp = ndlp;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -13566,17 +13797,20 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
break;
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
- cq_event = lpfc_cq_event_setup(
- phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
- if (!cq_event)
- return false;
+ cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
+ if (!cq_event) {
+ workposted = false;
+ break;
+ }
cq_event->hdwq = cq->hdwq;
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Set the els xri abort event flag */
phba->hba_flag |= ELS_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
+ iflags);
workposted = true;
break;
default:
@@ -14046,7 +14280,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
#endif
if (cmdiocbq->iocb_cmpl == NULL) {
if (cmdiocbq->wqe_cmpl) {
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ /* For FCP the flag is cleared in wqe_cmpl */
+ if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+ cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -14063,6 +14299,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
return;
}
+	/* Only SLI4 non-IO commands still use IOCBs */
/* Fake the irspiocb and copy necessary response information */
lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
@@ -15850,12 +16087,6 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
else
wq_create_version = LPFC_Q_CREATE_VERSION_0;
-
- if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
- wq_create_version = LPFC_Q_CREATE_VERSION_1;
- else
- wq_create_version = LPFC_Q_CREATE_VERSION_0;
-
switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
@@ -17864,15 +18095,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
}
/* Put ndlp onto pport node list */
lpfc_enqueue_node(vport, ndlp);
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- /* re-setup ndlp without removing from node list */
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "3275 Failed to active ndlp found "
- "for oxid:x%x SID:x%x\n", oxid, sid);
- return;
- }
}
/* Allocate buffer for rsp iocb */
@@ -17897,6 +18119,10 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
icmd->ulpClass = CLASS3;
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!ctiocb->context1) {
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ return;
+ }
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
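
The null check added above reflects the node-reference rework in this
series: with NLP_CHK_NODE_ACT gone, a NULL return from lpfc_nlp_get() is the
signal that the ndlp is being released, so call sites that stash the node in
an I/O context (here ctiocb->context1) must check the return and unwind
before using it.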
@@ -19840,7 +20066,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct lpfc_nodelist *act_mbx_ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LIST_HEAD(mbox_cmd_list);
uint8_t restart_loop;
@@ -19894,9 +20119,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
restart_loop = 1;
spin_unlock_irq(&phba->hbalock);
- spin_lock(shost->host_lock);
+ spin_lock(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&ndlp->lock);
spin_lock_irq(&phba->hbalock);
break;
}
@@ -19918,9 +20143,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
- spin_lock(shost->host_lock);
+ spin_lock(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&ndlp->lock);
lpfc_nlp_put(ndlp);
}
}
@@ -19929,9 +20154,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
/* Release the ndlp with the cleaned-up active mailbox command */
if (act_mbx_ndlp) {
- spin_lock(shost->host_lock);
+ spin_lock(&act_mbx_ndlp->lock);
act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock(shost->host_lock);
+ spin_unlock(&act_mbx_ndlp->lock);
lpfc_nlp_put(act_mbx_ndlp);
}
}
@@ -20213,7 +20438,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVME_FCREQ and NVME_ABTS requests */
- if (pwqe->iocb_flag & LPFC_IO_NVME) {
+ if (pwqe->iocb_flag & LPFC_IO_NVME ||
+ pwqe->iocb_flag & LPFC_IO_FCP) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
@@ -20266,6 +20492,88 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
return WQE_ERROR;
}
+/**
+ * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @cmpl: completion function.
+ *
+ * Fill the appropriate fields for the abort WQE and call the internal
+ * routine lpfc_sli4_issue_wqe to send the WQE. This function is called
+ * with hbalock held and no ring_lock held.
+ *
+ * Returns 0 on success; WQE_NORESOURCE or another WQE_* error code on
+ * failure.
+ **/
+
+int
+lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ void *cmpl)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_iocbq *abtsiocb = NULL;
+ union lpfc_wqe128 *abtswqe;
+ struct lpfc_io_buf *lpfc_cmd;
+ int retval = IOCB_ERROR;
+ u16 xritag = cmdiocb->sli4_xritag;
+
+ /*
+	 * The SCSI command cannot be in the txq; it is in flight because
+	 * pCmd is still pointing at the SCSI command we have to abort. There
+	 * is no need to search the txcmplq. Just send an abort to the FW.
+ */
+
+ abtsiocb = __lpfc_sli_get_iocbq(phba);
+ if (!abtsiocb)
+ return WQE_NORESOURCE;
+
+ /* Indicate the IO is being aborted by the driver. */
+ cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ abtswqe = &abtsiocb->wqe;
+ memset(abtswqe, 0, sizeof(*abtswqe));
+
+ if (lpfc_is_link_up(phba))
+ bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
+ bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
+ abtswqe->abort_cmd.rsrvd5 = 0;
+ abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
+ bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
+ bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
+ bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ abtsiocb->iocb_flag |= LPFC_IO_FCP;
+ if (cmdiocb->iocb_flag & LPFC_IO_NVME)
+ abtsiocb->iocb_flag |= LPFC_IO_NVME;
+ if (cmdiocb->iocb_flag & LPFC_IO_FOF)
+ abtsiocb->iocb_flag |= LPFC_IO_FOF;
+ abtsiocb->vport = vport;
+ abtsiocb->wqe_cmpl = cmpl;
+
+ lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
+ retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "0359 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x retval x%x\n",
+ xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
+
+ if (retval) {
+ cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ __lpfc_sli_release_iocbq(phba, abtsiocb);
+ }
+
+ return retval;
+}
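
For context, a hypothetical call site (the real eh_abort path in
lpfc_scsi.c may differ in detail): callers are expected to hold hbalock and
pass a rev-specific completion such as the lpfc_sli4_abort_fcp_cmpl() added
above:

	/* Hypothetical call-site sketch; names as in this patch. */
	spin_lock_irqsave(&phba->hbalock, flags);
	ret_val = lpfc_sli4_issue_abort_iotag(phba, &lpfc_cmd->cur_iocbq,
					      lpfc_sli4_abort_fcp_cmpl);
	spin_unlock_irqrestore(&phba->hbalock, flags);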
+
#ifdef LPFC_MXP_STAT
/**
* lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 93d976ea8c5d..4f6936014ff5 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -130,6 +130,9 @@ struct lpfc_iocbq {
#define IOCB_BUSY 1
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
+#define IOCB_ABORTED 4
+#define IOCB_ABORTING 5
+#define IOCB_NORESOURCE 6
#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */
@@ -138,6 +141,8 @@ struct lpfc_iocbq {
#define WQE_ERROR 2
#define WQE_TIMEDOUT 3
#define WQE_ABORTED 4
+#define WQE_ABORTING 5
+#define WQE_NORESOURCE 6
#define LPFC_MBX_WAKE 1
#define LPFC_MBX_IMED_UNREG 2
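
The two new code pairs mirror each other across the IOCB and WQE paths:
*_ABORTING means the command already has an abort in flight (the
LPFC_DRIVER_ABORTED check in lpfc_sli_issue_abort_iotag()), while
*_NORESOURCE means no iocbq could be allocated for the ABTS. Callers such
as lpfc_sli_abort_iocb() can then treat anything other than success as a
failure without re-deriving the cause.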
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a966cdeb52ee..26f19c95380f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -549,6 +549,14 @@ struct lpfc_pc_sli4_params {
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
+ uint32_t mib_size;
+ uint16_t mi_ver;
+#define LPFC_MIB1_SUPPORT 1
+#define LPFC_MIB2_SUPPORT 2
+#define LPFC_MIB3_SUPPORT 3
+ uint16_t mi_value;
+#define LPFC_DFLT_MIB_VAL 2
+ uint8_t mib_bde_cnt;
uint8_t cqv;
uint8_t mqv;
uint8_t wqv;
@@ -920,8 +928,9 @@ struct lpfc_sli4_hba {
struct list_head sp_queue_event;
struct list_head sp_cqe_event_pool;
struct list_head sp_asynce_work_queue;
- struct list_head sp_fcp_xri_aborted_work_queue;
+ spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */
struct list_head sp_els_xri_aborted_work_queue;
+ spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -1103,8 +1112,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c657abf22b75..234dca60995b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.4"
+#define LPFC_DRIVER_VERSION "12.8.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2019 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2020 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index aa4e451d5dc1..a99fdfba7d27 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -462,7 +462,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ if (ndlp &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
@@ -495,8 +495,7 @@ disable_vport(struct fc_vport *fc_vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && phba->link_state >= LPFC_LINK_UP) {
+ if (ndlp && phba->link_state >= LPFC_LINK_UP) {
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
@@ -510,8 +509,6 @@ disable_vport(struct fc_vport *fc_vport)
* calling lpfc_cleanup_rpis(vport, 1)
*/
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -568,8 +565,7 @@ enable_vport(struct fc_vport *fc_vport)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)
- && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
@@ -597,16 +593,14 @@ lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
return enable_vport(fc_vport);
}
-
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_hba *phba = vport->phba;
+ struct lpfc_hba *phba = vport->phba;
long timeout;
- bool ns_ndlp_referenced = false;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -623,9 +617,11 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
"static vport.\n");
return VPORT_ERROR;
}
+
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
+
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
@@ -653,64 +649,22 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
return VPORT_INVAL;
lpfc_free_sysfs_attr(vport);
-
lpfc_debugfs_terminate(vport);
- /*
- * The call to fc_remove_host might release the NameServer ndlp. Since
- * we might need to use the ndlp to send the DA_ID CT command,
- * increment the reference for the NameServer ndlp to prevent it from
- * being released.
- */
- ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_nlp_get(ndlp);
- ns_ndlp_referenced = true;
- }
-
- /* Remove FC host and then SCSI host with the vport */
+ /* Remove FC host to break driver binding. */
fc_remove_host(shost);
scsi_remove_host(shost);
- ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
-
- /* In case of driver unload, we shall not perform fabric logo as the
- * worker thread already stopped at this stage and, in this case, we
- * can safely skip the fabric logo.
- */
- if (phba->pport->load_flag & FC_UNLOADING) {
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- phba->link_state >= LPFC_LINK_UP) {
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp)
- goto skip_logo;
- else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto skip_logo;
- }
- /* Remove ndlp from vport npld list */
- lpfc_dequeue_node(vport, ndlp);
-
- /* Indicate free memory when release */
- spin_lock_irq(&phba->ndlp_lock);
- NLP_SET_FREE_REQ(ndlp);
- spin_unlock_irq(&phba->ndlp_lock);
- /* Kick off release ndlp when it can be safely done */
- lpfc_nlp_put(ndlp);
- }
+	/* Send the DA_ID and Fabric LOGO to clean up Nameserver entries. */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
goto skip_logo;
- }
- /* Otherwise, we will perform fabric logo as needed */
- if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
- ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP &&
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
+ /* Send DA_ID and wait for a completion. */
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
while (vport->ct_flags && timeout)
@@ -721,47 +675,19 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
"1829 CT command failed to "
"delete objects on fabric\n");
}
- /* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp) {
- /* Cannot find existing Fabric ndlp, allocate one */
- ndlp = lpfc_nlp_init(vport, Fabric_DID);
- if (!ndlp)
- goto skip_logo;
- /* Indicate free memory when release */
- NLP_SET_FREE_REQ(ndlp);
- } else {
- if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp,
- NLP_STE_UNUSED_NODE);
- if (!ndlp)
- goto skip_logo;
- }
-
- /* Remove ndlp from vport list */
- lpfc_dequeue_node(vport, ndlp);
- spin_lock_irq(&phba->ndlp_lock);
- if (!NLP_CHK_FREE_REQ(ndlp))
- /* Indicate free memory when release */
- NLP_SET_FREE_REQ(ndlp);
- else {
- /* Skip this if ndlp is already in free mode */
- spin_unlock_irq(&phba->ndlp_lock);
- goto skip_logo;
- }
- spin_unlock_irq(&phba->ndlp_lock);
- }
/*
* If the vpi is not registered, then a valid FDISC doesn't
* exist and there is no need for a ELS LOGO. Just cleanup
* the ndlp.
*/
- if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
- lpfc_nlp_put(ndlp);
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
goto skip_logo;
- }
+	/* Issue a Fabric LOGO to clean up fabric resources. */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto skip_logo;
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
@@ -774,18 +700,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
skip_logo:
- /*
- * If the NameServer ndlp has been incremented to allow the DA_ID CT
- * command to be sent, decrement the ndlp now.
- */
- if (ns_ndlp_referenced) {
- ndlp = lpfc_findnode_did(vport, NameServer_DID);
- lpfc_nlp_put(ndlp);
- }
-
lpfc_cleanup(vport);
- lpfc_sli_host_down(vport);
+ /* Remove scsi host now. The nodes are cleaned up. */
+ lpfc_sli_host_down(vport);
lpfc_stop_vport_timers(vport);
if (!(phba->pport->load_flag & FC_UNLOADING)) {
@@ -865,8 +783,6 @@ lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->lat_data)
memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
sizeof(struct lpfc_scsicmd_bkt));
@@ -887,8 +803,6 @@ lpfc_alloc_bucket(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;
@@ -921,8 +835,6 @@ lpfc_free_bucket(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
kfree(ndlp->lat_data);
ndlp->lat_data = NULL;