author    Linus Torvalds <torvalds@linux-foundation.org>  2021-04-28 17:22:10 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-04-28 17:22:10 -0700
commit    d72cd4ad4174cfd2257c426ad51e4f53bcfde9c9
tree      b291d1c28bbf6ce61edc3bdf022ea857414230f6 /drivers
parent    238da4d004856ac5f832899f6f3fa27c0102381f
parent    7a3beeae289385f7be9f61a33a6e4f6c7e2400d3
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This consists of the usual driver updates (ufs, target, tcmu,
  smartpqi, lpfc, zfcp, qla2xxx, mpt3sas, pm80xx). The major core
  change is using a sbitmap instead of an atomic for queue tracking"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (412 commits)
  scsi: target: tcm_fc: Fix a kernel-doc header
  scsi: target: Shorten ALUA error messages
  scsi: target: Fix two format specifiers
  scsi: target: Compare explicitly with SAM_STAT_GOOD
  scsi: sd: Introduce a new local variable in sd_check_events()
  scsi: dc395x: Open-code status_byte(u8) calls
  scsi: 53c700: Open-code status_byte(u8) calls
  scsi: smartpqi: Remove unused functions
  scsi: qla4xxx: Remove an unused function
  scsi: myrs: Remove unused functions
  scsi: myrb: Remove unused functions
  scsi: mpt3sas: Fix two kernel-doc headers
  scsi: fcoe: Suppress a compiler warning
  scsi: libfc: Fix a format specifier
  scsi: aacraid: Remove an unused function
  scsi: core: Introduce enum scsi_disposition
  scsi: core: Modify the scsi_send_eh_cmnd() return value for the SDEV_BLOCK case
  scsi: core: Rename scsi_softirq_done() into scsi_complete()
  scsi: core: Remove an incorrect comment
  scsi: core: Make the scsi_alloc_sgtables() documentation more accurate
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-eh.c | 2
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 14
-rw-r--r--  drivers/message/fusion/lsi/mpi.h | 4
-rw-r--r--  drivers/message/fusion/lsi/mpi_ioc.h | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 9
-rw-r--r--  drivers/message/fusion/mptbase.h | 2
-rw-r--r--  drivers/message/fusion/mptctl.c | 8
-rw-r--r--  drivers/message/fusion/mptdebug.h | 7
-rw-r--r--  drivers/message/fusion/mptlan.c | 9
-rw-r--r--  drivers/message/fusion/mptsas.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 28
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 6
-rw-r--r--  drivers/s390/scsi/zfcp_diag.c | 42
-rw-r--r--  drivers/s390/scsi/zfcp_diag.h | 7
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 68
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 5
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 14
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c | 4
-rw-r--r--  drivers/scsi/3w-9xxx.c | 14
-rw-r--r--  drivers/scsi/3w-sas.c | 13
-rw-r--r--  drivers/scsi/3w-xxxx.c | 6
-rw-r--r--  drivers/scsi/53c700.c | 6
-rw-r--r--  drivers/scsi/BusLogic.c | 2
-rw-r--r--  drivers/scsi/FlashPoint.c | 6
-rw-r--r--  drivers/scsi/a100u2w.c | 13
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 13
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 4
-rw-r--r--  drivers/scsi/aacraid/rx.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dump.c | 186
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.c | 2
-rw-r--r--  drivers/scsi/atp870u.c | 7
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h | 4
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.h | 3
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 20
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 85
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_sysfs.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_hw_t5.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 4
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r--  drivers/scsi/cxlflash/main.c | 8
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 6
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 8
-rw-r--r--  drivers/scsi/dc395x.c | 15
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 39
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 4
-rw-r--r--  drivers/scsi/esas2r/esas2r_log.c | 7
-rw-r--r--  drivers/scsi/esp_scsi.c | 4
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 4
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 3
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 16
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 3
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 6
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 18
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 3
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 38
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 6
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 44
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 88
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 73
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 23
-rw-r--r--  drivers/scsi/initio.c | 18
-rw-r--r--  drivers/scsi/ipr.c | 8
-rw-r--r--  drivers/scsi/isci/host.c | 37
-rw-r--r--  drivers/scsi/isci/phy.c | 34
-rw-r--r--  drivers/scsi/isci/phy.h | 1
-rw-r--r--  drivers/scsi/isci/port.c | 62
-rw-r--r--  drivers/scsi/isci/port_config.c | 37
-rw-r--r--  drivers/scsi/isci/remote_device.c | 33
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 13
-rw-r--r--  drivers/scsi/isci/remote_node_table.c | 64
-rw-r--r--  drivers/scsi/isci/request.c | 60
-rw-r--r--  drivers/scsi/isci/task.c | 3
-rw-r--r--  drivers/scsi/jazz_esp.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 14
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 74
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 140
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 28
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 44
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 755
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 34
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 176
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 142
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 38
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 282
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 31
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 124
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 118
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 10
-rw-r--r--  drivers/scsi/mac53c94.c | 13
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 98
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 89
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 514
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 45
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h | 12
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 65
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 7
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 2
-rw-r--r--  drivers/scsi/mvumi.c | 9
-rw-r--r--  drivers/scsi/myrb.c | 126
-rw-r--r--  drivers/scsi/myrs.c | 117
-rw-r--r--  drivers/scsi/nsp32.c | 31
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 151
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.h | 5
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 92
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h | 1
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 19
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 31
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 5
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 49
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.h | 1
-rw-r--r--  drivers/scsi/pmcraid.c | 70
-rw-r--r--  drivers/scsi/qedf/qedf.h | 3
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.h | 1
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 18
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 4
-rw-r--r--  drivers/scsi/qla1280.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 115
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 46
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 84
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 38
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 223
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 19
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 33
-rw-r--r--  drivers/scsi/scsi.c | 13
-rw-r--r--  drivers/scsi/scsi_debug.c | 229
-rw-r--r--  drivers/scsi/scsi_dh.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 66
-rw-r--r--  drivers/scsi/scsi_lib.c | 119
-rw-r--r--  drivers/scsi/scsi_priv.h | 7
-rw-r--r--  drivers/scsi/scsi_scan.c | 23
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 6
-rw-r--r--  drivers/scsi/sd.c | 7
-rw-r--r--  drivers/scsi/sd_zbc.c | 2
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sim710.c | 14
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 310
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 3101
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 39
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 9
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 1
-rw-r--r--  drivers/scsi/sni_53c710.c | 5
-rw-r--r--  drivers/scsi/snic/snic_debugfs.c | 20
-rw-r--r--  drivers/scsi/storvsc_drv.c | 84
-rw-r--r--  drivers/scsi/sun3x_esp.c | 4
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c | 4
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.c | 90
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.h | 2
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c | 3
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 11
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 2
-rw-r--r--  drivers/scsi/ufs/ufs.h | 10
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 169
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 8
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 285
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 30
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 40
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 20
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 17
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 60
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 1
-rw-r--r--  drivers/target/sbp/sbp_target.c | 8
-rw-r--r--  drivers/target/target_core_configfs.c | 36
-rw-r--r--  drivers/target/target_core_device.c | 12
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 58
-rw-r--r--  drivers/target/target_core_file.c | 3
-rw-r--r--  drivers/target/target_core_iblock.c | 81
-rw-r--r--  drivers/target/target_core_iblock.h | 10
-rw-r--r--  drivers/target/target_core_internal.h | 2
-rw-r--r--  drivers/target/target_core_pr.c | 42
-rw-r--r--  drivers/target/target_core_pscsi.c | 7
-rw-r--r--  drivers/target/target_core_rd.c | 27
-rw-r--r--  drivers/target/target_core_rd.h | 1
-rw-r--r--  drivers/target/target_core_sbc.c | 4
-rw-r--r--  drivers/target/target_core_spc.c | 6
-rw-r--r--  drivers/target/target_core_stat.c | 3
-rw-r--r--  drivers/target/target_core_tmr.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 300
-rw-r--r--  drivers/target/target_core_user.c | 440
-rw-r--r--  drivers/target/target_core_xcopy.c | 10
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 14
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 36
-rw-r--r--  drivers/vhost/scsi.c | 58
-rw-r--r--  drivers/xen/xen-scsiback.c | 21
237 files changed, 6785 insertions, 5113 deletions
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 2db1e9c66088..bb3637762985 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1599,7 +1599,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
}
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- int ret = scsi_check_sense(qc->scsicmd);
+ enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
/*
* SUCCESS here means that the sense code could be
* evaluated and should be passed to the upper layers
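Note: the hunk above is fallout from "scsi: core: Introduce enum scsi_disposition" in the shortlog. Error-handling helpers such as scsi_check_sense() used to return their SUCCESS/FAILED/NEEDS_RETRY verdict as a bare int; they now return a dedicated enum, so callers like ata_eh_analyze_tf() get compiler type-checking for free. A minimal standalone sketch of the pattern (the enumerator values follow the midlayer's long-standing constants, but treat them as illustrative):

#include <stdio.h>

/* illustrative stand-in for the kernel's enum scsi_disposition */
enum scsi_disposition {
	NEEDS_RETRY	= 0x2001,
	SUCCESS		= 0x2002,
	FAILED		= 0x2003,
	ADD_TO_MLQUEUE	= 0x2006,
};

/* stub: a real scsi_check_sense() inspects the sense buffer */
static enum scsi_disposition check_sense_stub(int sense_key)
{
	return sense_key ? NEEDS_RETRY : SUCCESS;
}

int main(void)
{
	enum scsi_disposition ret = check_sense_stub(0);

	printf("disposition: 0x%x\n", ret);
	return 0;
}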
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 6be60aa5ffe2..51c386a215f5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1528,16 +1528,20 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
goto busy;
}
- rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
- &send_ioctx->sense_data[0],
- scsilun_to_int(&srp_cmd->lun), data_len,
- TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
- sg, sg_cnt, NULL, 0, NULL, 0);
+ rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
+ scsilun_to_int(&srp_cmd->lun), data_len,
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0) {
pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
srp_cmd->tag);
goto busy;
}
+
+ if (target_submit_prep(cmd, srp_cmd->cdb, sg, sg_cnt, NULL, 0, NULL, 0,
+ GFP_KERNEL))
+ return;
+
+ target_submit(cmd);
return;
busy:
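Note: the srpt hunk shows the staged submission API in the target core replacing the all-in-one target_submit_cmd_map_sgls(). Initialization, CDB/scatterlist preparation, and submission become three calls that can fail (or sleep) independently. A condensed view of the call order, with the arguments abbreviated to what the hunk itself uses; the bare return after a target_submit_prep() failure indicates the command must not be touched again by the caller, which is why the hunk does not jump to the busy label there:

	/* 1) attach the command to the session */
	rc = target_init_cmd(cmd, sess, sense_buf, unpacked_lun, data_len,
			     TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
	if (rc)
		goto busy;		/* nothing was consumed yet */

	/* 2) map the CDB and scatterlists; may allocate with GFP_KERNEL */
	if (target_submit_prep(cmd, cdb, sg, sg_cnt, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;			/* cmd already completed on failure */

	/* 3) hand the prepared command to the target core */
	target_submit(cmd);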
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index a575545d681f..eccbe54d43f3 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -424,8 +424,8 @@ typedef struct _SGE_TRANSACTION32
U8 ContextSize;
U8 DetailsLength;
U8 Flags;
- U32 TransactionContext[1];
- U32 TransactionDetails[1];
+ U32 TransactionContext;
+ U32 TransactionDetails[];
} SGE_TRANSACTION32, MPI_POINTER PTR_SGE_TRANSACTION32,
SGETransaction32_t, MPI_POINTER pSGETransaction32_t;
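Note: this mpi.h change (and the MSG_EVENT_NOTIFY_REPLY one below) converts the old one-element-array idiom to a C99 flexible array member. With U32 TransactionDetails[1], sizeof() on the struct already counts one phantom element, which is why mptlan.c needed the MPT_LAN_TRANSACTION32_SIZE correction this series deletes; with U32 TransactionDetails[], the header size excludes the payload. TransactionContext shrinks to a scalar because a struct may hold only one flexible array member and it must come last. A standalone illustration of the sizing difference:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct old_style { uint8_t flags; uint32_t details[1]; };	/* [1] placeholder */
struct new_style { uint8_t flags; uint32_t details[]; };	/* flexible array */

int main(void)
{
	/* old_style sizeof() includes one bogus element; new_style doesn't */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct old_style), sizeof(struct new_style));

	/* sizing an allocation for n trailing elements needs no fudge term */
	size_t n = 4;
	struct new_style *p = malloc(sizeof(*p) + n * sizeof(p->details[0]));

	free(p);
	return 0;
}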
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index c249f2994fc1..1534460fd5b1 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -448,7 +448,7 @@ typedef struct _MSG_EVENT_NOTIFY_REPLY
U32 IOCLogInfo; /* 10h */
U32 Event; /* 14h */
U32 EventContext; /* 18h */
- U32 Data[1]; /* 1Ch */
+ U32 Data[]; /* 1Ch */
} MSG_EVENT_NOTIFY_REPLY, MPI_POINTER PTR_MSG_EVENT_NOTIFY_REPLY,
EventNotificationReply_t, MPI_POINTER pEventNotificationReply_t;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 549797d0301d..f4f89cf23631 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -3084,7 +3084,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
int req_sz;
int reply_sz;
int sz;
- u32 status, vv;
+ u32 vv;
u8 shiftFactor=1;
/* IOC *must* NOT be in RESET state! */
@@ -3142,7 +3142,6 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
- status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
/* CHECKME! IOCStatus, IOCLogInfo */
facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
@@ -4974,7 +4973,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
if (hdr.PageLength > 0) {
data_sz = hdr.PageLength * 4;
- ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
rc = -ENOMEM;
if (ppage0_alloc) {
memset((u8 *)ppage0_alloc, 0, data_sz);
@@ -5020,7 +5019,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
+ ppage1_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
if (ppage1_alloc) {
memset((u8 *)ppage1_alloc, 0, data_sz);
cfg.physAddr = page1_dma;
@@ -5321,7 +5320,7 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
/* Read the config page */
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ ppage_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
if (ppage_alloc) {
memset((u8 *)ppage_alloc, 0, data_sz);
cfg.physAddr = page_dma;
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 813d46311f6a..b9e0376be723 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -274,7 +274,7 @@ typedef union _MPT_FRAME_TRACKER {
} linkage;
/*
* NOTE: When request frames are free, on the linkage structure
- * contets are valid. All other values are invalid.
+ * contents are valid. All other values are invalid.
* In particular, do NOT reply on offset [2]
* (in words) being the * message context.
* The message context must be reset (computed via base address
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 24aebad60366..72025996cd70 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -321,7 +321,6 @@ mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
int ii;
int retval;
unsigned long timeout;
- unsigned long time_count;
u16 iocstatus;
@@ -383,7 +382,6 @@ mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
ioc->name, tm_type, timeout));
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
- time_count = jiffies;
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
@@ -1369,7 +1367,6 @@ mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
int lun;
int maxWordsLeft;
int numBytes;
- u8 port;
struct scsi_device *sdev;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) {
@@ -1381,13 +1378,8 @@ mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
ioc->name));
- /* Get the port number and set the maximum number of bytes
- * in the returned structure.
- * Ignore the port setting.
- */
numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
maxWordsLeft = numBytes/sizeof(int);
- port = karg.hdr.port;
if (maxWordsLeft <= 0) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index 2205dcab0adb..c281b1359419 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -67,12 +67,13 @@
#ifdef CONFIG_FUSION_LOGGING
#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
-{ \
+do { \
if (IOC->debug_level & BITS) \
CMD; \
-}
+} while (0)
#else
-#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
+do { } while (0)
#endif
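Note: wrapping MPT_CHECK_LOGGING in do { ... } while (0), and giving the disabled variant the same shape, is the standard idiom for statement-like macros: the expansion becomes exactly one statement, so the trailing semicolon at the call site parses correctly inside an unbraced if/else. A minimal standalone demonstration of what the bare-braces version breaks:

#include <stdio.h>

/* BAD: expands to a block; the caller's ';' then terminates the 'if',
 * orphaning any 'else' that follows. */
#define LOG_BAD(cond, msg)	{ if (cond) puts(msg); }

/* GOOD: a single statement that still takes its trailing ';' */
#define LOG_GOOD(cond, msg)	do { if (cond) puts(msg); } while (0)

int main(void)
{
	int debug = 1;

	if (debug)
		LOG_GOOD(1, "logging enabled");
	else		/* fails to compile if LOG_GOOD is swapped for LOG_BAD */
		puts("quiet");
	return 0;
}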
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 7d3784aa20e5..3261cac762de 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -72,9 +72,6 @@ MODULE_VERSION(my_VERSION);
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
-#define MPT_LAN_TRANSACTION32_SIZE \
- (sizeof(SGETransaction32_t) - sizeof(u32))
-
/*
* Fusion MPT LAN private structures
*/
@@ -745,7 +742,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 2 * sizeof(u32);
pTrans->Flags = 0;
- pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+ pTrans->TransactionContext = cpu_to_le32(ctx);
// dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
// IOC_AND_NETDEV_NAMES_s_s(dev),
@@ -1159,7 +1156,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
__func__, buckets, curr));
max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
- (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
+ (sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));
while (buckets) {
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
@@ -1234,7 +1231,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
pTrans->ContextSize = sizeof(u32);
pTrans->DetailsLength = 0;
pTrans->Flags = 0;
- pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+ pTrans->TransactionContext = cpu_to_le32(ctx);
pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 5eb0b3361e4e..e0a65a348502 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -780,13 +780,11 @@ static void
mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
struct scsi_target *starget)
{
- VirtTarget *vtarget;
struct sas_rphy *rphy;
struct mptsas_phyinfo *phy_info = NULL;
struct mptsas_enclosure enclosure_info;
rphy = dev_to_rphy(starget->dev.parent);
- vtarget = starget->hostdata;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
rphy->identify.sas_address);
if (!phy_info)
@@ -3442,14 +3440,12 @@ mptsas_expander_event_add(MPT_ADAPTER *ioc,
__le64 sas_address;
port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
- if (!port_info)
- BUG();
+ BUG_ON(!port_info);
port_info->num_phys = (expander_data->NumPhys) ?
expander_data->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
sizeof(struct mptsas_phyinfo), GFP_KERNEL);
- if (!port_info->phy_info)
- BUG();
+ BUG_ON(!port_info->phy_info);
memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
@@ -3781,7 +3777,7 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
printk(MYIOC_s_DEBUG_FMT
"SDEV OUTSTANDING CMDS"
"%d\n", ioc->name,
- atomic_read(&sdev->device_busy)));
+ scsi_device_busy(sdev)));
}
}
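Note: the last mptsas hunk is driver-side fallout from the core change called out in the merge message: the per-device count of in-flight commands is now tracked with a sbitmap rather than a plain atomic_t, so drivers can no longer atomic_read() the field directly and must go through the scsi_device_busy() accessor, which hides the representation. Hypothetical usage, mirroring the debug print in the hunk:

	/* sketch: query in-flight commands through the accessor only */
	if (scsi_device_busy(sdev))
		dev_dbg(&sdev->sdev_gendev, "%d commands outstanding\n",
			scsi_device_busy(sdev));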
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 768873dd55b8..fd2f1c31bd21 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -413,12 +413,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
dev_set_drvdata(&ccw_device->dev, adapter);
- if (sysfs_create_group(&ccw_device->dev.kobj,
- &zfcp_sysfs_adapter_attrs))
- goto failed;
-
- if (zfcp_diag_sysfs_setup(adapter))
- goto failed;
+ if (device_add_groups(&ccw_device->dev, zfcp_sysfs_adapter_attr_groups))
+ goto err_sysfs;
/* report size limit per scatter-gather segment */
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
@@ -427,8 +423,23 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
return adapter;
+err_sysfs:
failed:
- zfcp_adapter_unregister(adapter);
+ /* TODO: make this more fine-granular */
+ cancel_delayed_work_sync(&adapter->scan_work);
+ cancel_work_sync(&adapter->stat_work);
+ cancel_work_sync(&adapter->ns_up_work);
+ cancel_work_sync(&adapter->version_change_lost_work);
+ zfcp_destroy_adapter_work_queue(adapter);
+
+ zfcp_fc_wka_ports_force_offline(adapter->gs);
+ zfcp_scsi_adapter_unregister(adapter);
+
+ zfcp_erp_thread_kill(adapter);
+ zfcp_dbf_adapter_unregister(adapter);
+ zfcp_qdio_destroy(adapter->qdio);
+
+ zfcp_ccw_adapter_put(adapter); /* final put to release */
return ERR_PTR(-ENOMEM);
}
@@ -444,8 +455,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
- zfcp_diag_sysfs_destroy(adapter);
- sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
+ device_remove_groups(&cdev->dev, zfcp_sysfs_adapter_attr_groups);
zfcp_erp_thread_kill(adapter);
zfcp_dbf_adapter_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 26c89c232ef2..94de55304a02 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -156,7 +156,7 @@ struct zfcp_adapter {
u32 fsf_lic_version;
u32 adapter_features; /* FCP channel features */
u32 connection_features; /* host connection features */
- u32 hardware_version; /* of FCP channel */
+ u32 hardware_version; /* of FCP channel */
u32 fc_security_algorithms; /* of FCP channel */
u32 fc_security_algorithms_old; /* of FCP channel */
u16 timer_ticks; /* time int for a tick */
@@ -180,7 +180,7 @@ struct zfcp_adapter {
rwlock_t erp_lock;
wait_queue_head_t erp_done_wqh;
struct zfcp_erp_action erp_action; /* pending error recovery */
- atomic_t erp_counter;
+ atomic_t erp_counter;
u32 erp_total_count; /* total nr of enqueued erp
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
@@ -217,7 +217,7 @@ struct zfcp_port {
u32 d_id; /* D_ID */
u32 handle; /* handle assigned by FSF */
struct zfcp_erp_action erp_action; /* pending error recovery */
- atomic_t erp_counter;
+ atomic_t erp_counter;
u32 maxframe_size;
u32 supported_classes;
u32 connection_info;
diff --git a/drivers/s390/scsi/zfcp_diag.c b/drivers/s390/scsi/zfcp_diag.c
index 67a8f4e57db1..4d2d89d9c15a 100644
--- a/drivers/s390/scsi/zfcp_diag.c
+++ b/drivers/s390/scsi/zfcp_diag.c
@@ -10,8 +10,6 @@
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/string.h>
-#include <linux/kernfs.h>
-#include <linux/sysfs.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -80,46 +78,6 @@ void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
}
/**
- * zfcp_diag_sysfs_setup() - Setup the sysfs-group for adapter-diagnostics.
- * @adapter: target adapter to which the group should be added.
- *
- * Return: 0 on success; Something else otherwise (see sysfs_create_group()).
- */
-int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
-{
- int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
- &zfcp_sysfs_diag_attr_group);
- if (rc == 0)
- adapter->diagnostics->sysfs_established = 1;
-
- return rc;
-}
-
-/**
- * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
- * @adapter: target adapter from which the group should be removed.
- */
-void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
-{
- if (adapter->diagnostics == NULL ||
- !adapter->diagnostics->sysfs_established)
- return;
-
- /*
- * We need this state-handling so we can prevent warnings being printed
- * on the kernel-console in case we have to abort a halfway done
- * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
- * established. sysfs_remove_group() does this checking as well, but
- * still prints a warning in case we try to remove a group that has not
- * been established before
- */
- adapter->diagnostics->sysfs_established = 0;
- sysfs_remove_group(&adapter->ccw_device->dev.kobj,
- &zfcp_sysfs_diag_attr_group);
-}
-
-
-/**
* zfcp_diag_update_xdata() - Update a diagnostics buffer.
* @hdr: the meta data to update.
* @data: data to use for the update.
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index 3852367f15f6..da55133da8fe 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -40,8 +40,6 @@ struct zfcp_diag_header {
/**
* struct zfcp_diag_adapter - central storage for all diagnostics concerning an
* adapter.
- * @sysfs_established: flag showing that the associated sysfs-group was created
- * during run of zfcp_adapter_enqueue().
* @max_age: maximum age of data in diagnostic buffers before they need to be
* refreshed (in ms).
* @port_data: data retrieved using exchange port data.
@@ -52,8 +50,6 @@ struct zfcp_diag_header {
* @config_data.data: cached QTCB Bottom of command exchange config data.
*/
struct zfcp_diag_adapter {
- u64 sysfs_established :1;
-
unsigned long max_age;
struct zfcp_diag_adapter_port_data {
@@ -69,9 +65,6 @@ struct zfcp_diag_adapter {
int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
-int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
-void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
-
void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
const void *const data, const bool incomplete);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 58879213f225..6bc96d70254d 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -11,6 +11,7 @@
#define ZFCP_EXT_H
#include <linux/types.h>
+#include <linux/sysfs.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_def.h"
#include "zfcp_fc.h"
@@ -179,13 +180,12 @@ extern void zfcp_scsi_shost_update_port_data(
const struct fsf_qtcb_bottom_port *const bottom);
/* zfcp_sysfs.c */
+extern const struct attribute_group *zfcp_sysfs_adapter_attr_groups[];
extern const struct attribute_group *zfcp_unit_attr_groups[];
-extern struct attribute_group zfcp_sysfs_adapter_attrs;
extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
-extern const struct attribute_group zfcp_sysfs_diag_attr_group;
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 485028324eae..2e4804ef2fb9 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -846,7 +846,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
if (adapter->req_no == 0)
adapter->req_no++;
- INIT_LIST_HEAD(&req->list);
timer_setup(&req->timer, NULL, 0);
init_completion(&req->completion);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 049596cbfb5d..6671d9563f6c 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -20,6 +20,9 @@ static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
+#define ZFCP_QDIO_REQUEST_RESCAN_MSECS (MSEC_PER_SEC * 10)
+#define ZFCP_QDIO_REQUEST_SCAN_MSECS MSEC_PER_SEC
+
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
unsigned int qdio_err)
{
@@ -70,15 +73,41 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
return;
}
+}
- /* cleanup all SBALs being program-owned now */
- zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
+static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
+{
+ struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
+ struct ccw_device *cdev = qdio->adapter->ccw_device;
+ unsigned int start, error;
+ int completed;
- spin_lock_irq(&qdio->stat_lock);
- zfcp_qdio_account(qdio);
- spin_unlock_irq(&qdio->stat_lock);
- atomic_add(count, &qdio->req_q_free);
- wake_up(&qdio->req_q_wq);
+ completed = qdio_inspect_queue(cdev, 0, false, &start, &error);
+ if (completed > 0) {
+ if (error) {
+ zfcp_qdio_handler_error(qdio, "qdreqt1", error);
+ } else {
+ /* cleanup all SBALs being program-owned now */
+ zfcp_qdio_zero_sbals(qdio->req_q, start, completed);
+
+ spin_lock_irq(&qdio->stat_lock);
+ zfcp_qdio_account(qdio);
+ spin_unlock_irq(&qdio->stat_lock);
+ atomic_add(completed, &qdio->req_q_free);
+ wake_up(&qdio->req_q_wq);
+ }
+ }
+
+ if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
+ timer_reduce(&qdio->request_timer,
+ jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
+}
+
+static void zfcp_qdio_request_timer(struct timer_list *timer)
+{
+ struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);
+
+ tasklet_schedule(&qdio->request_tasklet);
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
@@ -139,8 +168,11 @@ static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
unsigned int start, error;
int completed;
- /* Check the Response Queue, and kick off the Request Queue tasklet: */
- completed = qdio_get_next_buffers(cdev, 0, &start, &error);
+ if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
+ tasklet_schedule(&qdio->request_tasklet);
+
+ /* Check the Response Queue: */
+ completed = qdio_inspect_queue(cdev, 0, true, &start, &error);
if (completed < 0)
return;
if (completed > 0)
@@ -286,7 +318,7 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
/*
* This should actually be a spin_lock_bh(stat_lock), to protect against
- * zfcp_qdio_int_req() in tasklet context.
+ * Request Queue completion processing in tasklet context.
* But we can't do so (and are safe), as we always get called with IRQs
* disabled by spin_lock_irq[save](req_q_lock).
*/
@@ -308,6 +340,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
return retval;
}
+ if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
+ tasklet_schedule(&qdio->request_tasklet);
+ else
+ timer_reduce(&qdio->request_timer,
+ jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));
+
/* account for transferred buffers */
qdio->req_q_idx += sbal_number;
qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
@@ -368,6 +406,8 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
wake_up(&qdio->req_q_wq);
tasklet_disable(&qdio->irq_tasklet);
+ tasklet_disable(&qdio->request_tasklet);
+ del_timer_sync(&qdio->request_timer);
qdio_stop_irq(adapter->ccw_device);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
@@ -428,8 +468,6 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
init_data.int_parm = (unsigned long) qdio;
init_data.input_sbal_addr_array = input_sbals;
init_data.output_sbal_addr_array = output_sbals;
- init_data.scan_threshold =
- QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
if (qdio_establish(cdev, &init_data))
goto failed_establish;
@@ -472,6 +510,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+ /* Enable processing for Request Queue completions: */
+ tasklet_enable(&qdio->request_tasklet);
/* Enable processing for QDIO interrupts: */
tasklet_enable(&qdio->irq_tasklet);
/* This results in a qdio_start_irq(): */
@@ -495,6 +535,7 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
return;
tasklet_kill(&qdio->irq_tasklet);
+ tasklet_kill(&qdio->request_tasklet);
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
@@ -521,8 +562,11 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
spin_lock_init(&qdio->req_q_lock);
spin_lock_init(&qdio->stat_lock);
+ timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
+ tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
tasklet_disable(&qdio->irq_tasklet);
+ tasklet_disable(&qdio->request_tasklet);
adapter->qdio = qdio;
return 0;
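Note: the zfcp_qdio.c rework drops init_data.scan_threshold, so qdio no longer signals Request Queue completions on its own; the driver instead polls them from a dedicated tasklet and arms a timer as a backstop so completed SBALs are still reclaimed when no new traffic triggers a rescan. Rearming on every send is cheap because timer_reduce() only ever moves a pending timer earlier, never later. A condensed sketch of the wiring, assuming only what the hunks above show:

	/* setup: tasklets start disabled until the queues are up */
	timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
	tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
	tasklet_disable(&qdio->request_tasklet);

	/* hot path (zfcp_qdio_send): poll soon if free space is tight,
	 * otherwise guarantee a scan within roughly a second */
	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));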
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 9c1f310db155..390706867df3 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -30,6 +30,9 @@
* @req_q_util: used for accounting
* @req_q_full: queue full incidents
* @req_q_wq: used to wait for SBAL availability
+ * @irq_tasklet: used for QDIO interrupt processing
+ * @request_tasklet: used for Request Queue completion processing
+ * @request_timer: used to trigger the Request Queue completion processing
* @adapter: adapter used in conjunction with this qdio structure
* @max_sbale_per_sbal: qdio limit per sbal
* @max_sbale_per_req: qdio limit per request
@@ -46,6 +49,8 @@ struct zfcp_qdio {
atomic_t req_q_full;
wait_queue_head_t req_q_wq;
struct tasklet_struct irq_tasklet;
+ struct tasklet_struct request_tasklet;
+ struct timer_list request_timer;
struct zfcp_adapter *adapter;
u16 max_sbale_per_sbal;
u16 max_sbale_per_req;
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 8d9662e8b717..544efd4c42f0 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -327,10 +327,10 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
- put_device(&port->dev);
-
zfcp_erp_port_shutdown(port, 0, "syprs_1");
device_unregister(&port->dev);
+
+ put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
@@ -435,7 +435,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
NULL
};
-struct attribute_group zfcp_sysfs_adapter_attrs = {
+static const struct attribute_group zfcp_sysfs_adapter_attr_group = {
.attrs = zfcp_adapter_attrs,
};
@@ -906,7 +906,13 @@ static struct attribute *zfcp_sysfs_diag_attrs[] = {
NULL,
};
-const struct attribute_group zfcp_sysfs_diag_attr_group = {
+static const struct attribute_group zfcp_sysfs_diag_attr_group = {
.name = "diagnostics",
.attrs = zfcp_sysfs_diag_attrs,
};
+
+const struct attribute_group *zfcp_sysfs_adapter_attr_groups[] = {
+ &zfcp_sysfs_adapter_attr_group,
+ &zfcp_sysfs_diag_attr_group,
+ NULL,
+};
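Note: merging the adapter and diagnostics groups into one NULL-terminated array is what lets zfcp_adapter_enqueue() replace two sysfs_create_group() calls with a single device_add_groups(), and it retires the sysfs_established flag zfcp_diag kept only to suppress remove-before-create warnings. The shape of the pattern, using only what the hunks define: an unnamed group lands directly under the device, while a .name'd group becomes a subdirectory:

	static const struct attribute_group *zfcp_sysfs_adapter_attr_groups[] = {
		&zfcp_sysfs_adapter_attr_group,	/* unnamed: attrs at top level */
		&zfcp_sysfs_diag_attr_group,	/* named: shows up as diagnostics/ */
		NULL,				/* terminator is mandatory */
	};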
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index e67bf7388cae..59333f0257a8 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -255,9 +255,9 @@ int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
scsi_device_put(sdev);
}
- put_device(&unit->dev);
-
device_unregister(&unit->dev);
+ put_device(&unit->dev); /* undo _zfcp_unit_find() */
+
return 0;
}
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b96e82de4237..47028f5e57ab 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -939,13 +939,13 @@ out:
/* This function will empty the response queue */
static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
- u32 status_reg_value, response_que_value;
+ u32 status_reg_value;
int count = 0, retval = 1;
status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
- response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
count++;
}
@@ -1698,9 +1698,6 @@ out:
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
- TW_Device_Extension *tw_dev;
-
- tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
if (capacity >= 0x200000) {
heads = 255;
@@ -1809,14 +1806,11 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
u32 num_sectors = 0x0;
int i, sg_count;
struct scsi_cmnd *srb = NULL;
- struct scatterlist *sglist = NULL, *sg;
+ struct scatterlist *sg;
int retval = 1;
- if (tw_dev->srb[request_id]) {
+ if (tw_dev->srb[request_id])
srb = tw_dev->srb[request_id];
- if (scsi_sglist(srb))
- sglist = scsi_sglist(srb);
- }
/* Initialize command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 3db0e42e9aa7..4fde39da54e4 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -295,14 +295,11 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
TW_Command_Apache *command_packet;
int i, sg_count;
struct scsi_cmnd *srb = NULL;
- struct scatterlist *sglist = NULL, *sg;
+ struct scatterlist *sg;
int retval = 1;
- if (tw_dev->srb[request_id]) {
+ if (tw_dev->srb[request_id])
srb = tw_dev->srb[request_id];
- if (scsi_sglist(srb))
- sglist = scsi_sglist(srb);
- }
/* Initialize command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -863,7 +860,6 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
TW_Command_Full *full_command_packet;
unsigned short error;
char *error_str;
- int retval = 1;
header = tw_dev->sense_buffer_virt[i];
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -895,7 +891,7 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
goto out;
}
out:
- return retval;
+ return 1;
} /* End twl_fill_sense() */
/* This function will free up device extension resources */
@@ -1408,9 +1404,6 @@ out:
static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
int heads, sectors;
- TW_Device_Extension *tw_dev;
-
- tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
if (capacity >= 0x200000) {
heads = 255;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index d90b9fca4aea..a7292883b72b 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -460,12 +460,12 @@ static int tw_check_errors(TW_Device_Extension *tw_dev)
/* This function will empty the response que */
static void tw_empty_response_que(TW_Device_Extension *tw_dev)
{
- u32 status_reg_value, response_que_value;
+ u32 status_reg_value;
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
- response_que_value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+ inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
}
} /* End tw_empty_response_que() */
@@ -1342,10 +1342,8 @@ static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev
sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
- TW_Device_Extension *tw_dev;
dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam()\n");
- tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
heads = 64;
sectors = 32;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 3242ff63986f..ab42feab233f 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -980,9 +980,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
NCR_700_set_tag_neg_state(SCp->device,
NCR_700_FINISHED_TAG_NEGOTIATION);
- /* check for contingent allegiance contitions */
- if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
- status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
+ /* check for contingent allegiance conditions */
+ if (hostdata->status[0] >> 1 == CHECK_CONDITION ||
+ hostdata->status[0] >> 1 == COMMAND_TERMINATED) {
struct NCR_700_command_slot *slot =
(struct NCR_700_command_slot *)SCp->host_scribble;
if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
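Note: this 53c700 hunk implements "Open-code status_byte(u8) calls" from the shortlog. The midlayer's old status_byte() macro carried a SCSI-1 legacy: it right-shifted the SAM status byte, so the constants it was compared against (CHECK_CONDITION == 0x01, COMMAND_TERMINATED == 0x11) live in that shifted encoding rather than the SAM values (0x02, 0x22). Open-coding the >> 1 keeps the comparison identical while making the legacy encoding visible at the call site. Standalone illustration:

#include <stdio.h>

#define SAM_STAT_CHECK_CONDITION	0x02	/* SAM-defined status byte */
#define CHECK_CONDITION			0x01	/* legacy shifted encoding */

int main(void)
{
	unsigned char status = SAM_STAT_CHECK_CONDITION;

	/* old: status_byte(status) == CHECK_CONDITION
	 * new: the shift is explicit, the comparison unchanged */
	if (status >> 1 == CHECK_CONDITION)
		puts("contingent allegiance condition");
	return 0;
}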
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 69068081f439..3ee46a843cb5 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3426,7 +3426,7 @@ Target Requested Completed Requested Completed Requested Completed\n\
/*
blogic_msg prints Driver Messages.
*/
-
+__printf(2, 4)
static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
struct blogic_adapter *adapter, ...)
{
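Note: the unusual-looking __printf(2, 4) on blogic_msg() is correct because the adapter pointer sits between the format string (parameter 2) and the variadic arguments (which start at parameter 4); with the annotation, GCC and clang type-check every caller's format against its arguments. A standalone equivalent spelled with the raw attribute the kernel macro expands to:

#include <stdio.h>
#include <stdarg.h>

__attribute__((format(printf, 2, 4)))	/* fmt is arg 2, varargs start at 4 */
static void msg(int level, const char *fmt, void *adapter, ...)
{
	va_list ap;

	(void)adapter;
	va_start(ap, adapter);		/* varargs begin after 'adapter' */
	if (level > 0)
		vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	msg(1, "count = %d\n", NULL, 42);	/* "%d" checked against int */
	return 0;
}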
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 24ace1824048..0464e37c806a 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1615,7 +1615,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
unsigned char thisCard;
CALL_BK_FN callback;
- unsigned char TID;
struct sccb *pSaveSCCB;
struct sccb_mgr_tar_info *currTar_Info;
@@ -1652,9 +1651,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
}
else {
-
- TID = p_Sccb->TargID;
-
if (p_Sccb->Sccb_tag) {
MDISABLE_INT(ioport);
if (((struct sccb_card *)pCurrCard)->
@@ -4534,7 +4530,7 @@ static void FPT_phaseBusFree(u32 port, unsigned char p_card)
*
* Function: Auto Load Default Map
*
- * Description: Load the Automation RAM with the defualt map values.
+ * Description: Load the Automation RAM with the default map values.
*
*---------------------------------------------------------------------*/
static void FPT_autoLoadDefaultMap(u32 p_port)
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 66c514310f3c..028af6b1057c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -269,7 +269,7 @@ static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
}
/**
- * orc_exec_sb - Queue an SCB with the HA
+ * orc_exec_scb - Queue an SCB with the HA
* @host: host adapter the SCB belongs to
* @scb: SCB to queue for execution
*/
@@ -586,7 +586,7 @@ static int orc_reset_scsi_bus(struct orc_host * host)
* orc_device_reset - device reset handler
* @host: host to reset
* @cmd: command causing the reset
- * @target; target device
+ * @target: target device
*
* Reset registers, reset a hanging bus and kill active and disconnected
* commands for target w/o soft reset
@@ -727,7 +727,7 @@ static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
spin_unlock_irqrestore(&(host->allocation_lock), flags);
}
-/**
+/*
* orchid_abort_scb - abort a command
*
* Abort a queued command that has been passed to the firmware layer
@@ -902,7 +902,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
}
/**
- * inia100_queue - queue command with host
+ * inia100_queue_lck - queue command with host
* @cmd: Command block
* @done: Completion function
*
@@ -1088,8 +1088,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
unsigned long port, bios;
int error = -ENODEV;
u32 sz;
- unsigned long biosaddr;
- char *bios_phys;
if (pci_enable_device(pdev))
goto out;
@@ -1139,9 +1137,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
goto out_free_scb_array;
}
- biosaddr = host->BIOScfg;
- biosaddr = (biosaddr << 4);
- bios_phys = phys_to_virt(biosaddr);
if (init_orchid(host)) { /* Initialize orchid chip */
printk("inia100: initial orchid fail!!\n");
goto out_free_escb_array;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 4ca5e13a26a6..f1f62b5da8b7 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -786,8 +786,8 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
}
/**
- * aac_probe_container - query a logical volume
- * @scsicmd: the scsi command block
+ * aac_probe_container_callback1 - query a logical volume
+ * @scsicmd: the scsi command block
*
* Queries the controller about the given volume. The volume information
* is updated in the struct fsa_dev_info structure rather than returned.
@@ -838,7 +838,7 @@ struct scsi_inq {
};
/**
- * InqStrCopy - string merge
+ * inqstrcpy - string merge
* @a: string to copy from
* @b: string to copy to
*
@@ -1804,7 +1804,7 @@ static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
}
/**
- * aac_get_safw_ciss_luns() Process topology change
+ * aac_get_safw_ciss_luns() - Process topology change
* @dev: aac_dev structure
*
* Execute a CISS REPORT PHYS LUNS and process the results into
@@ -1881,11 +1881,6 @@ static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
}
-static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
-{
- return dev->safw_phys_luns->lun[lun].node_ident[8];
-}
-
static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
int bus, int target)
{
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 1b1da162f5f6..e7cc927ed952 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -472,7 +472,7 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
/**
- * aac_send_raw_scb
+ * aac_send_raw_srb()
* @dev: adapter is being processed
* @arg: arguments to the send call
*/
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0ae0d1fa2b50..54eb4d41bc2c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -323,7 +323,7 @@ void aac_fib_init(struct fib *fibptr)
}
/**
- * fib_deallocate - deallocate a fib
+ * fib_dealloc - deallocate a fib
* @fibptr: fib to deallocate
*
* Will deallocate and return to the free pool the FIB pointed to by the
@@ -1950,7 +1950,7 @@ void aac_src_reinit_aif_worker(struct work_struct *work)
}
/**
- * aac_handle_sa_aif Handle a message from the firmware
+ * aac_handle_sa_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
* @fibptr: Pointer to fibptr from adapter
*
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index cdccf9abcdc4..e06ff83b69ce 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -532,7 +532,7 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm)
}
/**
- * aac_rx_init - initialize an i960 based AAC card
+ * _aac_rx_init - initialize an i960 based AAC card
* @dev: device to configure
*
* Allocate and set up resources for the i960 based AAC variants. The
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 993596d44a12..800052f10699 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -1799,7 +1799,7 @@ typedef struct adv_req {
* Field naming convention:
*
* *_able indicates both whether a feature should be enabled or disabled
- * and whether a device isi capable of the feature. At initialization
+ * and whether a device is capable of the feature. At initialization
* this field may be set, but later if a device is found to be incapable
* of the feature, the field is cleared.
*/
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 98978bc199ff..8f24180646c2 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -33,7 +33,7 @@
#ifdef ASD_DEBUG
#define ASD_DPRINTK asd_printk
#else
-#define ASD_DPRINTK(fmt, ...)
+#define ASD_DPRINTK(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
/* 2*ITNL timeout + 1 second */
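Note: defining the disabled ASD_DPRINTK as no_printk(...) instead of expanding to nothing keeps the arguments visible to the compiler: format strings are still type-checked and variables used only in debug prints no longer trigger "unused" warnings, yet no code is emitted. The ##__VA_ARGS__ GNU extension swallows the comma when the macro is called with a format string alone. A self-contained version of the same pattern:

#include <stdio.h>

/* no_printk-alike: checked like printf, optimized away entirely */
__attribute__((format(printf, 1, 2)))
static inline int no_printk_stub(const char *fmt, ...)
{
	(void)fmt;
	return 0;
}

#ifdef ASD_DEBUG
#define DPRINTK(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define DPRINTK(fmt, ...)	no_printk_stub(fmt, ##__VA_ARGS__)
#endif

int main(void)
{
	int phy = 3;

	DPRINTK("phy %d is up\n", phy);	/* format checked in both builds */
	return 0;
}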
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
index 7c4c53a54b78..552f1913e95e 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.c
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -720,154 +720,8 @@ static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
}
-#if 0
-
/**
- * asd_dump_ddb_site -- dump a CSEQ DDB site
- * @asd_ha: pointer to host adapter structure
- * @site_no: site number of interest
- */
-void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
-{
- if (site_no >= asd_ha->hw_prof.max_ddbs)
- return;
-
-#define DDB_FIELDB(__name) \
- asd_ddbsite_read_byte(asd_ha, site_no, \
- offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-#define DDB2_FIELDB(__name) \
- asd_ddbsite_read_byte(asd_ha, site_no, \
- offsetof(struct asd_ddb_stp_sata_target_port, __name))
-#define DDB_FIELDW(__name) \
- asd_ddbsite_read_word(asd_ha, site_no, \
- offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-
-#define DDB_FIELDD(__name) \
- asd_ddbsite_read_dword(asd_ha, site_no, \
- offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-
- asd_printk("DDB: 0x%02x\n", site_no);
- asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
- asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
- asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
- asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
- asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
- asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
- asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
- asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
- asd_printk("Pathway Blocked Count: 0x%02x\n",
- DDB_FIELDB(pathway_blocked_count));
- asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
- asd_printk("more_compat_features: 0x%08x\n",
- DDB_FIELDD(more_compat_features));
- asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
- asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
- asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
- asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail));
- asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail));
- asd_printk("Active Task Count: 0x%04x\n",
- DDB_FIELDW(active_task_count));
- asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
- asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
- asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
-}
-
-void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
-{
-#define DDB0_FIELDB(__name) \
- asd_ddbsite_read_byte(asd_ha, 0, \
- offsetof(struct asd_ddb_seq_shared, __name))
-#define DDB0_FIELDW(__name) \
- asd_ddbsite_read_word(asd_ha, 0, \
- offsetof(struct asd_ddb_seq_shared, __name))
-
-#define DDB0_FIELDD(__name) \
- asd_ddbsite_read_dword(asd_ha,0 , \
- offsetof(struct asd_ddb_seq_shared, __name))
-
-#define DDB0_FIELDA(__name, _o) \
- asd_ddbsite_read_byte(asd_ha, 0, \
- offsetof(struct asd_ddb_seq_shared, __name)+_o)
-
-
- asd_printk("DDB: 0\n");
- asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
- asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
- asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt));
- asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
- asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
- asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
- asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag));
- asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
- asd_printk("est_nexus_buf_thresh:%04x\n",
- DDB0_FIELDW(est_nexus_buf_thresh));
- asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
- asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up));
- asd_printk("port_map_by_links:%02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- DDB0_FIELDA(port_map_by_links, 0),
- DDB0_FIELDA(port_map_by_links, 1),
- DDB0_FIELDA(port_map_by_links, 2),
- DDB0_FIELDA(port_map_by_links, 3),
- DDB0_FIELDA(port_map_by_links, 4),
- DDB0_FIELDA(port_map_by_links, 5),
- DDB0_FIELDA(port_map_by_links, 6),
- DDB0_FIELDA(port_map_by_links, 7));
-}
-
-static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
-{
-
-#define SCB_FIELDB(__name) \
- asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \
- + offsetof(struct initiate_ssp_task, __name))
-#define SCB_FIELDW(__name) \
- asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \
- + offsetof(struct initiate_ssp_task, __name))
-#define SCB_FIELDD(__name) \
- asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \
- + offsetof(struct initiate_ssp_task, __name))
-
- asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
- asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
- asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
- asd_printk("Target Port Xfer Tag: 0x%04x.\n",
- SCB_FIELDW(ssp_frame.tptt));
- asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs));
- asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
-}
-
-/**
- * asd_dump_scb_sites -- dump currently used CSEQ SCB sites
- * @asd_ha: pointer to host adapter struct
- */
-void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
-{
- u16 site_no;
-
- for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
- u8 opcode;
-
- if (!SCB_SITE_VALID(site_no))
- continue;
-
- /* We are only interested in SCB sites currently used.
- */
- opcode = asd_scbsite_read_byte(asd_ha, site_no,
- offsetof(struct scb_header,
- opcode));
- if (opcode == 0xFF)
- continue;
-
- asd_printk("\nSCB: 0x%x\n", site_no);
- asd_dump_scb_site(asd_ha, site_no);
- }
-}
-
-#endif /* 0 */
-
-/**
- * ads_dump_seq_state -- dump CSEQ and LSEQ states
+ * asd_dump_seq_state -- dump CSEQ and LSEQ states
* @asd_ha: pointer to host adapter structure
* @lseq_mask: mask of LSEQs of interest
*/
@@ -908,42 +762,4 @@ void asd_dump_frame_rcvd(struct asd_phy *phy,
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
}
-#if 0
-
-static void asd_dump_scb(struct asd_ascb *ascb, int ind)
-{
- asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
- "index:%d, opcode:0x%02x\n",
- ind, ascb->dma_scb.vaddr,
- (unsigned long long)ascb->dma_scb.dma_handle,
- (unsigned long long)
- le64_to_cpu(ascb->scb->header.next_scb),
- le16_to_cpu(ascb->scb->header.index),
- ascb->scb->header.opcode);
-}
-
-void asd_dump_scb_list(struct asd_ascb *ascb, int num)
-{
- int i = 0;
-
- asd_printk("dumping %d scbs:\n", num);
-
- asd_dump_scb(ascb, i++);
- --num;
-
- if (num > 0 && !list_empty(&ascb->list)) {
- struct list_head *el;
-
- list_for_each(el, &ascb->list) {
- struct asd_ascb *s = list_entry(el, struct asd_ascb,
- list);
- asd_dump_scb(s, i++);
- if (--num <= 0)
- break;
- }
- }
-}
-
-#endif /* 0 */
-
#endif /* ASD_DEBUG */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 9256ab7b2522..3dd110143471 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -903,7 +903,7 @@ static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
}
/**
- * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR)
+ * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
* @asd_ha: pointer to host adapter structure
*/
static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
@@ -1144,7 +1144,7 @@ static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
}
/**
- * asd_start_timers -- (add and) start timers of SCBs
+ * asd_start_scb_timers -- (add and) start timers of SCBs
* @list: pointer to struct list_head of the scbs
*
* If an SCB in the @list has no timer function, assign the default
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 105adba559a1..297a66770260 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -1244,7 +1244,7 @@ int asd_chk_write_status(struct asd_ha_struct *asd_ha,
}
/**
- * asd_hwi_erase_nv_sector - Erase the flash memory sectors.
+ * asd_erase_nv_sector - Erase the flash memory sectors.
* @asd_ha: pointer to the host adapter structure
* @flash_addr: pointer to offset from flash memory
* @size: total bytes to erase.
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index da6ca2b153d8..9d179cd15bb8 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -612,7 +612,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
return IRQ_HANDLED;
}
/**
- * atp870u_queuecommand - Queue SCSI command
+ * atp870u_queuecommand_lck - Queue SCSI command
* @req_p: request block
* @done: completion function
*
@@ -711,16 +711,15 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
static DEF_SCSI_QCMD(atp870u_queuecommand)
-/**
+/*
* send_s870 - send a command to the controller
- * @host: host
*
* On entry there is work queued to be done. We move some of that work to the
* controller itself.
*
* Caller holds the host lock.
*/
-static void send_s870(struct atp_unit *dev,unsigned char c)
+static void send_s870(struct atp_unit *dev, unsigned char c)
{
struct scsi_cmnd *workreq = NULL;
unsigned int i;//,k;
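The kernel-doc rename above reflects that the visible entry point atp870u_queuecommand is generated by DEF_SCSI_QCMD(), which only takes the host lock and delegates to the _lck variant, so the documentation belongs on the _lck function. A minimal userspace model of that wrapper pattern (struct host, DEF_QCMD and the tag argument are illustrative, not the driver's types):

#include <pthread.h>
#include <stdio.h>

struct host { pthread_mutex_t lock; };

static int queuecommand_lck(struct host *h, int tag)
{
	(void)h;			/* caller holds h->lock */
	printf("queued tag %d\n", tag);
	return 0;
}

#define DEF_QCMD(fn)						\
static int fn(struct host *h, int tag)				\
{								\
	int rc;							\
	pthread_mutex_lock(&h->lock);				\
	rc = fn##_lck(h, tag);					\
	pthread_mutex_unlock(&h->lock);				\
	return rc;						\
}

DEF_QCMD(queuecommand)	/* defines queuecommand() calling queuecommand_lck() */

int main(void)
{
	struct host h = { PTHREAD_MUTEX_INITIALIZER };
	return queuecommand(&h, 1);
}
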
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a13c203ef7a9..0e935c49b57b 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -295,7 +295,7 @@ void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
}
/**
- * beiscsi_set_vlan_tag()- Set the VLAN TAG
+ * beiscsi_iface_config_vlan()- Set the VLAN TAG
* @shost: Scsi Host for the driver instance
* @iface_param: Interface paramters
*
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 90fcddb76f46..22cf7f4b8d8c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4926,13 +4926,13 @@ void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
schedule_work(&phba->boot_work);
}
-/**
+#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
+/*
+ * beiscsi_show_boot_tgt_info()
* Boot flag info for iscsi-utilities
* Bit 0 Block valid flag
* Bit 1 Firmware booting selected
*/
-#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
-
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 0d4928567265..462717bbb5b7 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1256,7 +1256,7 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
}
/**
- * beiscsi_phys_port()- Display Physical Port Identifier
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text port identifier
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index d536270bbe9f..0314e4b9e1fb 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1193,7 +1193,7 @@ enum {
};
/*
- * defintions for CT reason code
+ * definitions for CT reason code
*/
enum {
CT_RSN_INV_CMD = 0x01,
@@ -1240,7 +1240,7 @@ enum {
};
/*
- * defintions for the explanation code for all servers
+ * definitions for the explanation code for all servers
*/
enum {
CT_EXP_AUTH_EXCEPTION = 0xF1,
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 3e117fed95c9..c1baf5cd0d3e 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -217,9 +217,6 @@ struct bfa_vf_event_s {
u32 undefined;
};
-struct bfa_fcs_s;
-struct bfa_fcs_fabric_s;
-
/*
* @todo : need to move to a global config file.
* Maximum Rports supported per port (physical/logical).
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 49a14157f123..b12afcc4b189 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1408,7 +1408,7 @@ static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_fdmi_timeout(void *arg);
-static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld);
static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld);
@@ -1887,6 +1887,8 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
(u8 *) ((struct ct_hdr_s *) pyld
+ 1));
+ if (attr_len < 0)
+ return;
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, (len + attr_len), &fchs,
@@ -1896,17 +1898,20 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
}
-static u16
+static int
bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
- struct bfa_fcs_fdmi_hba_attr_s hba_attr;
- struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
+ struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr;
struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
struct fdmi_attr_s *attr;
+ int len;
u8 *curr_ptr;
- u16 len, count;
- u16 templen;
+ u16 templen, count;
+
+ fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL);
+ if (!fcs_hba_attr)
+ return -ENOMEM;
/*
* get hba attributes
@@ -2148,6 +2153,9 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
+
+ kfree(fcs_hba_attr);
+
return len;
}
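The hunk above moves a large attribute struct off the stack onto the heap and widens the builder's return type so an allocation failure can come back as -ENOMEM, which the send_rhba caller now checks. A compilable sketch of the same shape, with hypothetical names standing in for the bfa types:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct hba_attr { char data[2048]; };	/* stand-in for the large bfa attr struct */

/* Returns payload length, or -ENOMEM: the reason the return type above
 * changed from u16 to int. */
static int build_rhba_pyld(char *pyld, size_t max)
{
	struct hba_attr *attr = calloc(1, sizeof(*attr));	/* was on the stack */
	size_t len;

	if (!attr)
		return -ENOMEM;
	/* ... gather attributes into *attr, then serialize ... */
	len = sizeof(attr->data) < max ? sizeof(attr->data) : max;
	memcpy(pyld, attr->data, len);
	free(attr);
	return (int)len;
}

int main(void)
{
	char buf[4096];
	return build_rhba_pyld(buf, sizeof(buf)) < 0;
}
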
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index fc515424ca88..be8dfbe13e90 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3409,7 +3409,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
drv_fcxp->port = fcs_port->bfad_port;
- if (drv_fcxp->port->bfad == 0)
+ if (!drv_fcxp->port->bfad)
drv_fcxp->port->bfad = bfad;
/* Fetch the bfa_rport - if nexus needed */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 16bb6d2f98de..8863a74e6c57 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1796,7 +1796,7 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
/**
* bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
*
- * @handle: transport handle pointing to adapter struture
+ * @handle: transport handle pointing to adapter structure
*/
static int bnx2fc_ulp_get_stats(void *handle)
{
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index b37b0a9ec12d..0103f811cc25 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1331,7 +1331,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
}
/**
- * bnx2fc_indicae_kcqe - process KCQE
+ * bnx2fc_indicate_kcqe() - process KCQE
*
* @context: adapter structure pointer
* @kcq: kcqe pointer
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a3e2a38aabf2..9200b718085c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -819,7 +819,7 @@ mem_alloc_failure:
}
/**
- * bnx2i_free_session_resc - free qp resources for the session
+ * bnx2fc_free_session_resc - free qp resources for the session
*
* @hba: adapter structure pointer
* @tgt: bnx2fc_rport structure pointer
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index bad396e5c601..43e8a1dafec0 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2206,10 +2206,8 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
{
struct bnx2i_conn *bnx2i_conn;
u32 iscsi_cid;
- char warn_notice[] = "iscsi_warning";
- char error_notice[] = "iscsi_error";
- char additional_notice[64];
- char *message;
+ const char *additional_notice = "";
+ const char *message;
int need_recovery;
u64 err_mask64;
@@ -2224,133 +2222,132 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
if (err_mask64 & iscsi_error_mask) {
need_recovery = 0;
- message = warn_notice;
+ message = "iscsi_warning";
} else {
need_recovery = 1;
- message = error_notice;
+ message = "iscsi_error";
}
switch (iscsi_err->completion_status) {
case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
- strcpy(additional_notice, "hdr digest err");
+ additional_notice = "hdr digest err";
break;
case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
- strcpy(additional_notice, "data digest err");
+ additional_notice = "data digest err";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
- strcpy(additional_notice, "wrong opcode rcvd");
+ additional_notice = "wrong opcode rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
- strcpy(additional_notice, "AHS len > 0 rcvd");
+ additional_notice = "AHS len > 0 rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
- strcpy(additional_notice, "invalid ITT rcvd");
+ additional_notice = "invalid ITT rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
- strcpy(additional_notice, "wrong StatSN rcvd");
+ additional_notice = "wrong StatSN rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
- strcpy(additional_notice, "wrong DataSN rcvd");
+ additional_notice = "wrong DataSN rcvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
- strcpy(additional_notice, "pend R2T violation");
+ additional_notice = "pend R2T violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
- strcpy(additional_notice, "ERL0, UO");
+ additional_notice = "ERL0, UO";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
- strcpy(additional_notice, "ERL0, U1");
+ additional_notice = "ERL0, U1";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
- strcpy(additional_notice, "ERL0, U2");
+ additional_notice = "ERL0, U2";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
- strcpy(additional_notice, "ERL0, U3");
+ additional_notice = "ERL0, U3";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
- strcpy(additional_notice, "ERL0, U4");
+ additional_notice = "ERL0, U4";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
- strcpy(additional_notice, "ERL0, U5");
+ additional_notice = "ERL0, U5";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
- strcpy(additional_notice, "ERL0, U6");
+ additional_notice = "ERL0, U6";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
- strcpy(additional_notice, "invalid resi len");
+ additional_notice = "invalid resi len";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
- strcpy(additional_notice, "MRDSL violation");
+ additional_notice = "MRDSL violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
- strcpy(additional_notice, "F-bit not set");
+ additional_notice = "F-bit not set";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
- strcpy(additional_notice, "invalid TTT");
+ additional_notice = "invalid TTT";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
- strcpy(additional_notice, "invalid DataSN");
+ additional_notice = "invalid DataSN";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
- strcpy(additional_notice, "burst len violation");
+ additional_notice = "burst len violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
- strcpy(additional_notice, "buf offset violation");
+ additional_notice = "buf offset violation";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
- strcpy(additional_notice, "invalid LUN field");
+ additional_notice = "invalid LUN field";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
- strcpy(additional_notice, "invalid R2TSN field");
+ additional_notice = "invalid R2TSN field";
break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
- strcpy(additional_notice, "invalid cmd len1");
+ additional_notice = "invalid cmd len1";
break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
- strcpy(additional_notice, "invalid cmd len2");
+ additional_notice = "invalid cmd len2";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
- strcpy(additional_notice,
- "pend r2t exceeds MaxOutstandingR2T value");
+ additional_notice = "pend r2t exceeds MaxOutstandingR2T value";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
- strcpy(additional_notice, "TTT is rsvd");
+ additional_notice = "TTT is rsvd";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
- strcpy(additional_notice, "MBL violation");
+ additional_notice = "MBL violation";
break;
#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
- strcpy(additional_notice, "data seg len != 0");
+ additional_notice = "data seg len != 0";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
- strcpy(additional_notice, "reject pdu len error");
+ additional_notice = "reject pdu len error";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
- strcpy(additional_notice, "async pdu len error");
+ additional_notice = "async pdu len error";
break;
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
- strcpy(additional_notice, "nopin pdu len error");
+ additional_notice = "nopin pdu len error";
break;
#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
case BNX2_ERR_PEND_R2T_IN_CLEANUP:
- strcpy(additional_notice, "pend r2t in cleanup");
+ additional_notice = "pend r2t in cleanup";
break;
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
- strcpy(additional_notice, "IP fragments rcvd");
+ additional_notice = "IP fragments rcvd";
break;
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
- strcpy(additional_notice, "IP options error");
+ additional_notice = "IP options error";
break;
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
- strcpy(additional_notice, "urgent flag error");
+ additional_notice = "urgent flag error";
break;
default:
printk(KERN_ALERT "iscsi_err - unknown err %x\n",
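Replacing strcpy() into a fixed 64-byte buffer with pointers to string literals drops dozens of copies and removes any truncation or overflow risk. The pattern, reduced to a standalone example (the helper and status values here are invented for illustration):

#include <stdio.h>

/* Each case now just points at a literal; nothing is copied and the
 * message can be any length. */
static const char *additional_notice(int completion_status)
{
	switch (completion_status) {
	case 1:  return "hdr digest err";
	case 2:  return "data digest err";
	case 3:  return "pend r2t exceeds MaxOutstandingR2T value";
	default: return "";
	}
}

int main(void)
{
	printf("iscsi_error - %s\n", additional_notice(3));
	return 0;
}
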
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 3dc790089f0f..bea00073cb7c 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -104,7 +104,7 @@ static ssize_t bnx2i_show_ccell_info(struct device *dev,
/**
- * bnx2i_get_link_state - set command cell (HQ) size
+ * bnx2i_set_ccell_info - set command cell (HQ) size
* @dev: device pointer
* @attr: device attribute (unused)
* @buf: buffer to return current SQ size parameter
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index 1df8891d3725..86fded97d799 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -244,7 +244,7 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
* firmware memory address, length and host buffer must be aligned on
- * 32-bit boudaries. The memory is transferred as a raw byte sequence
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
* from/to the firmware's memory. If this memory contains data
* structures which contain multi-byte integers, it's the callers
* responsibility to perform appropriate byte order conversions.
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 55e74da2f3cb..56b9ad0a1ca0 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -147,9 +147,9 @@ csio_scsi_itnexus_loss_error(uint16_t error)
case FW_ERR_RDEV_LOST:
case FW_ERR_RDEV_LOGO:
case FW_ERR_RDEV_IMPL_LOGO:
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 37d99357120f..203f938fca7e 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1177,7 +1177,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
}
/**
- * cxgb3i_setup_conn_digest - setup conn. digest setting
+ * ddp_setup_conn_digest - setup conn. digest setting
* @csk: cxgb tcp socket
* @tid: connection id
* @hcrc: header digest enabled
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index e72440d919d2..dc36531d589e 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1357,7 +1357,7 @@ cxlflash_sync_err_irq_exit:
/**
* process_hrrq() - process the read-response queue
- * @afu: AFU associated with the host.
+ * @hwq: HWQ associated with the host.
* @doneq: Queue of commands harvested from the RRQ.
* @budget: Threshold of RRQ entries to process.
*
@@ -1997,7 +1997,7 @@ out:
/**
* init_mc() - create and register as the master context
* @cfg: Internal structure associated with the host.
- * index: HWQ Index of the master context.
+ * @index: HWQ Index of the master context.
*
* Return: 0 on success, -errno on failure
*/
@@ -3294,7 +3294,7 @@ static char *decode_hioctl(unsigned int cmd)
/**
* cxlflash_lun_provision() - host LUN provisioning handler
* @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
+ * @lunprov: Kernel copy of userspace ioctl data structure.
*
* Return: 0 on success, -errno on failure
*/
@@ -3385,7 +3385,7 @@ out:
/**
* cxlflash_afu_debug() - host AFU debug handler
* @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
+ * @afu_dbg: Kernel copy of userspace ioctl data structure.
*
* For debug requests requiring a data buffer, always provide an aligned
* (cache line) buffer to the AFU to appease any alignment requirements.
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 5dddf67dfa24..ee11ec340654 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -30,7 +30,7 @@ struct cxlflash_global global;
/**
* marshal_rele_to_resize() - translate release to resize structure
- * @rele: Source structure from which to translate/copy.
+ * @release: Source structure from which to translate/copy.
* @resize: Destination structure for the translate/copy.
*/
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
@@ -44,7 +44,7 @@ static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
/**
* marshal_det_to_rele() - translate detach to release structure
* @detach: Destination structure for the translate/copy.
- * @rele: Source structure from which to translate/copy.
+ * @release: Source structure from which to translate/copy.
*/
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
struct dk_cxlflash_release *release)
@@ -517,7 +517,7 @@ void rhte_checkin(struct ctx_info *ctxi,
}
/**
- * rhte_format1() - populates a RHTE for format 1
+ * rht_format1() - populates a RHTE for format 1
* @rhte: RHTE to populate.
* @lun_id: LUN ID of LUN associated with RHTE.
* @perm: Desired permissions for RHTE.
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index f1406ac77b0d..01917b28cdb6 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -41,7 +41,7 @@ static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
/**
* marshal_clone_to_rele() - translate clone to release structure
* @clone: Source structure from which to translate/copy.
- * @rele: Destination structure for the translate/copy.
+ * @release: Destination structure for the translate/copy.
*/
static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
struct dk_cxlflash_release *release)
@@ -229,7 +229,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
/**
* validate_alloc() - validates the specified block has been allocated
- * @ba_lun_info: LUN info owning the block allocator.
+ * @bali: LUN info owning the block allocator.
* @aun: Block to validate.
*
* Return: 0 on success, -1 on failure
@@ -300,7 +300,7 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
/**
* ba_clone() - Clone a chunk of the block allocation table
* @ba_lun: Block allocator from which to allocate a block.
- * @to_free: Block to free.
+ * @to_clone: Block to clone.
*
* Return: 0 on success, -1 on failure
*/
@@ -361,7 +361,7 @@ void cxlflash_ba_terminate(struct ba_lun *ba_lun)
/**
* init_vlun() - initializes a LUN for virtual use
- * @lun_info: LUN information structure that owns the block allocator.
+ * @lli: LUN information structure that owns the block allocator.
*
* Return: 0 on success, -errno on failure
*/
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 3ea345c12467..be87d5a7583d 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -958,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
/**
- * dc395x_queue_command - queue scsi command passed from the mid
+ * dc395x_queue_command_lck - queue scsi command passed from the mid
* layer, invoke 'done' on completion
*
* @cmd: pointer to scsi command object
@@ -2918,7 +2918,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
} else {
if ((srb->state & (SRB_START_ + SRB_MSGOUT))
|| !(srb->
- state & (SRB_DISCONNECT + SRB_COMPLETED))) {
+ state & (SRB_DISCONNECT | SRB_COMPLETED))) {
/*
* Selection time out
* SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
@@ -3258,10 +3258,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/*
* target status..........................
*/
- if (status_byte(status) == CHECK_CONDITION) {
+ if (status >> 1 == CHECK_CONDITION) {
request_sense(acb, dcb, srb);
return;
- } else if (status_byte(status) == QUEUE_FULL) {
+ } else if (status >> 1 == QUEUE_FULL) {
tempcnt = (u8)list_size(&dcb->srb_going_list);
dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
dcb->target_id, dcb->target_lun, tempcnt);
@@ -4248,7 +4248,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
/**
- * adapter_init_host - Initialize the scsi host instance based on
+ * adapter_init_scsi_host - Initialize the scsi host instance based on
* values that we have already stored in the adapter instance. There's
* some mention that a lot of these are deprecated, so we won't use
* them (we'll use the ones in the adapter instance) but we'll fill
@@ -4336,13 +4336,14 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
/**
- * init_adapter - Grab the resource for the card, setup the adapter
+ * adapter_init - Grab the resource for the card, setup the adapter
* information, set the card into a known state, create the various
* tables etc etc. This basically gets all adapter information all up
* to date, initialised and gets the chip in sync with it.
*
- * @host: This hosts adapter structure
+ * @acb: The adapter which we are to init.
* @io_port: The base I/O port
+ * @io_port_len: The I/O port size
* @irq: IRQ
*
* Returns 0 if the initialization succeeds, any other value on
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index ea436a14087f..efa8c0381476 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -405,8 +405,8 @@ static char print_alua_state(unsigned char state)
}
}
-static int alua_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
{
struct alua_dh_data *h = sdev->handler_data;
struct alua_port_group *pg;
@@ -515,6 +515,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
int len, k, off, bufflen = ALUA_RTPG_SIZE;
+ int group_id_old, state_old, pref_old, valid_states_old;
unsigned char *desc, *buff;
unsigned err, retval;
unsigned int tpg_desc_tbl_off;
@@ -522,6 +523,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
unsigned long flags;
bool transitioning_sense = false;
+ group_id_old = pg->group_id;
+ state_old = pg->state;
+ pref_old = pg->pref;
+ valid_states_old = pg->valid_states;
+
if (!pg->expiry) {
unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -573,10 +579,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
* even though it shouldn't according to T10.
* The retry without rtpg_ext_hdr_req set
* handles this.
+ * Note: some arrays return a sense key of ILLEGAL_REQUEST
+ * with ASC 00h if they don't support the extended header.
*/
if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
- sense_hdr.sense_key == ILLEGAL_REQUEST &&
- sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+ sense_hdr.sense_key == ILLEGAL_REQUEST) {
pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
goto retry;
}
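The relaxed check above retries RTPG without the extended header on any ILLEGAL_REQUEST, latching ALUA_RTPG_EXT_HDR_UNSUPP so later polls skip the extended form entirely. A toy model of that latch-and-retry flow (rtpg() here merely simulates an array that rejects the extended form):

#include <stdbool.h>

struct pg { bool ext_hdr_unsupp; };

static int rtpg(bool ext_hdr)
{
	return ext_hdr ? -1 : 0;	/* simulated: extended form rejected */
}

static int rtpg_with_fallback(struct pg *pg)
{
retry:
	if (rtpg(!pg->ext_hdr_unsupp)) {
		if (!pg->ext_hdr_unsupp) {
			pg->ext_hdr_unsupp = true;	/* remembered for later polls */
			goto retry;
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	struct pg pg = { false };
	return rtpg_with_fallback(&pg);
}
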
@@ -686,17 +693,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
if (transitioning_sense)
pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
- sdev_printk(KERN_INFO, sdev,
- "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
- ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
- pg->pref ? "preferred" : "non-preferred",
- pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
- pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
- pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
- pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
- pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
- pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
- pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+ if (group_id_old != pg->group_id || state_old != pg->state ||
+ pref_old != pg->pref || valid_states_old != pg->valid_states)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
+ ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
+ pg->pref ? "preferred" : "non-preferred",
+ pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
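The logging change above snapshots group_id, state, pref and valid_states before the update and prints only when one of them actually changed, so periodic RTPG polling no longer repeats an identical line. A minimal model, assuming a plain struct of the four fields:

#include <stdio.h>
#include <string.h>

struct pg_state { int group_id, state, pref, valid_states; };

static void update_and_log(struct pg_state *pg, const struct pg_state *new_state)
{
	struct pg_state old = *pg;

	*pg = *new_state;
	if (memcmp(&old, pg, sizeof(old)))	/* log only on change */
		printf("port group %02x state %d %s\n",
		       (unsigned)pg->group_id, pg->state,
		       pg->pref ? "preferred" : "non-preferred");
}

int main(void)
{
	struct pg_state pg = { 0x11, 0, 1, 0x7f };
	struct pg_state unchanged = pg;
	struct pg_state changed = { 0x11, 2, 1, 0x7f };

	update_and_log(&pg, &unchanged);	/* silent */
	update_and_log(&pg, &changed);		/* prints once */
	return 0;
}
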
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index caa685cfe3d4..bd28ec6cfb72 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -280,8 +280,8 @@ static int send_trespass_cmd(struct scsi_device *sdev,
return res;
}
-static int clariion_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static enum scsi_disposition clariion_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
{
switch (sense_hdr->sense_key) {
case NOT_READY:
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 5efc959493ec..25f6e1ac9e7b 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -656,8 +656,8 @@ static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
return BLK_STS_OK;
}
-static int rdac_check_sense(struct scsi_device *sdev,
- struct scsi_sense_hdr *sense_hdr)
+static enum scsi_disposition rdac_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
{
struct rdac_dh_data *h = sdev->handler_data;
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index b545798e400c..d6c87a0bae09 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -101,6 +101,11 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
}
}
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+
/*
* the master logging function. this function will format the message as
* outlined by the formatting string, the input device information and the
@@ -170,6 +175,8 @@ static int esas2r_log_master(const long level,
return 0;
}
+#pragma GCC diagnostic pop
+
/*
* formats and logs a message to the system log.
*
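The push/ignored/pop pair above scopes a GCC-only diagnostic to a single function instead of the whole file; the __clang__ guard is needed because clang does not know -Wsuggest-attribute=format. The same shape in a standalone program (log_master() below is a stand-in, not the esas2r function):

#include <stdarg.h>
#include <stdio.h>

#pragma GCC diagnostic push
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/* Forwarding to vprintf() without a format attribute is what triggers
 * the suggestion this pragma silences. */
static void log_master(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

#pragma GCC diagnostic pop

int main(void)
{
	log_master("level=%d\n", 1);
	return 0;
}
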
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 007ccef5d1e2..342535ac0570 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -647,7 +647,7 @@ static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
ent->sense_ptr = NULL;
}
-/* When a contingent allegiance conditon is created, we force feed a
+/* When a contingent allegiance condition is created, we force feed a
* REQUEST_SENSE command to the device to fetch the sense data. I
* tried many other schemes, relying on the scsi error handling layer
* to send out the REQUEST_SENSE automatically, but this was difficult
@@ -1341,7 +1341,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
bytes_sent -= esp->send_cmd_residual;
/*
- * The am53c974 has a DMA 'pecularity'. The doc states:
+ * The am53c974 has a DMA 'peculiarity'. The doc states:
* In some odd byte conditions, one residual byte will
* be left in the SCSI FIFO, and the FIFO Flags will
* never count to '0 '. When this happens, the residual
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 03bf49adaafe..89ec735929c3 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2771,7 +2771,7 @@ static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
}
/**
- * fcoe_vport_set_symbolic_name() - append vport string to symbolic name
+ * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
* @vport: fc_vport with a new symbolic name string
*
* After generating a new symbolic name string, a new RSPN_ID request is
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5ea426effa60..1756a0ac6f08 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1302,7 +1302,7 @@ drop:
}
/**
- * fcoe_ctlr_recv_els() - Handle an incoming link reset frame
+ * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame
* @fip: The FCoE controller that received the frame
* @skb: The received FIP packet
*
@@ -2952,7 +2952,7 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
}
/**
- * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
+ * fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification.
* @fip: The FCoE controller
* @frport: The newly-parsed FCoE rport from the Discovery Request
*
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index b927b3d84523..4d0e19e7c84b 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -863,7 +863,7 @@ static int fcoe_transport_create(const char *buffer,
int rc = -ENODEV;
struct net_device *netdev = NULL;
struct fcoe_transport *ft = NULL;
- enum fip_mode fip_mode = (enum fip_mode)kp->arg;
+ enum fip_mode fip_mode = (enum fip_mode)(uintptr_t)kp->arg;
mutex_lock(&ft_mutex);
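Casting a pointer-sized module-param cookie straight to an enum can warn when the enum is narrower than a pointer; bouncing through uintptr_t, as in the hunk above, is the portable spelling. A self-contained illustration (the handler and its call are hypothetical):

#include <stdint.h>
#include <stdio.h>

enum fip_mode { FIP_MODE_AUTO, FIP_MODE_FABRIC, FIP_MODE_VN2VN };

static void create_handler(void *arg)
{
	/* (enum fip_mode)arg alone warns: the enum is narrower than a pointer */
	enum fip_mode fip_mode = (enum fip_mode)(uintptr_t)arg;

	printf("fip_mode=%d\n", (int)fip_mode);
}

int main(void)
{
	create_handler((void *)(uintptr_t)FIP_MODE_VN2VN);
	return 0;
}
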
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 6c049360f136..e7326505cabb 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -58,8 +58,7 @@ int fnic_debugfs_init(void)
fnic_trace_debugfs_root);
/* Allocate memory to structure */
- fc_trc_flag = (struct fc_trace_flag_type *)
- vmalloc(sizeof(struct fc_trace_flag_type));
+ fc_trc_flag = vmalloc(sizeof(struct fc_trace_flag_type));
if (fc_trc_flag) {
fc_trc_flag->fc_row_file = 0;
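vmalloc(), like malloc(), returns void *, which converts implicitly in C, so the cast removed above was pure noise. The userspace equivalent of the preferred idiom:

#include <stdlib.h>

struct fc_trace_flag_type { int fc_row_file; };

int main(void)
{
	/* no cast on the void * return, and sizeof(*ptr) instead of
	 * repeating the type name */
	struct fc_trace_flag_type *fc_trc_flag = malloc(sizeof(*fc_trc_flag));

	if (fc_trc_flag)
		fc_trc_flag->fc_row_file = 0;
	free(fc_trc_flag);
	return 0;
}
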
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index e0cee4dcb439..1885218f9d15 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -296,7 +296,7 @@ void fnic_handle_event(struct work_struct *work)
}
/**
- * Check if the Received FIP FLOGI frame is rejected
+ * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
* @fip: The FCoE controller that received the frame
* @skb: The received FIP frame
*
@@ -1343,9 +1343,10 @@ void fnic_handle_fip_timer(struct fnic *fnic)
if (list_empty(&fnic->vlans)) {
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* no vlans available, try again */
- if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
+ if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
+ if (printk_ratelimit())
+ shost_printk(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
return;
}
@@ -1363,9 +1364,10 @@ void fnic_handle_fip_timer(struct fnic *fnic)
case FIP_VLAN_FAILED:
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* if all vlans are in failed state, restart vlan disc */
- if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
+ if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
+ if (printk_ratelimit())
+ shost_printk(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
break;
case FIP_VLAN_SENT:
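Both hunks above test the FNIC_FCS_LOGGING mask before calling printk_ratelimit(), so the shared rate-limit budget is no longer consumed by messages that logging level would filter anyway. A sketch of that ordering with a fake limiter (FCS_DBG, LOG_FCS and ratelimit() are stand-ins):

#include <stdio.h>

#define LOG_FCS 0x1
static unsigned int log_level = LOG_FCS;

static int ratelimit(void)		/* fake printk_ratelimit() */
{
	static int budget = 3;
	return budget-- > 0;
}

/* Level check first, limiter second: disabled messages no longer
 * burn the budget. */
#define FCS_DBG(fmt, ...)					\
	do {							\
		if (log_level & LOG_FCS)			\
			if (ratelimit())			\
				fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	for (int i = 0; i < 5; i++)
		FCS_DBG("Start VLAN Discovery %d\n", i);
	return 0;
}
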
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 186c3ab4456b..786f9d2704b6 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1100,9 +1100,6 @@ static int __init fnic_init_module(void)
goto err_create_fnic_workq;
}
- spin_lock_init(&fnic_list_lock);
- INIT_LIST_HEAD(&fnic_list);
-
fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
if (!fnic_fip_queue) {
printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 36744968378f..e619a82f921b 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -173,7 +173,7 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
}
-/**
+/*
* __fnic_set_state_flags
* Sets/Clears bits in fnic's state_flags
**/
@@ -2287,7 +2287,7 @@ clean_pending_aborts_end:
return ret;
}
-/**
+/*
* fnic_scsi_host_start_tag
* Allocates tagid from host's tag list
**/
@@ -2307,7 +2307,7 @@ fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
return dummy->tag;
}
-/**
+/*
* fnic_scsi_host_end_tag
* frees tag allocated by fnic_scsi_host_start_tag.
**/
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 9d52d83161ed..4a7536bb0ab3 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -153,7 +153,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
if (rd_idx > (fnic_max_trace_entries-1))
rd_idx = 0;
/*
- * Continure dumpping trace buffer entries into
+ * Continue dumping trace buffer entries into
* memory file till rd_idx reaches write index
*/
if (rd_idx == wr_idx)
@@ -189,7 +189,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
tbp->data[3], tbp->data[4]);
rd_idx++;
/*
- * Continue dumpping trace buffer entries into
+ * Continue dumping trace buffer entries into
* memory file till rd_idx reaches write index
*/
if (rd_idx == wr_idx)
@@ -632,7 +632,7 @@ void fnic_fc_trace_free(void)
* fnic_fc_ctlr_set_trace_data:
* Maintain rd & wr idx accordingly and set data
* Passed parameters:
- * host_no: host number accociated with fnic
+ * host_no: host number associated with fnic
* frame_type: send_frame, rece_frame or link event
* fc_frame: pointer to fc_frame
* frame_len: Length of the fc_frame
@@ -715,13 +715,13 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
* fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file
* Passed parameter:
* @fnic_dbgfs_t: pointer to debugfs trace buffer
- * rdata_flag: 1 => Unformated file
- * 0 => formated file
+ * rdata_flag: 1 => Unformatted file
+ * 0 => formatted file
* Description:
* This routine will copy the trace data to memory file with
* proper formatting and also copy to another memory
- * file without formatting for further procesing.
- * Retrun Value:
+ * file without formatting for further processing.
+ * Return Value:
* Number of bytes that were dumped into fnic_dbgfs_t
*/
@@ -785,10 +785,10 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
* @fc_trace_hdr_t: pointer to trace data
* @fnic_dbgfs_t: pointer to debugfs trace buffer
* @orig_len: pointer to len
- * rdata_flag: 0 => Formated file, 1 => Unformated file
+ * rdata_flag: 0 => Formatted file, 1 => Unformatted file
* Description:
* This routine will format and copy the passed trace data
- * for formated file or unformated file accordingly.
+ * for formatted file or unformatted file accordingly.
*/
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2401a9575215..cf879cc59e4c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -346,8 +346,7 @@ struct hisi_sas_hw {
u8 reg_index, u8 reg_count, u8 *write_data);
void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
int delay_ms, int timeout_ms);
- void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
- void (*snapshot_restore)(struct hisi_hba *hisi_hba);
+ void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
int complete_hdr_size;
struct scsi_host_template *sht;
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a979edfd9a78..5a204074099c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1341,10 +1341,12 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
rc = hisi_sas_exec_internal_tmf_task(device, fis,
s, NULL);
if (rc != TMF_RESP_FUNC_COMPLETE)
- dev_err(dev, "ata disk de-reset failed\n");
+ dev_err(dev, "ata disk %016llx de-reset failed\n",
+ SAS_ADDR(device->sas_addr));
}
} else {
- dev_err(dev, "ata disk reset failed\n");
+ dev_err(dev, "ata disk %016llx reset failed\n",
+ SAS_ADDR(device->sas_addr));
}
if (rc == TMF_RESP_FUNC_COMPLETE)
@@ -1568,21 +1570,26 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
-static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
- struct Scsi_Host *shost = hisi_hba->shost;
- int rc;
-
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-
if (!hisi_hba->hw->soft_reset)
return -1;
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
return -1;
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
+
+ return 0;
+}
+
+static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ int rc;
+
dev_info(dev, "controller resetting...\n");
hisi_sas_controller_reset_prepare(hisi_hba);
@@ -2471,6 +2478,9 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, rst_work);
+ if (hisi_sas_controller_prereset(hisi_hba))
+ return;
+
hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
@@ -2480,8 +2490,12 @@ void hisi_sas_sync_rst_work_handler(struct work_struct *work)
struct hisi_sas_rst *rst =
container_of(work, struct hisi_sas_rst, work);
+ if (hisi_sas_controller_prereset(rst->hisi_hba))
+ goto rst_complete;
+
if (!hisi_sas_controller_reset(rst->hisi_hba))
rst->done = true;
+rst_complete:
complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
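With the debugfs snapshot moved into hisi_sas_controller_prereset(), the synchronous work handler must still signal its completion even when prereset bails out, hence the new rst_complete label. A reduced model of that always-complete control flow (prereset() and reset() are simulated):

#include <stdbool.h>
#include <stdio.h>

static int prereset(void)
{
	return -1;	/* simulated: e.g. a reset is already in flight */
}

static int reset(void)
{
	return 0;
}

static void sync_rst_work(bool *done, int *completion)
{
	if (prereset())
		goto rst_complete;	/* skip the reset, never the completion */
	if (!reset())
		*done = true;
rst_complete:
	*completion = 1;		/* stands in for complete(rst->completion) */
}

int main(void)
{
	bool done = false;
	int completion = 0;

	sync_rst_work(&done, &completion);
	printf("done=%d completed=%d\n", done, completion);
	return 0;
}
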
@@ -2689,12 +2703,14 @@ int hisi_sas_probe(struct platform_device *pdev,
rc = hisi_hba->hw->hw_init(hisi_hba);
if (rc)
- goto err_out_register_ha;
+ goto err_out_hw_init;
scsi_scan_host(shost);
return 0;
+err_out_hw_init:
+ sas_unregister_ha(sha);
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 7451377c4cb6..3e359ac752fd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1646,7 +1646,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = i * HISI_SAS_PHY_INT_NR;
for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (!irq) {
+ if (irq < 0) {
dev_err(dev, "irq init: fail map phy interrupt %d\n",
idx);
return -ENOENT;
@@ -1665,7 +1665,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (!irq) {
+ if (irq < 0) {
dev_err(dev, "irq init: could not map cq interrupt %d\n",
idx);
return -ENOENT;
@@ -1683,7 +1683,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (!irq) {
+ if (irq < 0) {
dev_err(dev, "irq init: could not map fatal interrupt %d\n",
idx);
return -ENOENT;
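platform_get_irq() reports failure as a negative errno, so testing !irq can never catch it; the fix above checks irq < 0 instead. Illustrated with a fake lookup function (fake_platform_get_irq() is obviously not the kernel API):

#include <errno.h>
#include <stdio.h>

static int fake_platform_get_irq(int idx)
{
	return idx < 2 ? 32 + idx : -ENXIO;	/* failure is a negative errno */
}

int main(void)
{
	for (int idx = 0; idx < 3; idx++) {
		int irq = fake_platform_get_irq(idx);

		if (irq < 0) {	/* was "if (!irq)", never true for -ENXIO */
			fprintf(stderr, "irq init: fail map interrupt %d\n", idx);
			return 1;
		}
		printf("idx %d -> irq %d\n", idx, irq);
	}
	return 0;
}
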
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 4580e081e489..499c770d405c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -531,6 +531,7 @@ module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
static void debugfs_work_handler_v3_hw(struct work_struct *work);
+static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
@@ -1717,8 +1718,11 @@ static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
int i;
irq_value &= ~irq_msk;
- if (!irq_value)
+ if (!irq_value) {
+ dev_warn(dev, "phy%d channel int 1 received with status bits cleared\n",
+ phy_no);
return;
+ }
for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
const struct hisi_sas_hw_error *error = &port_axi_error[i];
@@ -1779,8 +1783,11 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
BIT(CHL_INT2_RX_INVLD_DW_OFF);
irq_value &= ~irq_msk;
- if (!irq_value)
+ if (!irq_value) {
+ dev_warn(dev, "phy%d channel int 2 received with status bits cleared\n",
+ phy_no);
return;
+ }
if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
dev_warn(dev, "phy%d identify timeout\n", phy_no);
@@ -2252,8 +2259,9 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
slot_err_v3_hw(hisi_hba, task, slot);
if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task, sas_dev->device_id,
+ SAS_ADDR(device->sas_addr),
dw0, dw1, complete_hdr->act, dw3,
error_info[0], error_info[1],
error_info[2], error_info[3]);
@@ -3181,6 +3189,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.get_events = phy_get_events_v3_hw,
.write_gpio = write_gpio_v3_hw,
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
+ .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw,
};
static struct Scsi_Host *
@@ -3665,6 +3674,19 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
{
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
+
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
+ return;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
+ hisi_hba->debugfs_dump_index++;
+
debugfs_snapshot_prepare_v3_hw(hisi_hba);
debugfs_snapshot_global_reg_v3_hw(hisi_hba);
@@ -4407,20 +4429,8 @@ static void debugfs_work_handler_v3_hw(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, debugfs_work);
- int debugfs_dump_index = hisi_hba->debugfs_dump_index;
- struct device *dev = hisi_hba->dev;
- u64 timestamp = local_clock();
-
- if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
- dev_warn(dev, "dump count exceeded!\n");
- return;
- }
-
- do_div(timestamp, NSEC_PER_MSEC);
- hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
debugfs_snapshot_regs_v3_hw(hisi_hba);
- hisi_hba->debugfs_dump_index++;
}
static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
@@ -4760,7 +4770,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = hisi_sas_v3_init(hisi_hba);
if (rc)
- goto err_out_register_ha;
+ goto err_out_hw_init;
scsi_scan_host(shost);
@@ -4777,6 +4787,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_out_hw_init:
+ sas_unregister_ha(sha);
err_out_register_ha:
scsi_remove_host(shost);
err_out_free_irq_vectors:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 61831f2fdb30..6540d48eb0e8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -326,6 +326,7 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
/**
* ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vhost: ibmvfc host struct
* @vfc_cmd: ibmvfc command struct
*
* Return value:
@@ -603,8 +604,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
+ case IBMVFC_HOST_ACTION_REENABLE:
+ case IBMVFC_HOST_ACTION_RESET:
+ vhost->action = action;
+ break;
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_LOGO:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ case IBMVFC_HOST_ACTION_NONE:
+ default:
switch (vhost->action) {
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
@@ -614,15 +624,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
}
break;
- case IBMVFC_HOST_ACTION_LOGO:
- case IBMVFC_HOST_ACTION_QUERY_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
- case IBMVFC_HOST_ACTION_NONE:
- case IBMVFC_HOST_ACTION_RESET:
- case IBMVFC_HOST_ACTION_REENABLE:
- default:
- vhost->action = action;
- break;
}
}
@@ -650,8 +651,6 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
/**
* ibmvfc_del_tgt - Schedule cleanup and removal of the target
* @tgt: ibmvfc target struct
- * @job_step: job step to perform
- *
**/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
@@ -768,6 +767,8 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
/**
* ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
* @vhost: ibmvfc host who owns the event pool
+ * @queue: ibmvfc queue struct
+ * @size: pool size
*
* Returns zero on success.
**/
@@ -820,6 +821,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
/**
* ibmvfc_free_event_pool - Frees memory of the event pool of a host
* @vhost: ibmvfc host who owns the event pool
+ * @queue: ibmvfc queue struct
*
**/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
@@ -1414,6 +1416,7 @@ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
/**
* ibmvfc_gather_partition_info - Gather info about the LPAR
+ * @vhost: ibmvfc host struct
*
* Return value:
* none
@@ -1484,7 +1487,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
/**
* ibmvfc_get_event - Gets the next free event in pool
- * @vhost: ibmvfc host struct
+ * @queue: ibmvfc queue struct
*
* Returns a free event from the pool.
**/
@@ -1631,7 +1634,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
/**
* ibmvfc_timeout - Internal command timeout handler
- * @evt: struct ibmvfc_event that timed out
+ * @t: struct ibmvfc_event that timed out
*
* Called when an internally generated command times out
**/
@@ -1892,8 +1895,8 @@ static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct s
/**
* ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @shost: scsi host struct
* @cmnd: struct scsi_cmnd to be executed
- * @done: Callback function to be called when cmnd is completed
*
* Returns:
* 0 on success / other on failure
@@ -2324,7 +2327,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
/**
* ibmvfc_match_rport - Match function for specified remote port
* @evt: ibmvfc event struct
- * @device: device to match (rport)
+ * @rport: device to match
*
* Returns:
* 1 if event matches rport / 0 if event does not match rport
@@ -3176,8 +3179,9 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
* ibmvfc_handle_crq - Handles and frees received events in the CRQ
* @crq: Command/Response queue
* @vhost: ibmvfc host struct
+ * @evt_doneq: Event done queue
*
- **/
+**/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
struct list_head *evt_doneq)
{
@@ -3358,7 +3362,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
* ibmvfc_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
- * @reason: calling context
*
* Return value:
* actual depth set
@@ -3430,6 +3433,7 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
/**
* ibmvfc_show_log_level - Show the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -3452,7 +3456,9 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
/**
* ibmvfc_store_log_level - Change the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
+ * @count: buffer size
*
* Return value:
* number of bytes printed to buffer
@@ -3530,7 +3536,7 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
@@ -4162,6 +4168,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
/**
* __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
* @tgt: ibmvfc target struct
+ * @done: Routine to call when the event is responded to
*
* Returns:
* Allocated and initialized ibmvfc_event struct
@@ -4478,7 +4485,7 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
/**
* ibmvfc_adisc_timeout - Handle an ADISC timeout
- * @tgt: ibmvfc target struct
+ * @t: ibmvfc target struct
*
* If an ADISC times out, send a cancel. If the cancel times
* out, reset the CRQ. When the ADISC comes back as cancelled,
@@ -4681,7 +4688,7 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
/**
* ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
* @vhost: ibmvfc host struct
- * @scsi_id: SCSI ID to allocate target for
+ * @target: Holds SCSI ID to allocate target for and the WWPN

*
* Returns:
* 0 on success / other on failure
@@ -5111,7 +5118,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
/**
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
- * @vhost: ibmvfc host struct
+ * @evt: ibmvfc event struct
*
**/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
@@ -5373,30 +5380,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
case IBMVFC_HOST_ACTION_RESET:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_complete_purge(&purge);
rc = ibmvfc_reset_crq(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc == H_CLOSED)
+ if (!rc || rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
- if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
+ /*
+ * The only action we could have changed to would have
+ * been reenable, in which case, we skip the rest of
+ * this path and wait until we've done the re-enable
+ * before sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_REENABLE:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_complete_purge(&purge);
rc = ibmvfc_reenable_crq_queue(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
+ /*
+ * The only action we could have changed to would have
+ * been reset, in which case, we skip the rest of this
+ * path and wait until we've done the reset before
+ * sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_LOGO:
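Both the RESET and REENABLE arms above drop the host lock for the slow CRQ operation and, once the lock is retaken, advance to TGT_DEL only if vhost->action is still what it was; otherwise the competing reset or re-enable is allowed to run before the CRQ init is sent. A compact pthread model of that recheck-after-relock pattern (types and names are illustrative):

#include <pthread.h>
#include <stdio.h>

enum action { ACT_RESET, ACT_REENABLE, ACT_TGT_DEL };

struct vhost { pthread_mutex_t lock; enum action action; };

static void do_reset(struct vhost *v)	/* called with v->lock held */
{
	pthread_mutex_unlock(&v->lock);
	/* ... slow CRQ reset runs unlocked; action may change meanwhile ... */
	pthread_mutex_lock(&v->lock);

	if (v->action == ACT_RESET) {	/* still ours: advance the state machine */
		v->action = ACT_TGT_DEL;
		/* ... send CRQ init ... */
	}
	/* else a re-enable was queued meanwhile; let it run first */
}

int main(void)
{
	struct vhost v = { PTHREAD_MUTEX_INITIALIZER, ACT_RESET };

	pthread_mutex_lock(&v.lock);
	do_reset(&v);
	pthread_mutex_unlock(&v.lock);
	printf("action=%d\n", (int)v.action);
	return 0;
}
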
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 77fafb1bc173..e75b0068ad84 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -130,9 +130,10 @@ static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
}
/**
- * release_crq_queue: - Deallocates data and unregisters CRQ
- * @queue: crq_queue to initialize and register
- * @host_data: ibmvscsi_host_data of host
+ * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ * @max_requests: maximum requests (unused)
*
* Frees irq, deallocates a page for messages, unmaps dma, and unregisters
* the crq with the hypervisor.
@@ -276,10 +277,9 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
}
/**
- * reset_crq_queue: - resets a crq after a failure
+ * ibmvscsi_reset_crq_queue() - resets a crq after a failure
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
- *
*/
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
@@ -314,9 +314,10 @@ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
}
/**
- * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
+ * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor
+ * @queue: crq_queue to initialize and register
+ * @hostdata: ibmvscsi_host_data of host
+ * @max_requests: maximum requests (unused)
*
* Allocates a page for messages, maps it for dma, and registers
* the crq with the hypervisor.
@@ -404,10 +405,9 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
}
/**
- * reenable_crq_queue: - reenables a crq after
+ * ibmvscsi_reenable_crq_queue() - reenables a crq after
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
- *
*/
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
@@ -439,7 +439,7 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
* @hostdata: ibmvscsi_host_data who owns the event pool
*
* Returns zero on success.
-*/
+ */
static int initialize_event_pool(struct event_pool *pool,
int size, struct ibmvscsi_host_data *hostdata)
{
@@ -478,12 +478,12 @@ static int initialize_event_pool(struct event_pool *pool,
}
/**
- * release_event_pool: - Frees memory of an event pool of a host
+ * release_event_pool() - Frees memory of an event pool of a host
* @pool: event_pool to be released
 * @hostdata: ibmvscsi_host_data who owns the event pool
*
* Returns zero on success.
-*/
+ */
static void release_event_pool(struct event_pool *pool,
struct ibmvscsi_host_data *hostdata)
{
@@ -526,11 +526,10 @@ static int valid_event_struct(struct event_pool *pool,
}
/**
- * ibmvscsi_free-event_struct: - Changes status of event to "free"
+ * free_event_struct() - Changes status of event to "free"
* @pool: event_pool that contains the event
* @evt: srp_event_struct to be modified
- *
-*/
+ */
static void free_event_struct(struct event_pool *pool,
struct srp_event_struct *evt)
{
@@ -547,7 +546,7 @@ static void free_event_struct(struct event_pool *pool,
}
/**
- * get_evt_struct: - Gets the next free event in pool
+ * get_event_struct() - Gets the next free event in pool
* @pool: event_pool that contains the events to be searched
*
* Returns the next event in "free" state, and NULL if none are free.
@@ -575,7 +574,7 @@ static struct srp_event_struct *get_event_struct(struct event_pool *pool)
/**
* init_event_struct: Initialize fields in an event struct that are always
* required.
- * @evt: The event
+ * @evt_struct: The event
* @done: Routine to call when the event is responded to
* @format: SRP or MAD format
* @timeout: timeout value set in the CRQ
@@ -597,7 +596,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
* Routines for receiving SCSI responses from the hosting partition
*/
-/**
+/*
* set_srp_direction: Set the fields in the srp related to data
* direction and number of buffers based on the direction in
* the scsi_cmnd and the number of buffers
@@ -632,9 +631,9 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
/**
 * unmap_cmd_data: - Unmap data pointed to by srp_cmd based on the format
* @cmd: srp_cmd whose additional_data member will be unmapped
+ * @evt_struct: the event
* @dev: device for which the memory is mapped
- *
-*/
+ */
static void unmap_cmd_data(struct srp_cmd *cmd,
struct srp_event_struct *evt_struct,
struct device *dev)
@@ -671,6 +670,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
/**
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
* @cmd: struct scsi_cmnd with the scatterlist
+ * @evt_struct: struct srp_event_struct to map
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
*
@@ -717,8 +717,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
/* get indirect table */
if (!evt_struct->ext_list) {
- evt_struct->ext_list = (struct srp_direct_buf *)
- dma_alloc_coherent(dev,
+ evt_struct->ext_list = dma_alloc_coherent(dev,
SG_ALL * sizeof(struct srp_direct_buf),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
@@ -745,6 +744,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
/**
* map_data_for_srp_cmd: - Calls functions to map data for srp cmds
* @cmd: struct scsi_cmnd with the memory to be mapped
+ * @evt_struct: struct srp_event_struct to map
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: dma device for which to map dma memory
*
@@ -778,6 +778,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
/**
 * purge_requests: Our virtual adapter just shut down. Purge any sent requests
* @hostdata: the adapter
+ * @error_code: error code to return as the 'result'
*/
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
@@ -838,7 +839,7 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
/**
* ibmvscsi_timeout - Internal command timeout handler
- * @evt_struct: struct srp_event_struct that timed out
+ * @t: struct srp_event_struct that timed out
*
* Called when an internally generated command times out
*/
@@ -1034,8 +1035,8 @@ static inline u16 lun_from_dev(struct scsi_device *dev)
}
/**
- * ibmvscsi_queue: - The queuecommand function of the scsi template
- * @cmd: struct scsi_cmnd to be executed
+ * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template
+ * @cmnd: struct scsi_cmnd to be executed
* @done: Callback function to be called when cmd is completed
*/
static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
@@ -1342,7 +1343,7 @@ static void fast_fail_rsp(struct srp_event_struct *evt_struct)
}
/**
- * init_host - Start host initialization
+ * enable_fast_fail() - Start host initialization
* @hostdata: ibmvscsi_host_data of host
*
* Returns zero if successful.
@@ -1456,16 +1457,15 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
};
-/**
- * init_adapter: Start virtual adapter initialization sequence
- *
+/*
+ * init_adapter() - Start virtual adapter initialization sequence
*/
static void init_adapter(struct ibmvscsi_host_data *hostdata)
{
send_mad_adapter_info(hostdata);
}
-/**
+/*
* sync_completion: Signal that a synchronous command has completed
 * Note that after returning from this call, the evt_struct is freed.
 * The caller waiting on this completion shouldn't touch the evt_struct
@@ -1480,8 +1480,8 @@ static void sync_completion(struct srp_event_struct *evt_struct)
complete(&evt_struct->comp);
}
-/**
- * ibmvscsi_abort: Abort a command...from scsi host template
+/*
+ * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
* send this over to the server and wait synchronously for the response
*/
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
@@ -1618,7 +1618,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
return SUCCESS;
}
-/**
+/*
* ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
* template send this over to the server and wait synchronously for the
* response
@@ -1884,7 +1884,6 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
* ibmvscsi_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
- * @reason: calling context
*
* Return value:
* actual depth set
@@ -2214,7 +2213,7 @@ static int ibmvscsi_work(void *data)
return 0;
}
-/**
+/*
* Called by bus code for each adapter
*/
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -2374,7 +2373,7 @@ static int ibmvscsi_resume(struct device *dev)
return 0;
}
-/**
+/*
* ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
* support.
*/
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 9abd9e253af6..41ac9477df7a 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -128,10 +128,10 @@ static bool connection_broken(struct scsi_info *vscsi)
* This function calls h_free_q then frees the interrupt bit etc.
* It must release the lock before doing so because of the time it can take
* for h_free_crq in PHYP
- * NOTE: the caller must make sure that state and or flags will prevent
- * interrupt handler from scheduling work.
- * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
- * we can't do it here, because we don't have the lock
+ * NOTE: * the caller must make sure that state and/or flags will prevent
+ * the interrupt handler from scheduling work.
+ * * anyone calling this function may need to set the CRQ_CLOSED flag;
+ * we can't do it here, because we don't have the lock
*
* EXECUTION ENVIRONMENT:
* Process level
@@ -2670,7 +2670,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
u64 data_len = 0;
enum dma_data_direction dir;
int attr = 0;
- int rc = 0;
nexus = vscsi->tport.ibmv_nexus;
/*
@@ -2725,17 +2724,9 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
srp->lun.scsi_lun[0] &= 0x3f;
- rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
- cmd->sense_buf, scsilun_to_int(&srp->lun),
- data_len, attr, dir, 0);
- if (rc) {
- dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
- spin_lock_bh(&vscsi->intr_lock);
- list_del(&cmd->list);
- ibmvscsis_free_cmd_resources(vscsi, cmd);
- spin_unlock_bh(&vscsi->intr_lock);
- goto fail;
- }
+ target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+ cmd->sense_buf, scsilun_to_int(&srp->lun),
+ data_len, attr, dir, 0);
return;
fail:
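The ibmvscsi_tgt hunk above deletes the error path around target_submit_cmd() because, as of this series, the submission is treated as fire-and-forget: once the command is handed to the target core, success or failure comes back through the command's completion callbacks rather than a return code. A stand-alone sketch of that ownership transfer (types and helpers are stand-ins, not the target core's API):

struct cmd;
typedef void (*done_fn)(struct cmd *c, int status);

struct cmd { done_fn done; };

/* After submit() the caller must not touch c again; any error is
 * delivered via c->done(), so no local rc/unwind path is needed. */
static void submit(struct cmd *c)
{
	c->done(c, 0);	/* stub: complete immediately with "good" status */
}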
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 814acc57069d..9b75e19a9bab 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -546,7 +546,6 @@ static int initio_reset_scsi(struct initio_host * host, int seconds)
/**
* initio_init - set up an InitIO host adapter
* @host: InitIO host adapter
- * @num_scbs: Number of SCBS
* @bios_addr: BIOS address
*
* Set up the host adapter and devices according to the configuration
@@ -866,17 +865,16 @@ static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_b
struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
{
- struct scsi_ctrl_blk *tmp, *prev;
+ struct scsi_ctrl_blk *tmp;
u16 scbp_tarlun;
- prev = tmp = host->first_busy;
+ tmp = host->first_busy;
while (tmp != NULL) {
scbp_tarlun = (tmp->lun << 8) | (tmp->target);
if (scbp_tarlun == tarlun) { /* Unlink this SCB */
break;
}
- prev = tmp;
tmp = tmp->next;
}
#if DEBUG_QUEUE
@@ -1888,7 +1886,7 @@ static int int_initio_scsi_rst(struct initio_host * host)
}
/**
- * int_initio_scsi_resel - Reselection occurred
+ * int_initio_resel - Reselection occurred
* @host: InitIO host adapter
*
* A SCSI reselection event has been signalled and the interrupt
@@ -2602,7 +2600,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
}
/**
- * i91u_queuecommand - Queue a new command if possible
+ * i91u_queuecommand_lck - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
* @done: Completion handler
*
@@ -2651,9 +2649,9 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
}
/**
- * i91u_biospararm - return the "logical geometry
+ * i91u_biosparam - return the "logical geometry"
* @sdev: SCSI device
- * @dev; Matching block device
+ * @dev: Matching block device
* @capacity: Sector size of drive
* @info_array: Return space for BIOS geometry
*
@@ -2728,10 +2726,8 @@ static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
}
}
-/**
+/*
* i91uSCBPost - SCSI callback
- * @host: Pointer to host adapter control block.
- * @cmnd: Pointer to SCSI control block.
*
 * This is the callback routine called when the tulip finishes one
* SCSI command.
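The initio change above is a dead-store cleanup: the search loop only locates the matching SCB, so the 'prev' pointer it used to maintain was never read (the unlink happens elsewhere). The reduced loop, as a compilable sketch with stand-in types:

struct scb { unsigned short target, lun; struct scb *next; };

static struct scb *find_busy_scb(struct scb *head, unsigned short tarlun)
{
	struct scb *tmp;

	for (tmp = head; tmp; tmp = tmp->next)
		if (((tmp->lun << 8) | tmp->target) == tarlun)
			break;	/* found; the caller unlinks it separately */
	return tmp;
}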
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e451102b9a29..30c30a1db5b1 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5321,7 +5321,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
}
/**
- * ipr_eh_dev_reset - Reset the device
+ * __ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct
*
* This function issues a device reset to the affected device.
@@ -5583,7 +5583,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
}
/**
- * ipr_eh_abort - Abort a single op
+ * ipr_scan_finished - Report whether scan is done
* @shost: scsi host struct
* @elapsed_time: elapsed time
*
@@ -5606,7 +5606,7 @@ static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time
}
/**
- * ipr_eh_host_reset - Reset the host adapter
+ * ipr_eh_abort - Reset the host adapter
* @scsi_cmd: scsi command struct
*
* Return value:
@@ -6715,7 +6715,7 @@ static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
}
/**
- * ipr_info - Get information about the card/driver
+ * ipr_ioa_info - Get information about the card/driver
* @host: scsi host struct
*
* Return value:
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 7ebfa3c8cdc7..d690d9cf7eb1 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -89,16 +89,14 @@
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
-/**
- *
- *
+/*
* The number of milliseconds to wait while a given phy is consuming power
* before allowing another set of phys to consume power. Ultimately, this will
* be specified by OEM parameter.
*/
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
-/**
+/*
* NORMALIZE_PUT_POINTER() -
*
* This macro will normalize the completion queue put pointer so its value can
@@ -108,7 +106,7 @@
((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
-/**
+/*
* NORMALIZE_EVENT_POINTER() -
*
* This macro will normalize the completion queue event entry so its value can
@@ -120,7 +118,7 @@
>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
)
-/**
+/*
* NORMALIZE_GET_POINTER() -
*
* This macro will normalize the completion queue get pointer so its value can
@@ -129,7 +127,7 @@
#define NORMALIZE_GET_POINTER(x) \
((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
-/**
+/*
* NORMALIZE_GET_POINTER_CYCLE_BIT() -
*
* This macro will normalize the completion queue cycle pointer so it matches
@@ -138,7 +136,7 @@
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
-/**
+/*
* COMPLETION_QUEUE_CYCLE_BIT() -
*
* This macro will return the cycle bit of the completion queue entry
@@ -637,7 +635,7 @@ irqreturn_t isci_error_isr(int vec, void *data)
/**
* isci_host_start_complete() - This function is called by the core library,
* through the ISCI Module, to indicate controller start status.
- * @isci_host: This parameter specifies the ISCI host object
+ * @ihost: This parameter specifies the ISCI host object
* @completion_status: This parameter specifies the completion status from the
* core library.
*
@@ -670,7 +668,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
* use any timeout value, but this method provides the suggested minimum
* start timeout value. The returned value is based upon empirical
* information determined as a result of interoperability testing.
- * @controller: the handle to the controller object for which to return the
+ * @ihost: the handle to the controller object for which to return the
* suggested start timeout.
*
* This method returns the number of milliseconds for the suggested start
@@ -893,7 +891,7 @@ bool is_controller_start_complete(struct isci_host *ihost)
/**
* sci_controller_start_next_phy - start phy
- * @scic: controller
+ * @ihost: controller
*
* If all the phys have been started, then attempt to transition the
* controller to the READY state and inform the user
@@ -1145,7 +1143,7 @@ void isci_host_completion_routine(unsigned long data)
* controller has been quiesced. This method will ensure that all IO
* requests are quiesced, phys are stopped, and all additional operation by
* the hardware is halted.
- * @controller: the handle to the controller object to stop.
+ * @ihost: the handle to the controller object to stop.
* @timeout: This parameter specifies the number of milliseconds in which the
* stop operation should complete.
*
@@ -1174,7 +1172,7 @@ static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
* considered destructive. In other words, all current operations are wiped
* out. No IO completions for outstanding devices occur. Outstanding IO
* requests are not aborted or completed at the actual remote device.
- * @controller: the handle to the controller object to reset.
+ * @ihost: the handle to the controller object to reset.
*
* Indicate if the controller reset method succeeded or failed in some way.
* SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
@@ -1331,7 +1329,7 @@ static inline void sci_controller_starting_state_exit(struct sci_base_state_mach
/**
* sci_controller_set_interrupt_coalescence() - This method allows the user to
* configure the interrupt coalescence.
- * @controller: This parameter represents the handle to the controller object
+ * @ihost: This parameter represents the handle to the controller object
* for which its interrupt coalesce register is overridden.
* @coalesce_number: Used to control the number of entries in the Completion
* Queue before an interrupt is generated. If the number of entries exceed
@@ -2479,12 +2477,13 @@ struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
}
/**
+ * sci_controller_allocate_remote_node_context()
 * This method allocates a remote node index and reserves the remote node
 * context space for use. This method can fail if there are no more remote
 * node indexes available.
- * @scic: This is the controller object which contains the set of
+ * @ihost: This is the controller object which contains the set of
* free remote node ids
- * @sci_dev: This is the device object which is requesting the a remote node
+ * @idev: This is the device object which is requesting a remote node
* id
 * @node_id: This is the remote node id that is assigned to the device if one
* is available
@@ -2709,11 +2708,11 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
/**
* sci_controller_start_task() - This method is called by the SCIC user to
* send/start a framework task management request.
- * @controller: the handle to the controller object for which to start the task
+ * @ihost: the handle to the controller object for which to start the task
* management request.
- * @remote_device: the handle to the remote device object for which to start
+ * @idev: the handle to the remote device object for which to start
* the task management request.
- * @task_request: the handle to the task request object to start.
+ * @ireq: the handle to the task request object to start.
*/
enum sci_status sci_controller_start_task(struct isci_host *ihost,
struct isci_remote_device *idev,
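The NORMALIZE_* comments fixed above all describe the same trick: a completion-queue register packs several fields into one 32-bit value, and each macro masks (and possibly shifts) out a single field. In isolation, with made-up mask and shift values:

#include <stdint.h>

#define PUT_POINTER_MASK  0x00003fffu
#define EVENT_SHIFT       16
#define EVENT_MASK        (0x3fffu << EVENT_SHIFT)

static inline uint32_t normalize_put(uint32_t reg)
{
	return reg & PUT_POINTER_MASK;	/* usable as an array index */
}

static inline uint32_t normalize_event(uint32_t reg)
{
	return (reg & EVENT_MASK) >> EVENT_SHIFT;
}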
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 1b87d9080ebe..aa8787343e83 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -339,10 +339,11 @@ done:
}
/**
- * This method returns the port currently containing this phy. If the phy is
- * currently contained by the dummy port, then the phy is considered to not
- * be part of a port.
- * @sci_phy: This parameter specifies the phy for which to retrieve the
+ * phy_get_non_dummy_port() - This method returns the port currently containing
+ * this phy. If the phy is currently contained by the dummy port, then the phy
+ * is considered to not be part of a port.
+ *
+ * @iphy: This parameter specifies the phy for which to retrieve the
* containing port.
*
* This method returns a handle to a port that contains the supplied phy.
@@ -360,12 +361,8 @@ struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
return iphy->owning_port;
}
-/**
- * This method will assign a port to the phy object.
- * @out]: iphy This parameter specifies the phy for which to assign a port
- * object.
- *
- *
+/*
+ * sci_phy_set_port() - This method will assign a port to the phy object.
*/
void sci_phy_set_port(
struct isci_phy *iphy,
@@ -398,11 +395,11 @@ enum sci_status sci_phy_initialize(struct isci_phy *iphy,
}
/**
- * This method assigns the direct attached device ID for this phy.
+ * sci_phy_setup_transport() - This method assigns the direct attached device ID for this phy.
*
- * @iphy The phy for which the direct attached device id is to
+ * @iphy: The phy for which the direct attached device id is to
* be assigned.
- * @device_id The direct attached device ID to assign to the phy.
+ * @device_id: The direct attached device ID to assign to the phy.
* This will either be the RNi for the device or an invalid RNi if there
* is no current device assigned to the phy.
*/
@@ -597,7 +594,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
/**
* sci_phy_complete_link_training - perform processing common to
* all protocols upon completion of link training.
- * @sci_phy: This parameter specifies the phy object for which link training
+ * @iphy: This parameter specifies the phy object for which link training
* has completed.
* @max_link_rate: This parameter specifies the maximum link rate to be
* associated with this phy.
@@ -1167,8 +1164,8 @@ static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine
}
/**
- *
- * @sci_phy: This is the struct isci_phy object to stop.
+ * scu_link_layer_stop_protocol_engine()
+ * @iphy: This is the struct isci_phy object to stop.
*
* This method will stop the struct isci_phy object. This does not reset the
 * protocol engine; it just suspends it and places it in a state where it will
@@ -1219,7 +1216,8 @@ static void scu_link_layer_start_oob(struct isci_phy *iphy)
}
/**
- *
+ * scu_link_layer_tx_hard_reset()
+ * @iphy: This is the struct isci_phy object to stop.
*
* This method will transmit a hard reset request on the specified phy. The SCU
* hardware requires that we reset the OOB state machine and set the hard reset
@@ -1420,7 +1418,7 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
/**
* isci_phy_control() - This function is one of the SAS Domain Template
* functions. This is a phy management function.
- * @phy: This parameter specifies the sphy being controlled.
+ * @sas_phy: This parameter specifies the sphy being controlled.
* @func: This parameter specifies the phy control function being invoked.
* @buf: This parameter is specific to the phy function being invoked.
*
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 45fecfa36a98..5aaf95b14b2e 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -447,7 +447,6 @@ void sci_phy_get_attached_sas_address(
struct isci_phy *iphy,
struct sci_sas_address *sas_address);
-struct sci_phy_proto;
void sci_phy_get_protocols(
struct isci_phy *iphy,
struct sci_phy_proto *protocols);
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 448a8c31ba35..1609aba1c9c1 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -62,7 +62,7 @@
#undef C
#define C(a) (#a)
-const char *port_state_name(enum sci_port_states state)
+static const char *port_state_name(enum sci_port_states state)
{
static const char * const strings[] = PORT_STATES;
@@ -115,9 +115,9 @@ static u32 sci_port_get_phys(struct isci_port *iport)
/**
* sci_port_get_properties() - This method simply returns the properties
* regarding the port, such as: physical index, protocols, sas address, etc.
- * @port: this parameter specifies the port for which to retrieve the physical
+ * @iport: this parameter specifies the port for which to retrieve the physical
* index.
- * @properties: This parameter specifies the properties structure into which to
+ * @prop: This parameter specifies the properties structure into which to
* copy the requested information.
*
* Indicate if the user specified a valid port. SCI_SUCCESS This value is
@@ -233,8 +233,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
* isci_port_link_down() - This function is called by the sci core when a link
* becomes inactive.
* @isci_host: This parameter specifies the isci host object.
- * @phy: This parameter specifies the isci phy with the active link.
- * @port: This parameter specifies the isci port with the active link.
+ * @isci_phy: This parameter specifies the isci phy with the active link.
+ * @isci_port: This parameter specifies the isci port with the active link.
*
*/
static void isci_port_link_down(struct isci_host *isci_host,
@@ -308,7 +308,7 @@ static void port_state_machine_change(struct isci_port *iport,
/**
* isci_port_hard_reset_complete() - This function is called by the sci core
* when the hard reset complete notification has been received.
- * @port: This parameter specifies the sci port with the active link.
+ * @isci_port: This parameter specifies the sci port with the active link.
* @completion_status: This parameter specifies the core status for the reset
* process.
*
@@ -395,9 +395,10 @@ bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
}
/**
- *
- * @sci_port: This is the port object for which to determine if the phy mask
+ * sci_port_is_phy_mask_valid()
+ * @iport: This is the port object for which to determine if the phy mask
* can be supported.
+ * @phy_mask: Phy mask belonging to this port
*
* This method will return a true value if the port's phy mask can be supported
* by the SCU. The following is a list of valid PHY mask configurations for
@@ -533,7 +534,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
/**
* sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
*
- * @sci_port: logical port on which we need to create the remote node context
+ * @iport: logical port on which we need to create the remote node context
* @rni: remote node index for this remote node context.
*
* This routine will construct a dummy remote node context data structure
@@ -677,8 +678,8 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
/**
* sci_port_general_link_up_handler - phy can be assigned to port?
- * @sci_port: sci_port object for which has a phy that has gone link up.
- * @sci_phy: This is the struct isci_phy object that has gone link up.
+ * @iport: sci_port object which has a phy that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
* @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
*
 * Determine if this phy can be assigned to this port. If the phy is
@@ -716,10 +717,11 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
/**
+ * sci_port_is_wide()
* This method returns false if the port only has a single phy object assigned.
* If there are no phys or more than one phy then the method will return
* true.
- * @sci_port: The port for which the wide port condition is to be checked.
+ * @iport: The port for which the wide port condition is to be checked.
*
* bool true Is returned if this is a wide ported port. false Is returned if
* this is a narrow port.
@@ -739,12 +741,13 @@ static bool sci_port_is_wide(struct isci_port *iport)
}
/**
+ * sci_port_link_detected()
 * This method is called by the PHY object when the link is detected. If the
* port wants the PHY to continue on to the link up state then the port
* layer must return true. If the port object returns false the phy object
* must halt its attempt to go link up.
- * @sci_port: The port associated with the phy object.
- * @sci_phy: The phy object that is trying to go link up.
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
*
* true if the phy object can continue to the link up condition. true Is
* returned if this phy can continue to the ready state. false Is returned if
@@ -817,10 +820,8 @@ done:
/* --------------------------------------------------------------------------- */
-/**
+/*
 * This function updates the hardware's VIIT entry for this port.
- *
- *
*/
static void sci_port_update_viit_entry(struct isci_port *iport)
{
@@ -874,7 +875,7 @@ static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
/**
* sci_port_post_dummy_request() - post dummy/workaround request
- * @sci_port: port to post task
+ * @iport: port to post task
*
* Prevent the hardware scheduler from posting new requests to the front
* of the scheduler queue causing a starvation problem for currently
@@ -899,10 +900,11 @@ static void sci_port_post_dummy_request(struct isci_port *iport)
}
/**
- * This routine will abort the dummy request. This will alow the hardware to
+ * sci_port_abort_dummy_request()
+ * This routine will abort the dummy request. This will allow the hardware to
* power down parts of the silicon to save power.
*
- * @sci_port: The port on which the task must be aborted.
+ * @iport: The port on which the task must be aborted.
*
*/
static void sci_port_abort_dummy_request(struct isci_port *iport)
@@ -923,8 +925,8 @@ static void sci_port_abort_dummy_request(struct isci_port *iport)
}
/**
- *
- * @sci_port: This is the struct isci_port object to resume.
+ * sci_port_resume_port_task_scheduler()
+ * @iport: This is the struct isci_port object to resume.
*
* This method will resume the port task scheduler for this port object. none
*/
@@ -1014,8 +1016,8 @@ static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
}
/**
- *
- * @object: This is the object which is cast to a struct isci_port object.
+ * sci_port_ready_substate_operational_exit()
+ * @sm: This is the object which is cast to a struct isci_port object.
*
* This method will perform the actions required by the struct isci_port on
* exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
@@ -1186,9 +1188,9 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
}
/**
- * sci_port_add_phy() -
- * @sci_port: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * sci_port_add_phy()
+ * @iport: This parameter specifies the port in which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
*
* This method will add a PHY to the selected port. This method returns an
* enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
@@ -1257,9 +1259,9 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
}
/**
- * sci_port_remove_phy() -
- * @sci_port: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * sci_port_remove_phy()
+ * @iport: This parameter specifies the port in which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
*
* This method will remove the PHY from the selected PORT. This method returns
* an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index b1c197505579..c382a257b51b 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -73,7 +73,7 @@ enum SCIC_SDS_APC_ACTIVITY {
* ****************************************************************************** */
/**
- *
+ * sci_sas_address_compare()
* @address_one: A SAS Address to be compared.
* @address_two: A SAS Address to be compared.
*
@@ -102,9 +102,9 @@ static s32 sci_sas_address_compare(
}
/**
- *
- * @controller: The controller object used for the port search.
- * @phy: The phy object to match.
+ * sci_port_configuration_agent_find_port()
+ * @ihost: The controller object used for the port search.
+ * @iphy: The phy object to match.
*
* This routine will find a matching port for the phy. This means that the
* port and phy both have the same broadcast sas address and same received sas
@@ -145,8 +145,8 @@ static struct isci_port *sci_port_configuration_agent_find_port(
}
/**
- *
- * @controller: This is the controller object that contains the port agent
+ * sci_port_configuration_agent_validate_ports()
+ * @ihost: This is the controller object that contains the port agent
* @port_agent: This is the port configuration agent for the controller.
*
* This routine will validate the port configuration is correct for the SCU
@@ -373,15 +373,16 @@ static void sci_mpc_agent_link_up(struct isci_host *ihost,
}
/**
- *
- * @controller: This is the controller object that receives the link down
+ * sci_mpc_agent_link_down()
+ * @ihost: This is the controller object that receives the link down
* notification.
- * @port: This is the port object associated with the phy. If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
 * associated port this is a NULL. The port is an invalid
 * handle only if the phy was never part of this port. This happens when
* the phy is not broadcasting the same SAS address as the other phys in the
* assigned port.
- * @phy: This is the phy object which has gone link down.
+ * @iphy: This is the phy object which has gone link down.
*
* This function handles the manual port configuration link down notifications.
* Since all ports and phys are associated at initialization time we just turn
@@ -590,11 +591,12 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
/**
* sci_apc_agent_link_up - handle apc link up events
- * @scic: This is the controller object that receives the link up
+ * @ihost: This is the controller object that receives the link up
* notification.
- * @sci_port: This is the port object associated with the phy. If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
 * associated port this is a NULL.
- * @sci_phy: This is the phy object which has gone link up.
+ * @iphy: This is the phy object which has gone link up.
*
* This method handles the automatic port configuration for link up
* notifications. Is it possible to get a link down notification from a phy
@@ -620,9 +622,10 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
}
/**
- *
- * @controller: This is the controller object that receives the link down
+ * sci_apc_agent_link_down()
+ * @ihost: This is the controller object that receives the link down
* notification.
+ * @port_agent: This is the port configuration agent for the controller.
 * @iport: This is the port object associated with the phy. If there is no
 * associated port this is a NULL.
* @iphy: This is the phy object which has gone link down.
@@ -697,9 +700,7 @@ done:
* Public port configuration agent routines
* ****************************************************************************** */
-/**
- *
- *
+/*
* This method will construct the port configuration agent for operation. This
* call is universal for both manual port configuration and automatic port
* configuration modes.
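sci_sas_address_compare(), documented above, gives the port configuration agent a total order on SAS addresses so it can decide whether a phy broadcasting a given address belongs in an existing port. A compilable sketch under the assumption that an address is stored as high/low 32-bit words:

#include <stdint.h>

struct sas_addr { uint32_t high, low; };

/* memcmp-style result: <0, 0, >0 */
static int sas_addr_cmp(struct sas_addr a, struct sas_addr b)
{
	if (a.high != b.high)
		return a.high > b.high ? 1 : -1;
	if (a.low != b.low)
		return a.low > b.low ? 1 : -1;
	return 0;	/* same address: the phy may join the port */
}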
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index c3f540b55689..866950a02965 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -288,8 +288,9 @@ enum sci_status isci_remote_device_terminate_requests(
* isci_remote_device_not_ready() - This function is called by the ihost when
* the remote device is not ready. We mark the isci device as ready (not
* "ready_for_io") and signal the waiting proccess.
-* @isci_host: This parameter specifies the isci host object.
-* @isci_device: This parameter specifies the remote device
+* @ihost: This parameter specifies the isci host object.
+* @idev: This parameter specifies the remote device
+* @reason: Reason to switch on
*
* sci_lock is held on entrance to this function.
*/
@@ -1000,7 +1001,7 @@ static void sci_remote_device_initial_state_enter(struct sci_base_state_machine
/**
* sci_remote_device_destruct() - free remote node context and destruct
- * @remote_device: This parameter specifies the remote device to be destructed.
+ * @idev: This parameter specifies the remote device to be destructed.
*
* Remote device objects are a limited resource. As such, they must be
* protected. Thus calls to construct and destruct are mutually exclusive and
@@ -1236,8 +1237,8 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
/**
* sci_remote_device_construct() - common construction
- * @sci_port: SAS/SATA port through which this device is accessed.
- * @sci_dev: remote device to construct
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct
*
* This routine just performs benign initialization and does not
* allocate the remote_node_context which is left to
@@ -1256,7 +1257,7 @@ static void sci_remote_device_construct(struct isci_port *iport,
SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}
-/**
+/*
* sci_remote_device_da_construct() - construct direct attached device.
*
* The information (e.g. IAF, Signature FIS, etc.) necessary to build
@@ -1294,7 +1295,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
return SCI_SUCCESS;
}
-/**
+/*
* sci_remote_device_ea_construct() - construct expander attached device
*
* Remote node context(s) is/are a global resource allocated by this
@@ -1384,7 +1385,7 @@ static bool isci_remote_device_test_resume_done(
return done;
}
-void isci_remote_device_wait_for_resume_from_abort(
+static void isci_remote_device_wait_for_resume_from_abort(
struct isci_host *ihost,
struct isci_remote_device *idev)
{
@@ -1439,7 +1440,7 @@ enum sci_status isci_remote_device_resume_from_abort(
* sci_remote_device_start() - This method will start the supplied remote
* device. This method enables normal IO requests to flow through to the
* remote device.
- * @remote_device: This parameter specifies the device to be started.
+ * @idev: This parameter specifies the device to be started.
* @timeout: This parameter specifies the number of milliseconds in which the
* start operation should complete.
*
@@ -1501,10 +1502,11 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
}
/**
+ * isci_remote_device_alloc()
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port connected to this device.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
*
* pointer to new isci_remote_device.
*/
@@ -1549,8 +1551,8 @@ void isci_remote_device_release(struct kref *kref)
/**
* isci_remote_device_stop() - This function is called internally to stop the
* remote device.
- * @isci_host: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
*
* The status of the ihost request to stop.
*/
@@ -1585,8 +1587,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
/**
* isci_remote_device_gone() - This function is called by libsas when a domain
* device is removed.
- * @domain_device: This parameter specifies the libsas domain device.
- *
+ * @dev: This parameter specifies the libsas domain device.
*/
void isci_remote_device_gone(struct domain_device *dev)
{
@@ -1606,7 +1607,7 @@ void isci_remote_device_gone(struct domain_device *dev)
 * device is discovered. A remote device object is created and started. The
* function then sleeps until the sci core device started message is
* received.
- * @domain_device: This parameter specifies the libsas domain device.
+ * @dev: This parameter specifies the libsas domain device.
*
* status, zero indicates success.
*/
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 68333f523b35..77ba0291134e 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -74,7 +74,7 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
#undef C
/**
- *
+ * sci_remote_node_context_is_ready()
* @sci_rnc: The state of the remote node context object to check.
*
* This method will return true if the remote node context is in a READY state
@@ -163,12 +163,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
rnc->ssp.oaf_source_zone_group = 0;
rnc->ssp.oaf_more_compatibility_features = 0;
}
-/**
- *
- * @sci_rnc:
- * @callback:
- * @callback_parameter:
- *
+/*
 * This method will set up the remote node context object so it will transition
 * to its ready state. If the remote node context is already set up to
 * transition to its final state then this function does nothing.
@@ -202,9 +197,7 @@ static void sci_remote_node_context_setup_to_destroy(
wake_up(&ihost->eventq);
}
-/**
- *
- *
+/*
* This method just calls the user callback function and then resets the
* callback.
*/
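The last remote_node_context comment above ("calls the user callback function and then resets the callback") is a common one-shot callback idiom: clear the stored callback before invoking a saved copy, so the callback itself may safely re-arm a new one. A stand-alone sketch, types being stand-ins:

#include <stddef.h>

typedef void (*user_cb)(void *arg);

struct rnc { user_cb cb; void *arg; };

static void notify_user(struct rnc *rnc)
{
	user_cb fn = rnc->cb;
	void *arg = rnc->arg;

	rnc->cb = NULL;		/* reset first ... */
	rnc->arg = NULL;
	if (fn)
		fn(arg);	/* ... then invoke the saved copy */
}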
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
index 301b3141945e..1bcaf528c1c9 100644
--- a/drivers/scsi/isci/remote_node_table.c
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -53,17 +53,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/**
+/*
* This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
* public, protected, and private methods.
- *
- *
*/
#include "remote_node_table.h"
#include "remote_node_context.h"
/**
- *
+ * sci_remote_node_table_get_group_index()
* @remote_node_table: This is the remote node index table from which the
* selection will be made.
* @group_table_index: This is the index to the group table from which to
@@ -98,10 +96,10 @@ static u32 sci_remote_node_table_get_group_index(
}
/**
- *
- * @out]: remote_node_table This the remote node table in which to clear the
+ * sci_remote_node_table_clear_group_index()
+ * @remote_node_table: This is the remote node table in which to clear the
* selector.
- * @set_index: This is the remote node selector in which the change will be
+ * @group_table_index: This is the remote node selector in which the change will be
* made.
* @group_index: This is the bit index in the table to be modified.
*
@@ -128,8 +126,8 @@ static void sci_remote_node_table_clear_group_index(
}
/**
- *
- * @out]: remote_node_table This the remote node table in which to set the
+ * sci_remote_node_table_set_group_index()
+ * @remote_node_table: This is the remote node table in which to set the
* selector.
* @group_table_index: This is the remote node selector in which the change
* will be made.
@@ -158,8 +156,8 @@ static void sci_remote_node_table_set_group_index(
}
/**
- *
- * @out]: remote_node_table This is the remote node table in which to modify
+ * sci_remote_node_table_set_node_index()
+ * @remote_node_table: This is the remote node table in which to modify
* the remote node availability.
* @remote_node_index: This is the remote node index that is being returned to
* the table.
@@ -191,8 +189,8 @@ static void sci_remote_node_table_set_node_index(
}
/**
- *
- * @out]: remote_node_table This is the remote node table from which to clear
+ * sci_remote_node_table_clear_node_index()
+ * @remote_node_table: This is the remote node table from which to clear
* the available remote node bit.
* @remote_node_index: This is the remote node index which is to be cleared
* from the table.
@@ -224,8 +222,8 @@ static void sci_remote_node_table_clear_node_index(
}
/**
- *
- * @out]: remote_node_table The remote node table from which the slot will be
+ * sci_remote_node_table_clear_group()
+ * @remote_node_table: The remote node table from which the slot will be
* cleared.
* @group_index: The index for the slot that is to be cleared.
*
@@ -252,9 +250,8 @@ static void sci_remote_node_table_clear_group(
remote_node_table->available_remote_nodes[dword_location] = dword_value;
}
-/**
- *
- * @remote_node_table:
+/*
+ * sci_remote_node_table_set_group()
*
 * This method sets an entire remote node group in the remote node table.
*/
@@ -280,7 +277,7 @@ static void sci_remote_node_table_set_group(
}
/**
- *
+ * sci_remote_node_table_get_group_value()
* @remote_node_table: This is the remote node table that for which the group
* value is to be returned.
* @group_index: This is the group index to use to find the group value.
@@ -307,8 +304,8 @@ static u8 sci_remote_node_table_get_group_value(
}
/**
- *
- * @out]: remote_node_table The remote that which is to be initialized.
+ * sci_remote_node_table_initialize()
+ * @remote_node_table: The remote node table which is to be initialized.
* @remote_node_entries: The number of entries to put in the table.
*
* This method will initialize the remote node table for use. none
@@ -365,10 +362,10 @@ void sci_remote_node_table_initialize(
}
/**
- *
- * @out]: remote_node_table The remote node table from which to allocate a
+ * sci_remote_node_table_allocate_single_remote_node()
+ * @remote_node_table: The remote node table from which to allocate a
* remote node.
- * @table_index: The group index that is to be used for the search.
+ * @group_table_index: The group index that is to be used for the search.
*
* This method will allocate a single RNi from the remote node table. The
* table index will determine from which remote node group table to search.
@@ -425,10 +422,10 @@ static u16 sci_remote_node_table_allocate_single_remote_node(
}
/**
- *
+ * sci_remote_node_table_allocate_triple_remote_node()
* @remote_node_table: This is the remote node table from which to allocate the
* remote node entries.
- * @group_table_index: THis is the group table index which must equal two (2)
+ * @group_table_index: This is the group table index which must equal two (2)
* for this operation.
*
* This method will allocate three consecutive remote node context entries. If
@@ -462,7 +459,7 @@ static u16 sci_remote_node_table_allocate_triple_remote_node(
}
/**
- *
+ * sci_remote_node_table_allocate_remote_node()
* @remote_node_table: This is the remote node table from which the remote node
* allocation is to take place.
 * @remote_node_count: This is the remote node count which is one of
@@ -505,9 +502,10 @@ u16 sci_remote_node_table_allocate_remote_node(
}
/**
- *
- * @remote_node_table:
- *
+ * sci_remote_node_table_release_single_remote_node()
+ * @remote_node_table: This is the remote node table from which the remote node
+ * release is to take place.
+ * @remote_node_index: This is the remote node index that is being released.
* This method will free a single remote node index back to the remote node
* table. This routine will update the remote node groups
*/
@@ -550,9 +548,10 @@ static void sci_remote_node_table_release_single_remote_node(
}
/**
- *
+ * sci_remote_node_table_release_triple_remote_node()
* @remote_node_table: This is the remote node table to which the remote node
* index is to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
*
* This method will release a group of three consecutive remote nodes back to
* the free remote nodes.
@@ -573,11 +572,12 @@ static void sci_remote_node_table_release_triple_remote_node(
}
/**
- *
+ * sci_remote_node_table_release_remote_node_index()
* @remote_node_table: The remote node table to which the remote node index is
* to be freed.
* @remote_node_count: This is the count of consecutive remote nodes that are
* to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
*
* This method will release the remote node index back into the remote node
* table free pool.
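The remote node table above is essentially a bitmap allocator: a set bit marks a free remote node index, allocation clears the first suitable bit, and release sets it again (with extra bookkeeping for the single- and triple-entry allocations seen above). Stripped to a single-entry version, with made-up sizes:

#include <stdint.h>

#define NODES 64

static uint64_t free_nodes = ~0ull;	/* bit set == index free */

static int alloc_node(void)
{
	for (int i = 0; i < NODES; i++)
		if (free_nodes & (1ull << i)) {
			free_nodes &= ~(1ull << i);
			return i;
		}
	return -1;	/* table exhausted */
}

static void release_node(int i)
{
	free_nodes |= 1ull << i;	/* return the index to the pool */
}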
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 58e62162882f..e7c6cb4c1556 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -207,11 +207,8 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
SCI_CONTROLLER_INVALID_IO_TAG;
}
-/**
+/*
+ * This method will fill in the SCU Task Context for any type of SSP request.
- * @sci_req:
- * @task_context:
- *
*/
static void scu_ssp_request_construct_task_context(
struct isci_request *ireq,
@@ -410,10 +407,8 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
tc->ref_tag_seed_gen = 0;
}
-/**
+/*
+ * This method will fill in the SCU Task Context for an SSP IO request.
- * @sci_req:
- *
*/
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
enum dma_data_direction dir,
@@ -456,17 +451,16 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
}
/**
- * This method will fill in the SCU Task Context for a SSP Task request. The
- * following important settings are utilized: -# priority ==
- * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
- * ahead of other task destined for the same Remote Node. -# task_type ==
- * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
- * (i.e. non-raw frame) is being utilized to perform task management. -#
- * control_frame == 1. This ensures that the proper endianess is set so
- * that the bytes are transmitted in the right order for a task frame.
- * @sci_req: This parameter specifies the task request object being
- * constructed.
- *
+ * scu_ssp_task_request_construct_task_context() - This method will fill in
+ * the SCU Task Context for a SSP Task request. The following important
+ * settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH. This
+ * ensures that the task request is issued ahead of other tasks destined
+ * for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD. This
+ * simply indicates that a normal request type (i.e. non-raw frame) is
+ * being utilized to perform task management. -# control_frame == 1. This
+ * ensures that the proper endianness is set so that the bytes are
+ * transmitted in the right order for a task frame.
+ * @ireq: This parameter specifies the task request object being constructed.
*/
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
@@ -484,9 +478,10 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
}
/**
+ * scu_sata_request_construct_task_context()
 * This method will fill in the SCU Task Context for any type of SATA
* request. This is called from the various SATA constructors.
- * @sci_req: The general IO request object which is to be used in
+ * @ireq: The general IO request object which is to be used in
* constructing the SCU task context.
* @task_context: The buffer pointer for the SCU task context which is being
* constructed.
@@ -593,9 +588,9 @@ static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
return SCI_SUCCESS;
}
-/**
- *
- * @sci_req: This parameter specifies the request to be constructed as an
+/*
+ * sci_stp_optimized_request_construct()
+ * @ireq: This parameter specifies the request to be constructed as an
* optimized request.
* @optimized_task_type: This parameter specifies whether the request is to be
 * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
@@ -778,11 +773,11 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
return status;
}
+#define SCU_TASK_CONTEXT_SRAM 0x200000
/**
* sci_req_tx_bytes - bytes transferred when reply underruns request
* @ireq: request that was terminated early
*/
-#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
struct isci_host *ihost = ireq->owning_controller;
@@ -1396,10 +1391,10 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
}
/**
- *
- * @stp_request: The request that is used for the SGL processing.
- * @data_buffer: The buffer of data to be copied.
- * @length: The length of the data transfer.
+ * sci_stp_request_pio_data_in_copy_data_buffer()
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
*
* Copy the data from the buffer for the length specified to the IO request SGL
* specified data region. enum sci_status
@@ -1443,8 +1438,8 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
}
/**
- *
- * @sci_req: The PIO DATA IN request that is to receive the data.
+ * sci_stp_request_pio_data_in_copy_data()
+ * @stp_req: The PIO DATA IN request that is to receive the data.
* @data_buffer: The buffer to copy from.
*
* Copy the data buffer to the io request data region. enum sci_status
@@ -2452,7 +2447,7 @@ sci_io_request_tc_completion(struct isci_request *ireq,
* isci_request_process_response_iu() - This function sets the status and
* response iu, in the task struct, from the request object for the upper
* layer driver.
- * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
* @resp_iu: This parameter points to the response iu of the completed request.
* @dev: This parameter specifies the linux device struct.
*
@@ -2485,6 +2480,7 @@ static void isci_request_process_response_iu(
* isci_request_set_open_reject_status() - This function prepares the I/O
* completion for OPEN_REJECT conditions.
* @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
* @open_rej_reason: This parameter specifies the encoded reason for the
@@ -2509,7 +2505,9 @@ static void isci_request_set_open_reject_status(
/**
* isci_request_handle_controller_specific_errors() - This function decodes
* controller-specific I/O completion error conditions.
+ * @idev: Remote device
* @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
*
@@ -3326,7 +3324,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq)
* @ihost: This parameter specifies the ISCI host object
* @request: This parameter points to the isci_request object allocated in the
* request construct function.
- * @sci_device: This parameter is the handle for the sci core's remote device
+ * @idev: This parameter is the handle for the sci core's remote device
* object that is the destination for this request.
*
 * SCI_SUCCESS on successful completion, or specific failure code.
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 26fa1a4d1e6b..62062ed6cd9a 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -369,7 +369,7 @@ static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
tmf->io_tag = old_request->io_tag;
}
-/**
+/*
 * isci_task_send_lu_reset_sas() - This function is one of the SAS Domain
* Template functions.
* @lun: This parameter specifies the lun to be reset.
@@ -668,7 +668,6 @@ int isci_task_clear_task_set(
* returned, libsas turns this into a LUN reset; when FUNC_FAILED is
* returned, libsas will turn this into a target reset
* @task: This parameter specifies the sas task being queried.
- * @lun: This parameter specifies the lun associated with this request.
*
* status, zero indicates success.
*/
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index f0ed6863cc70..60a88a95a8e2 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
if (!esp->command_block)
goto fail_unmap_regs;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = err = platform_get_irq(dev, 0);
+ if (err < 0)
+ goto fail_unmap_command_block;
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
if (err < 0)
goto fail_unmap_command_block;
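The jazz_esp fix above matters because platform_get_irq() returns a negative errno on failure; storing that into host->irq unchecked would later feed a bogus number to request_irq(). The one-line double assignment keeps both the irq and the error code. The same check, reduced to a stand-alone sketch with a stubbed lookup:

#include <stdio.h>

static int get_irq(int index) { (void)index; return -6; /* -ENXIO */ }

static int probe(void)
{
	int irq, err;

	irq = err = get_irq(0);	/* valid number or negative errno */
	if (err < 0)
		return err;	/* bail out before requesting the irq */

	printf("using irq %d\n", irq);
	return 0;
}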
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index b43b5f62ee3e..509eacd7893d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2248,7 +2248,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
EXPORT_SYMBOL(fc_slave_alloc);
/**
- * fc_fcp_destory() - Tear down the FCP layer for a given local port
+ * fc_fcp_destroy() - Tear down the FCP layer for a given local port
* @lport: The local port that no longer needs the FCP layer
*/
void fc_fcp_destroy(struct fc_lport *lport)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 22826544da7e..cf36c8cb5493 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -703,7 +703,7 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
}
/**
- * fc_rport_enter_ready() - Enter the ready state and start discovery
+ * fc_lport_enter_ready() - Enter the ready state and start discovery
* @lport: The local port that is ready
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
@@ -747,7 +747,7 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
}
/**
- * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint
+ * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
* @lport: The local port which will have its Port ID set.
* @port_id: The new port ID.
*
@@ -1393,7 +1393,7 @@ static struct fc_rport_operations fc_lport_rport_ops = {
};
/**
- * fc_rport_enter_dns() - Create a fc_rport for the name server
+ * fc_lport_enter_dns() - Create a fc_rport for the name server
* @lport: The local port requesting a remote port for the name server
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
@@ -1509,7 +1509,7 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
}
/**
- * fc_rport_enter_fdmi() - Create a fc_rport for the management server
+ * fc_lport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
@@ -1640,7 +1640,7 @@ err:
EXPORT_SYMBOL(fc_lport_logo_resp);
/**
- * fc_rport_enter_logo() - Logout of the fabric
+ * fc_lport_enter_logo() - Logout of the fabric
* @lport: The local port to be logged out
*/
static void fc_lport_enter_logo(struct fc_lport *lport)
@@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
- "lport->mfs:%hu\n", mfs, lport->mfs);
+ "lport->mfs:%u\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
goto out;
}
@@ -1782,7 +1782,7 @@ err:
EXPORT_SYMBOL(fc_lport_flogi_resp);
/**
- * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
+ * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
* @lport: Fibre Channel local port to be logged in to the fabric
*/
static void fc_lport_enter_flogi(struct fc_lport *lport)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 56003208d2e7..cd0fb8ca2425 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1486,7 +1486,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
}
/**
- * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses
+ * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
* @sp: The sequence the ADISC response was on
* @fp: The ADISC response frame
* @rdata_arg: The remote port that sent the ADISC response
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 8b9a39077dba..e9a86128f1f1 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -35,46 +35,40 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
/* ts->resp == SAS_TASK_COMPLETE */
/* task delivered, what happened afterwards? */
switch (ts->stat) {
- case SAS_DEV_NO_RESPONSE:
- return AC_ERR_TIMEOUT;
-
- case SAS_INTERRUPTED:
- case SAS_PHY_DOWN:
- case SAS_NAK_R_ERR:
- return AC_ERR_ATA_BUS;
-
-
- case SAS_DATA_UNDERRUN:
- /*
- * Some programs that use the taskfile interface
- * (smartctl in particular) can cause underrun
- * problems. Ignore these errors, perhaps at our
- * peril.
- */
- return 0;
-
- case SAS_DATA_OVERRUN:
- case SAS_QUEUE_FULL:
- case SAS_DEVICE_UNKNOWN:
- case SAS_SG_ERR:
- return AC_ERR_INVALID;
-
- case SAS_OPEN_TO:
- case SAS_OPEN_REJECT:
- pr_warn("%s: Saw error %d. What to do?\n",
- __func__, ts->stat);
- return AC_ERR_OTHER;
-
- case SAM_STAT_CHECK_CONDITION:
- case SAS_ABORTED_TASK:
- return AC_ERR_DEV;
-
- case SAS_PROTO_RESPONSE:
- /* This means the ending_fis has the error
- * value; return 0 here to collect it */
- return 0;
- default:
- return 0;
+ case SAS_DEV_NO_RESPONSE:
+ return AC_ERR_TIMEOUT;
+ case SAS_INTERRUPTED:
+ case SAS_PHY_DOWN:
+ case SAS_NAK_R_ERR:
+ return AC_ERR_ATA_BUS;
+ case SAS_DATA_UNDERRUN:
+ /*
+ * Some programs that use the taskfile interface
+ * (smartctl in particular) can cause underrun
+ * problems. Ignore these errors, perhaps at our
+ * peril.
+ */
+ return 0;
+ case SAS_DATA_OVERRUN:
+ case SAS_QUEUE_FULL:
+ case SAS_DEVICE_UNKNOWN:
+ case SAS_SG_ERR:
+ return AC_ERR_INVALID;
+ case SAS_OPEN_TO:
+ case SAS_OPEN_REJECT:
+ pr_warn("%s: Saw error %d. What to do?\n",
+ __func__, ts->stat);
+ return AC_ERR_OTHER;
+ case SAM_STAT_CHECK_CONDITION:
+ case SAS_ABORTED_TASK:
+ return AC_ERR_DEV;
+ case SAS_PROTO_RESPONSE:
+ /* This means the ending_fis has the error
+ * value; return 0 here to collect it
+ */
+ return 0;
+ default:
+ return 0;
}
}
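
The hunk above only re-indents the SAS-to-ATA status mapping; no behavior
changes. Purely for illustration, the same mapping could be expressed as a
lookup table -- a hedged sketch, not what the in-tree code does (types are
simplified to int):

	struct sas_ata_err_entry {
		int sas_stat;	/* task_status_struct::stat value */
		int ata_err;	/* ata_completion_errors value */
	};

	static const struct sas_ata_err_entry sas_ata_err_map[] = {
		{ SAS_DEV_NO_RESPONSE,      AC_ERR_TIMEOUT },
		{ SAS_INTERRUPTED,          AC_ERR_ATA_BUS },
		{ SAS_PHY_DOWN,             AC_ERR_ATA_BUS },
		{ SAS_NAK_R_ERR,            AC_ERR_ATA_BUS },
		{ SAS_DATA_OVERRUN,         AC_ERR_INVALID },
		{ SAS_QUEUE_FULL,           AC_ERR_INVALID },
		{ SAS_DEVICE_UNKNOWN,       AC_ERR_INVALID },
		{ SAS_SG_ERR,               AC_ERR_INVALID },
		{ SAS_OPEN_TO,              AC_ERR_OTHER },
		{ SAS_OPEN_REJECT,          AC_ERR_OTHER },
		{ SAM_STAT_CHECK_CONDITION, AC_ERR_DEV },
		{ SAS_ABORTED_TASK,         AC_ERR_DEV },
		/* statuses that map to 0 (UNDERRUN, PROTO_RESPONSE, the
		 * default case) simply fall through to the table miss */
	};

	static int lookup_sas_ata_err(int stat)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(sas_ata_err_map); i++)
			if (sas_ata_err_map[i].sas_stat == stat)
				return sas_ata_err_map[i].ata_err;
		return 0;
	}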
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 161c9b387da7..9f5068f3bcfb 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -75,7 +75,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
- fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
+ fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96
&& (fis->device & ~0x10) == 0)
dev->dev_type = SAS_SATA_PM;
else
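
The sas_discover.c hunk above is purely a spacing fix around the SATA
port-multiplier signature check. For readability, the test could be pulled out
as a predicate -- a sketch using the same struct dev_to_host_fis fields the
hunk touches:

	static bool fis_is_sata_pm(const struct dev_to_host_fis *fis)
	{
		/* A port multiplier identifies itself in the initial D2H
		 * register FIS with the well-known 0x69/0x96 byte-count
		 * signature.
		 */
		return fis->interrupt_reason == 1 && fis->lbal == 1 &&
		       fis->byte_count_low == 0x69 &&
		       fis->byte_count_high == 0x96 &&
		       (fis->device & ~0x10) == 0;
	}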
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 8d6bcc19359f..6d583e8c403a 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -553,7 +553,7 @@ static int sas_ex_manuf_info(struct domain_device *dev)
mi_req[1] = SMP_REPORT_MANUF_INFO;
- res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
+ res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp, MI_RESP_SIZE);
if (res) {
pr_notice("MI: ex %016llx failed:0x%x\n",
SAS_ADDR(dev->sas_addr), res);
@@ -594,13 +594,13 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
pc_req[1] = SMP_PHY_CONTROL;
pc_req[9] = phy_id;
- pc_req[10]= phy_func;
+ pc_req[10] = phy_func;
if (rates) {
pc_req[32] = rates->minimum_linkrate << 4;
pc_req[33] = rates->maximum_linkrate << 4;
}
- res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
+ res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp, PC_RESP_SIZE);
if (res) {
pr_err("ex %016llx phy%02d PHY control failed: %d\n",
SAS_ADDR(dev->sas_addr), phy_id, res);
@@ -678,7 +678,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
req[9] = phy->number;
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
- resp, RPEL_RESP_SIZE);
+ resp, RPEL_RESP_SIZE);
if (res)
goto out;
@@ -714,7 +714,7 @@ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
rps_req[9] = phy_id;
res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
- rps_resp, RPS_RESP_SIZE);
+ rps_resp, RPS_RESP_SIZE);
/* 0x34 is the FIS type for the D2H fis. There's a potential
* standards cockup here. sas-2 explicitly specifies the FIS
@@ -1506,7 +1506,8 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
if (res)
return res;
if (include ^ present)
- return sas_configure_set(dev, phy_id, sas_addr, index,include);
+ return sas_configure_set(dev, phy_id, sas_addr, index,
+ include);
return res;
}
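
The sas_expander.c hunks above are whitespace normalization around
smp_execute_task(); the SMP request layout itself is unchanged. A hedged
sketch of that layout, with offsets taken directly from the hunks (the helper
name is hypothetical):

	static void fill_phy_control_req(u8 *pc_req, int phy_id, int phy_func,
					 const struct sas_phy_linkrates *rates)
	{
		pc_req[1] = SMP_PHY_CONTROL;	/* SMP function code */
		pc_req[9] = phy_id;		/* target phy */
		pc_req[10] = phy_func;		/* requested phy operation */
		if (rates) {
			pc_req[32] = rates->minimum_linkrate << 4;
			pc_req[33] = rates->maximum_linkrate << 4;
		}
	}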
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6ba5fa08c47a..f8de0d10620b 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -782,6 +782,7 @@ struct lpfc_hba {
#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
+#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index bdd9a29f4201..0975a8b252a0 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -487,8 +487,8 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
- atomic_read(&lport->xmt_fcp_err),
- atomic_read(&lport->xmt_fcp_wqerr));
+ atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_err));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
@@ -512,11 +512,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
"6314 Catching potential buffer "
"overflow > PAGE_SIZE = %lu bytes\n",
PAGE_SIZE);
- strlcpy(buf + PAGE_SIZE - 1 -
- strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
+ strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
LPFC_NVME_INFO_MORE_STR,
- strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
- + 1);
+ sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
}
return len;
@@ -864,7 +862,7 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_state_show - Return the link state of the port
+ * lpfc_link_state_show - Return the link state of the port
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains text describing the state of the link.
@@ -1176,8 +1174,7 @@ lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
msleep(20);
if (cnt++ > 250) { /* 5 secs */
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0466 %s %s\n",
- "Outstanding IO when ",
+ "0466 Outstanding IO when "
"bringing Adapter offline\n");
return 0;
}
@@ -1687,8 +1684,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"0071 Set trunk mode failed with status: %d",
rc);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
return 0;
}
@@ -2276,14 +2272,14 @@ lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
{
if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3068 %s_enable_bbcr changed from %d to %d\n",
- LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
+ "3068 lpfc_enable_bbcr changed from %d to "
+ "%d\n", phba->cfg_enable_bbcr, val);
phba->cfg_enable_bbcr = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
- LPFC_DRIVER_NAME, val);
+ "0451 lpfc_enable_bbcr cannot set to %d, range is 0, "
+ "1\n", val);
return -EINVAL;
}
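
The log-string rework in this hunk (and the similar ones below for messages
0451, 0051, 0371 and 0409) trades the LPFC_DRIVER_NAME macro for a literal
"lpfc" so the emitted message exists verbatim in the source. Reduced to a
hypothetical before/after (old and val stand in for the real arguments):

	/* Before: grepping the tree for "lpfc_enable_bbcr changed" finds
	 * nothing, because the prefix is spliced in through %s at run time.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3068 %s_enable_bbcr changed from %d to %d\n",
			LPFC_DRIVER_NAME, old, val);

	/* After: the whole message is a compile-time literal, so a grep
	 * for the logged text lands on this call site.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3068 lpfc_enable_bbcr changed from %d to %d\n",
			old, val);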
@@ -2726,8 +2722,8 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
*/
if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
- " be enabled: fawwpn is enabled\n");
+ "0051 lpfc soft wwpn can not be enabled: "
+ "fawwpn is enabled\n");
return -EINVAL;
}
@@ -3819,7 +3815,7 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
/**
- * lpfc_tgt_queue_depth_store: Sets an attribute value.
+ * lpfc_tgt_queue_depth_set: Sets an attribute value.
* @vport: lpfc vport structure pointer.
* @val: integer attribute value.
*
@@ -4004,7 +4000,7 @@ LPFC_ATTR(topology, 0, 0, 6,
"Select Fibre Channel topology");
/**
- * lpfc_topology_set - Set the adapters topology field
+ * lpfc_topology_store - Set the adapters topology field
* @dev: class device that is converted into a scsi_host.
* @attr:device attribute, not used.
* @buf: buffer for passing information.
@@ -4457,7 +4453,7 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
# Value range is [0,16]. Default value is 0.
*/
/**
- * lpfc_link_speed_set - Set the adapters link speed
+ * lpfc_link_speed_store - Set the adapters link speed
* @dev: Pointer to class device.
* @attr: Unused.
* @buf: Data buffer.
@@ -4858,7 +4854,7 @@ lpfc_param_show(sriov_nr_virtfn)
static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
/**
- * lpfc_request_firmware_store - Request for Linux generic firmware upgrade
+ * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
*
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -5212,8 +5208,8 @@ lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
- "%d out of range, using default\n",
+ "0371 lpfc_cq_max_proc_limit: %d out of range, using "
+ "default\n",
phba->cfg_cq_max_proc_limit);
return 0;
@@ -5222,7 +5218,7 @@ lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
/**
- * lpfc_state_show - Display current driver CPU affinity
+ * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains text describing the state of the link.
@@ -5816,7 +5812,9 @@ lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
}
/* Check if default setting was passed */
- if (val == LPFC_IRQ_CHANN_DEF)
+ if (val == LPFC_IRQ_CHANN_DEF &&
+ phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF &&
+ phba->sli_rev == LPFC_SLI_REV4)
lpfc_assign_default_irq_chann(phba);
if (phba->irq_chann_mode != NORMAL_MODE) {
@@ -5855,7 +5853,12 @@ lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return -EINVAL;
}
- phba->cfg_irq_chann = val;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ phba->cfg_irq_chann = val;
+ } else {
+ phba->cfg_irq_chann = 2;
+ phba->cfg_hdw_queue = 1;
+ }
}
return 0;
@@ -5923,7 +5926,7 @@ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
-# lpfc_prot_mask: i
+# lpfc_prot_mask:
# - Bit mask of host protection capabilities used to register with the
# SCSI mid-layer
# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
@@ -5948,7 +5951,7 @@ LPFC_ATTR(prot_mask,
"T10-DIF host protection capabilities mask");
/*
-# lpfc_prot_guard: i
+# lpfc_prot_guard:
# - Bit mask of protection guard types to register with the SCSI mid-layer
# - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
# - Allows you to ultimately specify which profiles to use
@@ -6040,8 +6043,8 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
- "be set to %d, allowed range is [%d, %d]\n",
+ "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, "
+ "allowed range is [%d, %d]\n",
val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
return -EINVAL;
@@ -6793,15 +6796,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
+ if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
- return NULL;
+ return NULL;
+ }
+ } else {
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return NULL;
+ }
}
memset(hs, 0, sizeof (struct fc_host_statistics));
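
This and the following three hunks restructure the same mailbox-issue pattern
and make the ownership rule explicit: a polled mailbox (MBX_POLL) is always
the caller's to free on failure, while a timed-out lpfc_sli_issue_mbox_wait()
leaves the mailbox owned by the completion handler, so freeing it there would
be a double free. Condensed into a hypothetical helper:

	static int issue_mbox_and_free_on_error(struct lpfc_hba *phba,
						LPFC_MBOXQ_t *pmboxq,
						bool offline)
	{
		int rc;

		if (offline) {
			rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				mempool_free(pmboxq, phba->mbox_mem_pool);
		} else {
			rc = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						      phba->fc_ratov * 2);
			/* On MBX_TIMEOUT the completion handler still owns
			 * pmboxq, so it must not be freed here.
			 */
			if (rc != MBX_SUCCESS && rc != MBX_TIMEOUT)
				mempool_free(pmboxq, phba->mbox_mem_pool);
		}
		return rc;
	}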
@@ -6825,15 +6832,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
+ if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
- return NULL;
+ return NULL;
+ }
+ } else {
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return NULL;
+ }
}
hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
@@ -6906,15 +6917,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
+ if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
- return;
+ return;
+ }
+ } else {
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
}
memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
@@ -6924,15 +6939,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->vport = vport;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- else
+ if (rc != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+ } else {
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT)
- mempool_free( pmboxq, phba->mbox_mem_pool);
- return;
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
}
lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
@@ -7396,7 +7415,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
if (phba->cfg_irq_chann == 0)
phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
- if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
+ phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_irq_chann = phba->cfg_hdw_queue;
phba->cfg_soft_wwnn = 0L;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b974d39d233b..c2776b88d493 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2435,8 +2435,10 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
goto job_error;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!pmboxq)
+ if (!pmboxq) {
+ rc = -ENOMEM;
goto link_diag_test_exit;
+ }
req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
sizeof(struct lpfc_sli4_cfg_mhdr));
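
The two added lines above fix a silent error path: without them, a failed
mempool_alloc() would jump to the exit label with whatever rc happened to
hold. The general shape of the fix (names hypothetical):

	static int example_alloc_step(mempool_t *pool, void **out)
	{
		void *buf;
		int rc = 0;

		buf = mempool_alloc(pool, GFP_KERNEL);
		if (!buf) {
			rc = -ENOMEM;	/* report the real failure, not a
					 * stale rc value */
			goto done;
		}
		*out = buf;
	done:
		return rc;
	}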
@@ -3580,7 +3582,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
- * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
* @phba: Pointer to HBA context object.
*
 * This routine cleans up and resets BSG handling of multi-buffer mbox
@@ -3869,14 +3871,14 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
}
/**
- * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
* @phba: Pointer to HBA context object.
* @job: Pointer to the job object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
- * non-embedded external bufffers.
+ * non-embedded external buffers.
**/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
@@ -4065,7 +4067,7 @@ job_error:
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
- * non-embedded external bufffers.
+ * non-embedded external buffers.
**/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
@@ -4209,7 +4211,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
goto job_error;
}
- /* wait for additoinal external buffers */
+ /* wait for additional external buffers */
bsg_reply->result = 0;
bsg_job_done(job, bsg_reply->result,
@@ -4231,8 +4233,8 @@ job_error:
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
- * external bufffers, including both 0x9B with non-embedded MSEs and 0x9B
- * with embedded sussystem 0x1 and opcodes with external HBDs.
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
@@ -4360,7 +4362,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
}
/**
- * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
* @phba: Pointer to HBA context object.
*
* This routine is for requesting to abort a pass-through mailbox command with
@@ -4557,7 +4559,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
goto job_error;
}
- /* wait for additoinal external buffers */
+ /* wait for additional external buffers */
bsg_reply->result = 0;
bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
@@ -4623,7 +4625,7 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
* @job: Pointer to the job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
- * This routine checkes and handles non-embedded multi-buffer SLI_CONFIG
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
* (0x9B) mailbox commands and external buffers.
**/
static int
@@ -4701,7 +4703,7 @@ sli_cfg_ext_error:
* from the mailbox pool, copy the caller mailbox command.
*
* If offline and the sli is active we need to poll for the command (port is
- * being reset) and com-plete the job, otherwise issue the mailbox command and
+ * being reset) and complete the job, otherwise issue the mailbox command and
* let our completion handler finish the command.
**/
static int
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a0aad4896a45..383abf46fd29 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
-void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_pc_sli4_params(struct lpfcMboxq *);
-int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
uint16_t, uint16_t, bool);
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -103,6 +100,8 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
+void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
@@ -351,8 +350,8 @@ int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, void *);
int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
-int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
- uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
+ lpfc_ctx_cmd abort_cmd);
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
uint16_t, uint64_t, lpfc_ctx_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index dd0b432f7ac5..3bbefa225484 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -137,11 +137,11 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/**
- * lpfc_ct_reject_event : Issue reject for unhandled CT MIB commands
- * @ndlp : pointer to a node-list data structure.
- * ct_req : pointer to the CT request data structure.
- * rx_id : rx_id of the received UNSOL CT command
- * ox_id : ox_id of the UNSOL CT command
+ * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
+ * @ndlp: pointer to a node-list data structure.
+ * @ct_req: pointer to the CT request data structure.
+ * @rx_id: rx_id of the received UNSOL CT command
+ * @ox_id: ox_id of the UNSOL CT command
*
* This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
 * a reject response. A reject response is sent for unhandled commands.
@@ -272,7 +272,7 @@ ct_exit:
/**
* lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
* @phba: pointer to lpfc hba data structure.
- * @ctiocb: pointer to lpfc CT command iocb data structure.
+ * @ctiocbq: pointer to lpfc CT command iocb data structure.
*
 * This routine is used for processing the IOCB associated with an unsolicited
* CT MIB request. It first determines whether there is an existing ndlp that
@@ -777,7 +777,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
- "Data: x%x x%x %p\n",
+ "Data: x%x x%x x%px\n",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt, ndlp);
}
@@ -2253,12 +2253,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
case SLI_MGMT_RPA:
- /* No retry on Vendor RPA */
+ /* No retry on Vendor, RPA only done on physical port */
if (phba->link_flag & LS_CT_VEN_RPA) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_DISCOVERY | LOG_ELS,
- "6460 VEN FDMI RPA failure\n");
phba->link_flag &= ~LS_CT_VEN_RPA;
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_DISCOVERY | LOG_ELS,
+ "6460 VEN FDMI RPA failure\n");
return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
@@ -2306,23 +2306,24 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (phba->link_flag & LS_CT_VEN_RPA) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY | LOG_ELS,
- "6449 VEN RPA Success\n");
+ "6449 VEN RPA FDMI Success\n");
+ phba->link_flag &= ~LS_CT_VEN_RPA;
break;
}
if (lpfc_fdmi_cmd(vport, ndlp, cmd,
LPFC_FDMI_VENDOR_ATTR_mi) == 0)
phba->link_flag |= LS_CT_VEN_RPA;
- lpfc_printf_vlog(vport, KERN_INFO,
+ lpfc_printf_log(phba, KERN_INFO,
LOG_DISCOVERY | LOG_ELS,
"6458 Send MI FDMI:%x Flag x%x\n",
phba->sli4_hba.pc_sli4_params.mi_value,
phba->link_flag);
} else {
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY | LOG_ELS,
- "6459 No FDMI VEN MI support - "
- "RPA Success\n");
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS,
+ "6459 No FDMI VEN MI support - "
+ "RPA Success\n");
}
break;
}
@@ -2369,10 +2370,13 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
* DHBA -> DPRT -> RHBA -> RPA (physical port)
* DPRT -> RPRT (vports)
*/
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ /* For extra Vendor RPA */
+ phba->link_flag &= ~LS_CT_VEN_RPA;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
- else
+ } else {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+ }
/* Since this code path registers all the port attributes
* we can just return without further checking.
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 46a8f2d1d2b8..658a962832b3 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -381,7 +381,7 @@ skipit:
static int lpfc_debugfs_last_xripool;
/**
- * lpfc_debugfs_common_xri_data - Dump Hardware Queue info to a buffer
+ * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer
* @phba: The HBA to gather host buffer info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
@@ -869,7 +869,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
"WWNN x%llx ",
wwn_to_u64(ndlp->nlp_nodename.u.wwn));
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- len += scnprintf(buf+len, size-len, "RPI:%03d ",
+ len += scnprintf(buf+len, size-len, "RPI:%04d ",
ndlp->nlp_rpi);
else
len += scnprintf(buf+len, size-len, "RPI:none ");
@@ -895,7 +895,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
if (ndlp->nlp_type & NLP_NVME_INITIATOR)
len += scnprintf(buf + len,
size - len, "NVME_INITIATOR ");
- len += scnprintf(buf+len, size-len, "refcnt:%x",
+ len += scnprintf(buf+len, size-len, "refcnt:%d",
kref_read(&ndlp->kref));
if (iocnt) {
i = atomic_read(&ndlp->cmd_pending);
@@ -904,8 +904,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
i, ndlp->cmd_qdepth);
outio += i;
}
- len += scnprintf(buf + len, size - len, "defer:%x ",
- ndlp->nlp_defer_did);
+ len += scnprintf(buf+len, size-len, " xpt:x%x",
+ ndlp->fc4_xpt_flags);
+ if (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)
+ len += scnprintf(buf+len, size-len, " defer:%x",
+ ndlp->nlp_defer_did);
len += scnprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
@@ -5151,7 +5154,7 @@ error_out:
* This routine is to get the available extent information.
*
* Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
**/
static int
lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
@@ -5202,7 +5205,7 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
* This routine is to get the allocated extent information.
*
* Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
**/
static int
lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
@@ -5274,7 +5277,7 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
* This routine is to get the driver extent information.
*
* Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
**/
static int
lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 8ce13ef3cac3..08999aad6a10 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -159,7 +159,6 @@ struct lpfc_node_rrq {
uint16_t rxid;
uint32_t nlp_DID; /* FC D_ID of entry */
struct lpfc_vport *vport;
- struct lpfc_nodelist *ndlp;
unsigned long rrq_stop_time;
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f0a758138ae8..21108f322c99 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1182,7 +1182,8 @@ flogifail:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
if (!lpfc_error_lost_link(irsp)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -1199,6 +1200,7 @@ flogifail:
lpfc_issue_clear_la(phba, vport);
}
out:
+ phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
}
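
The many lpfc_els.c hunks below (FLOGI, PLOGI, PRLI, ADISC, LOGO, SCR, RSCN,
RDF) converge on one reference-handling shape in the issue paths: take a node
reference for the IOCB, and on any submission failure release both the IOCB
and that reference before returning. A condensed sketch of that shape -- not
the exact driver code; the error value and SND-flag handling vary per caller:

	static int example_issue_els(struct lpfc_hba *phba,
				     struct lpfc_iocbq *elsiocb,
				     struct lpfc_nodelist *ndlp)
	{
		int rc;

		elsiocb->context1 = lpfc_nlp_get(ndlp);
		if (!elsiocb->context1) {
			lpfc_els_free_iocb(phba, elsiocb);
			return 1;
		}

		rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
		if (rc == IOCB_ERROR) {
			lpfc_els_free_iocb(phba, elsiocb);
			lpfc_nlp_put(ndlp);	/* drop the reference taken
						 * above for the IOCB */
			return 1;
		}
		return 0;
	}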
@@ -1249,10 +1251,9 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* function field. The lpfc_issue_fabric_iocb routine is invoked to send
* out FLOGI ELS command with one outstanding fabric IOCB at a time.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the FLOGI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the FLOGI ELS command.
*
* Return code
* 0 - successfully issued flogi iocb for @vport
@@ -1341,14 +1342,19 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->sli3_options, 0, 0);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto out;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
- if (rc == IOCB_ERROR)
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
+ return 1;
+ }
- phba->hba_flag |= HBA_FLOGI_ISSUED;
+ phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
/* Check for a deferred FLOGI ACC condition */
if (phba->defer_flogi_acc_flag) {
@@ -1376,11 +1382,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_myDID = did;
}
- if (!rc)
- return 0;
- out:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ return 0;
}
/**
@@ -1423,9 +1425,14 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
icmd = &iocb->iocb;
if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
- if (ndlp && (ndlp->nlp_DID == Fabric_DID))
+ if (ndlp && ndlp->nlp_DID == Fabric_DID) {
+ if ((phba->pport->fc_flag & FC_PT2PT) &&
+ !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
+ iocb->fabric_iocb_cmpl =
+ lpfc_ignore_els_cmpl;
lpfc_sli_issue_abort_iotag(phba, pring, iocb,
NULL);
+ }
}
}
/* Make sure HBA is alive */
@@ -1600,7 +1607,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *new_ndlp;
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
- uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+ uint32_t keepDID = 0, keep_nlp_flag = 0;
uint32_t keep_new_nlp_flag = 0;
uint16_t keep_nlp_state;
u32 keep_nlp_fc4_type = 0;
@@ -1622,7 +1629,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
/* return immediately if the WWPN matches ndlp */
- if (new_ndlp == ndlp)
+ if (!new_ndlp || (new_ndlp == ndlp))
return ndlp;
if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -1641,30 +1648,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(new_ndlp ? new_ndlp->nlp_flag : 0),
(new_ndlp ? new_ndlp->nlp_fc4_type : 0));
- if (!new_ndlp) {
- rc = memcmp(&ndlp->nlp_portname, name,
- sizeof(struct lpfc_name));
- if (!rc) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
- if (!new_ndlp) {
- if (active_rrqs_xri_bitmap)
- mempool_free(active_rrqs_xri_bitmap,
- phba->active_rrq_pool);
- return ndlp;
- }
- } else {
- keepDID = new_ndlp->nlp_DID;
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- active_rrqs_xri_bitmap)
- memcpy(active_rrqs_xri_bitmap,
- new_ndlp->active_rrqs_xri_bitmap,
- phba->cfg_rrq_xri_bitmap_sz);
- }
+ keepDID = new_ndlp->nlp_DID;
+
+ if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
/* At this point in this routine, we know new_ndlp will be
* returned. however, any previous GID_FTs that were done
@@ -1703,6 +1691,15 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
else
new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+ /*
+ * Retain the DROPPED flag. This will take care of the init
+ * refcount when affecting the state change
+ */
+ if (keep_new_nlp_flag & NLP_DROPPED)
+ new_ndlp->nlp_flag |= NLP_DROPPED;
+ else
+ new_ndlp->nlp_flag &= ~NLP_DROPPED;
+
ndlp->nlp_flag = keep_new_nlp_flag;
/* if ndlp had NLP_UNREG_INP set, keep it */
@@ -1717,6 +1714,15 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
else
ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+ /*
+ * Retain the DROPPED flag. This will take care of the init
+ * refcount when affecting the state change
+ */
+ if (keep_nlp_flag & NLP_DROPPED)
+ ndlp->nlp_flag |= NLP_DROPPED;
+ else
+ ndlp->nlp_flag &= ~NLP_DROPPED;
+
spin_unlock_irq(&new_ndlp->lock);
spin_unlock_irq(&ndlp->lock);
@@ -1849,7 +1855,7 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
IOCB_t *irsp;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = cmdiocb->context1;
struct lpfc_node_rrq *rrq;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -1862,22 +1868,12 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->un.elsreq64.remoteID);
- ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp || ndlp != rrq->ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2882 RRQ completes to NPort x%x "
- "with no ndlp. Data: x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpIoTag);
- goto out;
- }
-
/* rrq completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "2880 RRQ completes to NPort x%x "
+ "2880 RRQ completes to DID x%x "
"Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, rrq->xritag, rrq->rxid);
if (irsp->ulpStatus) {
@@ -1893,10 +1889,8 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
}
-out:
- if (rrq)
- lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
return;
@@ -1912,7 +1906,7 @@ out:
* ndlp on the vport node list that matches the remote node ID from the
* PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
* ignored and command IOCB released. The PLOGI response IOCB status is
- * checked for error conditons. If there is error status reported, PLOGI
+ * checked for error conditions. If there is error status reported, PLOGI
* retry shall be attempted by invoking the lpfc_els_retry() routine.
* Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
* the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
@@ -2063,13 +2057,12 @@ out_freeiocb:
* This routine issues a Port Login (PLOGI) command to a remote N_Port
* (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
* the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
- * This routine constructs the proper feilds of the PLOGI IOCB and invokes
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
* the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PLOGI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding
+ * the ndlp and the reference to ndlp will be stored into the context1 field
+ * of the IOCB for the completion callback function to the PLOGI ELS command.
*
* Return code
* 0 - Successfully issued a plogi for @vport
@@ -2087,29 +2080,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
int ret;
ndlp = lpfc_findnode_did(vport, did);
+ if (!ndlp)
+ return 1;
- if (ndlp) {
- /* Defer the processing of the issue PLOGI until after the
- * outstanding UNREG_RPI mbox command completes, unless we
- * are going offline. This logic does not apply for Fabric DIDs
- */
- if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
- ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- !(vport->fc_flag & FC_OFFLINE_MODE)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4110 Issue PLOGI x%x deferred "
- "on NPort x%x rpi x%x Data: x%px\n",
- ndlp->nlp_defer_did, ndlp->nlp_DID,
- ndlp->nlp_rpi, ndlp);
-
- /* We can only defer 1st PLOGI */
- if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
- ndlp->nlp_defer_did = did;
- return 0;
- }
+ /* Defer the processing of the issue PLOGI until after the
+ * outstanding UNREG_RPI mbox command completes, unless we
+ * are going offline. This logic does not apply for Fabric DIDs
+ */
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+ ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+ !(vport->fc_flag & FC_OFFLINE_MODE)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4110 Issue PLOGI x%x deferred "
+ "on NPort x%x rpi x%x Data: x%px\n",
+ ndlp->nlp_defer_did, ndlp->nlp_DID,
+ ndlp->nlp_rpi, ndlp);
+
+ /* We can only defer 1st PLOGI */
+ if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+ ndlp->nlp_defer_did = did;
+ return 0;
}
- /* If ndlp is not NULL, we will bump the reference count on it */
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
ELS_CMD_PLOGI);
@@ -2165,19 +2157,19 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
"Issue PLOGI: did:x%x refcnt %d",
did, kref_read(&ndlp->kref), 0);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto io_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (ret) {
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
- goto io_err;
+ return 1;
}
- return 0;
- io_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ return 0;
}
/**
@@ -2259,9 +2251,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp))
- goto out;
- else
+ if (!lpfc_error_lost_link(irsp))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
@@ -2306,10 +2296,9 @@ out:
* is put to the IOCB completion callback func field before invoking the
* routine lpfc_sli_issue_iocb() to send out PRLI command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PRLI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the PRLI ELS command.
*
* Return code
* 0 - successfully issued prli iocb command for @vport
@@ -2471,12 +2460,17 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue PRLI: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto io_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto err;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto node_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto err;
+ }
/* The driver supports 2 FC4 types. Make sure
@@ -2488,13 +2482,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
return 0;
- node_err:
- lpfc_nlp_put(ndlp);
- io_err:
+err:
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
spin_unlock_irq(&ndlp->lock);
- lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -2733,10 +2724,9 @@ out:
* and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
* to issue the ADISC ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the ADISC ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the ADISC ELS command.
*
* Return code
* 0 - successfully issued adisc
@@ -2778,24 +2768,27 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_ADISC_SND;
spin_unlock_irq(&ndlp->lock);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto err;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue ADISC: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto err;
+ }
+
return 0;
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
+err:
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_ADISC_SND;
spin_unlock_irq(&ndlp->lock);
- lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -2808,8 +2801,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* This routine is the completion function for issuing the ELS Logout (LOGO)
* command. If no error status was reported from the LOGO response, the
* state machine of the associated ndlp shall be invoked for transition with
- * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
- * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ * respect to NLP_EVT_CMPL_LOGO event.
**/
static void
lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -2946,10 +2938,9 @@ out:
* payload of the IOCB, properly sets up the @ndlp state, and invokes the
* lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the LOGO ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the LOGO ELS command.
*
* Callers of this routine are expected to unregister the RPI first
*
@@ -2996,15 +2987,20 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
spin_unlock_irq(&ndlp->lock);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto err;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue LOGO: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto err;
+ }
spin_lock_irq(&ndlp->lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
@@ -3012,13 +3008,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
return 0;
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
+err:
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
spin_unlock_irq(&ndlp->lock);
- lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -3183,10 +3176,9 @@ out:
* IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
* routine is invoked to send the SCR IOCB.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the SCR ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the SCR ELS command.
*
* Return code
* 0 - Successfully issued scr command
@@ -3234,25 +3226,24 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue SCR: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
/* Keep the ndlp just in case RDF is being sent */
return 0;
-
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
}
/**
@@ -3266,10 +3257,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
* in point-to-point mode. When sent to the Fabric Controller, it will
* replay the RSCN to registered recipients.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RSCN ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RSCN ELS command.
*
* Return code
* 0 - Successfully issued RSCN command
@@ -3334,16 +3324,21 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
phba->fc_stat.elsXmitRSCN++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue RSCN: did:x%x",
ndlp->nlp_DID, 0, 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
@@ -3351,11 +3346,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
if (!(vport->fc_flag & FC_PT2PT))
lpfc_nlp_put(ndlp);
return 0;
-io_err:
- lpfc_nlp_put(ndlp);
-node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
}
/**
@@ -3371,10 +3361,9 @@ node_err:
* for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
* lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PARPR ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the FARPR ELS command.
*
* Return code
* 0 - Successfully issued farpr command
@@ -3450,8 +3439,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
* lpfc_els_free_iocb routine to trigger the release of
* the node.
*/
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
/* This will cause the callback-function lpfc_cmpl_els_cmd to
@@ -3469,10 +3458,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
* This routine issues an ELS RDF to the Fabric Controller to register
* for diagnostic functions.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RDF ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RDF ELS command.
*
* Return code
* 0 - Successfully issued rdf command
@@ -3531,23 +3519,22 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue RDF: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return -EIO;
+ }
return 0;
-
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return -EIO;
}
/**
@@ -3784,7 +3771,7 @@ lpfc_link_reset(struct lpfc_vport *vport)
* This routine makes a retry decision on an ELS command IOCB, which has
* failed. The following ELS IOCBs use this function for retrying the command
 * when a previously issued command responded with error status: FLOGI, PLOGI,
- * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * PRLI, ADISC and FDISC. Based on the ELS command type and the
* returned error status, it makes the decision whether a retry shall be
* issued for the command, and whether a retry shall be made immediately or
* delayed. In the former case, the corresponding ELS command issuing-function
@@ -3829,12 +3816,12 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
did = irsp->un.elsreq64.remoteID;
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp && (cmd != ELS_CMD_PLOGI))
- return 1;
+ return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Retry ELS: wd7:x%x wd4:x%x did:x%x",
- *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+ *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did);
switch (irsp->ulpStatus) {
case IOSTAT_FCP_RSP_ERROR:
@@ -4391,7 +4378,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0109 ACC to LOGO completes to NPort x%x refcnt %d"
+ "0109 ACC to LOGO completes to NPort x%x refcnt %d "
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
@@ -4473,10 +4460,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* nlp_flag bitmap in the ndlp data structure, if the mbox command reference
* field in the command IOCB is not NULL, the referred mailbox command will
* be send out, and then invokes the lpfc_els_free_iocb() routine to release
- * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
- * link down event occurred during the discovery, the lpfc_nlp_not_used()
- * routine shall be invoked trying to release the ndlp if no other threads
- * are currently referring it.
+ * the IOCB.
**/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -4486,10 +4470,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
IOCB_t *irsp;
- uint8_t *pcmd;
LPFC_MBOXQ_t *mbox = NULL;
struct lpfc_dmabuf *mp = NULL;
- uint32_t ls_rjt = 0;
irsp = &rspiocb->iocb;
@@ -4501,18 +4483,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
- /* First determine if this is a LS_RJT cmpl. Note, this callback
- * function can have cmdiocb->contest1 (ndlp) field set to NULL.
- */
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
- if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
- /* A LS_RJT associated with Default RPI cleanup has its own
- * separate code path.
- */
- if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
- ls_rjt = 1;
- }
-
/* Check to see if link went down during discovery */
if (!ndlp || lpfc_els_chk_latt(vport)) {
if (mbox) {
@@ -4523,15 +4493,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
mempool_free(mbox, phba->mbox_mem_pool);
}
- if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
- if (lpfc_nlp_not_used(ndlp)) {
- ndlp = NULL;
- /* Indicate the node has already released,
- * should not reference to it from within
- * the routine lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
- }
goto out;
}
@@ -4609,29 +4570,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
-
- if (lpfc_nlp_not_used(ndlp)) {
- ndlp = NULL;
- /* Indicate node has already been released,
- * should not reference to it from within
- * the routine lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
- }
- } else {
- /* Do not drop node for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp) &&
- ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
- if (lpfc_nlp_not_used(ndlp)) {
- ndlp = NULL;
- /* Indicate node has already been
- * released, should not reference
- * to it from within the routine
- * lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
- }
- }
}
mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
if (mp) {
@@ -4647,19 +4585,6 @@ out:
ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
spin_unlock_irq(&ndlp->lock);
-
- /* If the node is not being used by another discovery thread,
- * and we are sending a reject, we are done with it.
- * Release driver reference count here and free associated
- * resources.
- */
- if (ls_rjt)
- if (lpfc_nlp_not_used(ndlp))
- /* Indicate node has already been released,
- * should not reference to it from within
- * the routine lpfc_els_free_iocb.
- */
- cmdiocb->context1 = NULL;
}
/* Release the originating I/O reference. */
@@ -4684,10 +4609,10 @@ out:
* field of the IOCB for the completion callback function to issue the
* mailbox command to the HBA later when callback is invoked.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the corresponding response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the corresponding
+ * response ELS IOCB command.
*
* Return code
* 0 - Successfully issued acc response
@@ -4834,12 +4759,17 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -4850,16 +4780,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag);
return 0;
-
-io_err:
- lpfc_nlp_put(ndlp);
-node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
}
/**
- * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
+ * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
* @vport: pointer to a virtual N_Port data structure.
* @rejectError: reject response to issue
* @oldiocb: pointer to the original lpfc command iocb data structure.
@@ -4871,10 +4795,10 @@ node_err:
* context_un.mbox field of the IOCB for the completion callback function
* to issue to the HBA later.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the reject response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the reject response
+ * ELS IOCB command.
*
* Return code
* 0 - Successfully issued reject response
@@ -4927,20 +4851,19 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
phba->fc_stat.elsXmitLSRJT++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
return 0;
-
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
}
/**
@@ -4953,10 +4876,10 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
* Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the ADISC Accept response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the ADISC Accept response
+ * ELS IOCB command.
*
* Return code
* 0 - Successfully issued acc adisc response
@@ -5010,12 +4933,17 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -5026,12 +4954,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag);
return 0;
-
-io_err:
- lpfc_nlp_put(ndlp);
-node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
}
/**
@@ -5044,10 +4966,10 @@ node_err:
* Login (PRLI) ELS command. It simply prepares the payload of the IOCB
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PRLI Accept response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the PRLI Accept response
+ * ELS IOCB command.
*
* Return code
* 0 - Successfully issued acc prli response
@@ -5185,19 +5107,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
- return 0;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ return 0;
}
/**
@@ -5210,17 +5132,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
* This routine issues a Request Node Identification Data (RNID) Accept
* (ACC) response. It constructs the RNID ACC response command according to
* the proper @format and then calls the lpfc_sli_issue_iocb() routine to
- * issue the response. Note that this command does not need to hold the ndlp
- * reference count for the callback. So, the ndlp reference count taken by
- * the lpfc_prep_els_iocb() routine is put back and the context1 field of
- * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
- * there is no ndlp reference available.
- *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function. However, for the RNID Accept Response ELS command,
- * this is undone later by this routine after the IOCB is allocated.
+ * issue the response.
+ *
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function.
*
* Return code
* 0 - Successfully issued acc rnid response
@@ -5292,20 +5208,19 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
return 0;
-
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
}
/**
@@ -5407,19 +5322,19 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
phba->fc_stat.elsXmitACC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
- return 0;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ return 0;
}
/**
@@ -6063,8 +5978,8 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
}
goto free_rdp_context;
@@ -6095,8 +6010,8 @@ error:
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
}
free_rdp_context:
@@ -6308,16 +6223,16 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
-
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (!rc)
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
goto out;
+ }
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
out:
kfree(lcb_context);
return;
@@ -6353,8 +6268,8 @@ error:
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
}
free_lcb_context:
kfree(lcb_context);
@@ -7342,16 +7257,16 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*
* This routine is the completion callback function for the MBX_READ_LNK_STAT
* mailbox command. This callback function is to actually send the Accept
- * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
* collects the link statistics from the completion of the MBX_READ_LNK_STAT
- * mailbox command, constructs the RPS response with the link statistics
+ * mailbox command, constructs the RLS response with the link statistics
* collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
- * response to the RPS.
+ * response to the RLS.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RLS Accept Response
+ * ELS IOCB command.
*
**/
static void
@@ -7420,18 +7335,17 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ }
return;
-
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
}
/**
@@ -7510,10 +7424,10 @@ reject_out:
 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
* Value (RTV) unsolicited IOCB event.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RTV Accept Response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RTV Accept Response
+ * ELS IOCB command.
*
* Return codes
* 0 - Successfully processed rtv iocb (currently always return 0)
@@ -7580,8 +7494,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
}
return 0;
@@ -7619,9 +7533,6 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint16_t cmdsize;
int ret;
-
- if (ndlp != rrq->ndlp)
- ndlp = rrq->ndlp;
if (!ndlp)
return 1;
@@ -7651,9 +7562,9 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
did, rrq->xritag, rrq->rxid);
elsiocb->context_un.rrq = rrq;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+
+ lpfc_nlp_get(ndlp);
+ elsiocb->context1 = ndlp;
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (ret == IOCB_ERROR)
@@ -7661,9 +7572,8 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
io_err:
- lpfc_nlp_put(ndlp);
- node_err:
lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
return 1;
}
@@ -7704,10 +7614,10 @@ lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
* It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPL Accept Response ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RPL Accept Response
+ * ELS command.
*
* Return code
* 0 - Successfully issued ACC RPL ELS command
@@ -7760,19 +7670,19 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
- return 0;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
- lpfc_els_free_iocb(phba, elsiocb);
- return 1;
+ return 0;
}
/**
@@ -9598,10 +9508,9 @@ out:
* routine to issue the IOCB, which makes sure only one outstanding fabric
 * IOCB will be sent off the HBA at any given time.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the FDISC ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the FDISC ELS command.
*
* Return code
* 0 - Successfully issued fdisc iocb command
@@ -9678,11 +9587,14 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
did, 0, 0);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
goto err_out;
+ }
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
goto err_out;
}
@@ -9691,7 +9603,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
err_out:
- lpfc_els_free_iocb(phba, elsiocb);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0256 Issue FDISC: Cannot send IOCB\n");
@@ -9757,10 +9668,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*
* This routine issues a LOGO ELS command to an @ndlp off a @vport.
*
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the LOGO ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the LOGO ELS command.
*
* Return codes
* 0 - Successfully issued logo off the @vport
@@ -9799,20 +9709,23 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(&ndlp->lock);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
- goto node_err;
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ goto err;
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (rc == IOCB_ERROR)
- goto io_err;
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto err;
+ }
return 0;
- io_err:
- lpfc_nlp_put(ndlp);
- node_err:
+err:
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
spin_unlock_irq(&ndlp->lock);
- lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -10074,7 +9987,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
* driver internal fabric IOCB list. The list contains fabric IOCBs to be
* issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport off the list, sets the
- * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
* associated with the IOCB.
**/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
@@ -10107,7 +10020,7 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
* driver internal fabric IOCB list. The list contains fabric IOCBs to be
* issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp off the list, sets the
- * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
* associated with the IOCB.
**/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
@@ -10144,7 +10057,7 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
* This routine aborts all the IOCBs currently on the driver internal
* fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
* IOCB ring. This function takes the entire IOCB list off the fabric IOCB
- * list, removes IOCBs off the list, set the status feild to
+ * list, removes IOCBs off the list, sets the status field to
* IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
* the IOCB.
**/
@@ -10175,8 +10088,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0;
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
@@ -10184,8 +10096,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
sglq_entry->ndlp = NULL;
}
}
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
return;
}
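
Here and in lpfc_sli4_els_xri_aborted() just below, the list walk takes sgl_list_lock directly with irqsave instead of nesting it under hbalock; presumably sgl_list_lock alone guards the abts/els sgl lists, so the outer lock only widened the critical section. A compilable pthread analogy of the flattening (hypothetical locks, not the kernel spinlock API):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins: "outer" plays hbalock, "list_lock" plays
 * sgl_list_lock. Only list_lock actually guards the counter below. */
static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int sgl_entries;

/* Old shape: nested acquisition. The outer lock lengthens the critical
 * section and imposes lock-ordering rules without adding protection. */
static void walk_nested(void)
{
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&list_lock);
	sgl_entries++;
	pthread_mutex_unlock(&list_lock);
	pthread_mutex_unlock(&outer);
}

/* New shape: take only the lock that owns the data. */
static void walk_flat(void)
{
	pthread_mutex_lock(&list_lock);
	sgl_entries++;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	walk_nested();
	walk_flat();
	printf("entries=%d\n", sgl_entries);
	return 0;
}

Note that hbalock is still taken in the second hunk, but apparently only around __lpfc_get_active_sglq(), the lookup it does cover.
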
@@ -10212,8 +10123,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
pring = lpfc_phba_elsring(phba);
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->sli4_xritag == xri) {
@@ -10223,8 +10133,8 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
list_add_tail(&sglq_entry->list,
&phba->sli4_hba.lpfc_els_sgl_list);
sglq_entry->state = SGL_FREED;
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+ iflag);
if (ndlp) {
lpfc_set_rrq_active(phba, ndlp,
@@ -10239,21 +10149,18 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
lxri = lpfc_sli4_xri_inrange(phba, xri);
- if (lxri == NO_XRI) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (lxri == NO_XRI)
return;
- }
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
sglq_entry->state = SGL_XRI_ABORTED;
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 48ca4a612f80..f5a898c2c904 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -102,20 +102,20 @@ lpfc_rport_invalid(struct fc_rport *rport)
rdata = rport->dd_data;
if (!rdata) {
- pr_err("**** %s: NULL dd_data on rport %p SID x%x\n",
+ pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
__func__, rport, rport->scsi_target_id);
return -EINVAL;
}
ndlp = rdata->pnode;
if (!rdata->pnode) {
- pr_err("**** %s: NULL ndlp on rport %p SID x%x\n",
+ pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
__func__, rport, rport->scsi_target_id);
return -EINVAL;
}
if (!ndlp->vport) {
- pr_err("**** %s: Null vport on ndlp %p, DID x%x rport %p "
+ pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
"SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
rport->scsi_target_id);
return -EINVAL;
@@ -140,11 +140,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
"rport terminate: sid:x%x did:x%x flg:x%x",
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
- if (ndlp->nlp_sid != NLP_NO_SID) {
- lpfc_sli_abort_iocb(vport,
- &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- }
+ if (ndlp->nlp_sid != NLP_NO_SID)
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
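
Both call sites of lpfc_sli_abort_iocb() drop the explicit &vport->phba->sli.sli3_ring[LPFC_FCP_RING] argument; presumably the helper now resolves the ring itself, since target/LUN aborts only ever apply to FCP traffic. A tiny sketch of that kind of parameter narrowing, with hypothetical names:

#include <stdio.h>

enum ring { ELS_RING, FCP_RING };	/* hypothetical ring identifiers */

static void abort_on_ring(enum ring r, int sid)
{
	printf("abort ring=%d sid=%d\n", r, sid);
}

/* Callers name only the target; the ring the abort applies to is a
 * fixed property of the operation, so it is derived here instead of
 * being threaded through every call site. */
static void abort_target(int sid)
{
	abort_on_ring(FCP_RING, sid);
}

int main(void)
{
	abort_target(3);
	return 0;
}
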
/*
@@ -171,7 +168,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport %p flg x%x "
+ "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
"load_flag x%x refcnt %d\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref));
@@ -299,8 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
- lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
if (warn_on) {
@@ -1486,7 +1482,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
+ * __lpfc_update_fcf_record - Update driver fcf record
* @phba: pointer to lpfc hba data structure.
* @fcf_rec: pointer to driver fcf record.
* @new_fcf_record: pointer to hba fcf record.
@@ -5956,10 +5952,12 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* DHBA -> DPRT -> RHBA -> RPA (physical port)
* DPRT -> RPRT (vports)
*/
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
- else
+ } else {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+ }
/* decrement the node reference count held for this callback
@@ -6081,12 +6079,12 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
* Translate the physical vpi to the logical vpi. The
* vport stores the logical vpi.
*/
- for (i = 0; i < phba->max_vpi; i++) {
+ for (i = 0; i <= phba->max_vpi; i++) {
if (vpi == phba->vpi_ids[i])
break;
}
- if (i >= phba->max_vpi) {
+ if (i > phba->max_vpi) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2936 Could not find Vport mapped "
"to vpi %d\n", vpi);
@@ -6170,7 +6168,7 @@ lpfc_nlp_release(struct kref *kref)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0279 %s: ndlp:%p did %x refcnt:%d rpi:%x\n",
+ "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
__func__, ndlp, ndlp->nlp_DID,
kref_read(&ndlp->kref), ndlp->nlp_rpi);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 541b9aef6bfe..f77e71e6dbbd 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -124,6 +124,7 @@ struct lpfc_sli_intf {
/* Define SLI4 Alignment requirements. */
#define LPFC_ALIGN_16_BYTE 16
#define LPFC_ALIGN_64_BYTE 64
+#define SLI4_PAGE_SIZE 4096
/* Define SLI4 specific definitions. */
#define LPFC_MQ_CQE_BYTE_OFFSET 256
@@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
};
-struct lpfc_mbx_supp_pages {
- uint32_t word1;
-#define qs_SHIFT 0
-#define qs_MASK 0x00000001
-#define qs_WORD word1
-#define wr_SHIFT 1
-#define wr_MASK 0x00000001
-#define wr_WORD word1
-#define pf_SHIFT 8
-#define pf_MASK 0x000000ff
-#define pf_WORD word1
-#define cpn_SHIFT 16
-#define cpn_MASK 0x000000ff
-#define cpn_WORD word1
- uint32_t word2;
-#define list_offset_SHIFT 0
-#define list_offset_MASK 0x000000ff
-#define list_offset_WORD word2
-#define next_offset_SHIFT 8
-#define next_offset_MASK 0x000000ff
-#define next_offset_WORD word2
-#define elem_cnt_SHIFT 16
-#define elem_cnt_MASK 0x000000ff
-#define elem_cnt_WORD word2
- uint32_t word3;
-#define pn_0_SHIFT 24
-#define pn_0_MASK 0x000000ff
-#define pn_0_WORD word3
-#define pn_1_SHIFT 16
-#define pn_1_MASK 0x000000ff
-#define pn_1_WORD word3
-#define pn_2_SHIFT 8
-#define pn_2_MASK 0x000000ff
-#define pn_2_WORD word3
-#define pn_3_SHIFT 0
-#define pn_3_MASK 0x000000ff
-#define pn_3_WORD word3
- uint32_t word4;
-#define pn_4_SHIFT 24
-#define pn_4_MASK 0x000000ff
-#define pn_4_WORD word4
-#define pn_5_SHIFT 16
-#define pn_5_MASK 0x000000ff
-#define pn_5_WORD word4
-#define pn_6_SHIFT 8
-#define pn_6_MASK 0x000000ff
-#define pn_6_WORD word4
-#define pn_7_SHIFT 0
-#define pn_7_MASK 0x000000ff
-#define pn_7_WORD word4
- uint32_t rsvd[27];
-#define LPFC_SUPP_PAGES 0
-#define LPFC_BLOCK_GUARD_PROFILES 1
-#define LPFC_SLI4_PARAMETERS 2
-};
-
struct lpfc_mbx_memory_dump_type3 {
uint32_t word1;
#define lpfc_mbx_memory_dump_type3_type_SHIFT 0
@@ -3248,121 +3193,6 @@ struct user_eeprom {
uint8_t reserved191[57];
};
-struct lpfc_mbx_pc_sli4_params {
- uint32_t word1;
-#define qs_SHIFT 0
-#define qs_MASK 0x00000001
-#define qs_WORD word1
-#define wr_SHIFT 1
-#define wr_MASK 0x00000001
-#define wr_WORD word1
-#define pf_SHIFT 8
-#define pf_MASK 0x000000ff
-#define pf_WORD word1
-#define cpn_SHIFT 16
-#define cpn_MASK 0x000000ff
-#define cpn_WORD word1
- uint32_t word2;
-#define if_type_SHIFT 0
-#define if_type_MASK 0x00000007
-#define if_type_WORD word2
-#define sli_rev_SHIFT 4
-#define sli_rev_MASK 0x0000000f
-#define sli_rev_WORD word2
-#define sli_family_SHIFT 8
-#define sli_family_MASK 0x000000ff
-#define sli_family_WORD word2
-#define featurelevel_1_SHIFT 16
-#define featurelevel_1_MASK 0x000000ff
-#define featurelevel_1_WORD word2
-#define featurelevel_2_SHIFT 24
-#define featurelevel_2_MASK 0x0000001f
-#define featurelevel_2_WORD word2
- uint32_t word3;
-#define fcoe_SHIFT 0
-#define fcoe_MASK 0x00000001
-#define fcoe_WORD word3
-#define fc_SHIFT 1
-#define fc_MASK 0x00000001
-#define fc_WORD word3
-#define nic_SHIFT 2
-#define nic_MASK 0x00000001
-#define nic_WORD word3
-#define iscsi_SHIFT 3
-#define iscsi_MASK 0x00000001
-#define iscsi_WORD word3
-#define rdma_SHIFT 4
-#define rdma_MASK 0x00000001
-#define rdma_WORD word3
- uint32_t sge_supp_len;
-#define SLI4_PAGE_SIZE 4096
- uint32_t word5;
-#define if_page_sz_SHIFT 0
-#define if_page_sz_MASK 0x0000ffff
-#define if_page_sz_WORD word5
-#define loopbk_scope_SHIFT 24
-#define loopbk_scope_MASK 0x0000000f
-#define loopbk_scope_WORD word5
-#define rq_db_window_SHIFT 28
-#define rq_db_window_MASK 0x0000000f
-#define rq_db_window_WORD word5
- uint32_t word6;
-#define eq_pages_SHIFT 0
-#define eq_pages_MASK 0x0000000f
-#define eq_pages_WORD word6
-#define eqe_size_SHIFT 8
-#define eqe_size_MASK 0x000000ff
-#define eqe_size_WORD word6
- uint32_t word7;
-#define cq_pages_SHIFT 0
-#define cq_pages_MASK 0x0000000f
-#define cq_pages_WORD word7
-#define cqe_size_SHIFT 8
-#define cqe_size_MASK 0x000000ff
-#define cqe_size_WORD word7
- uint32_t word8;
-#define mq_pages_SHIFT 0
-#define mq_pages_MASK 0x0000000f
-#define mq_pages_WORD word8
-#define mqe_size_SHIFT 8
-#define mqe_size_MASK 0x000000ff
-#define mqe_size_WORD word8
-#define mq_elem_cnt_SHIFT 16
-#define mq_elem_cnt_MASK 0x000000ff
-#define mq_elem_cnt_WORD word8
- uint32_t word9;
-#define wq_pages_SHIFT 0
-#define wq_pages_MASK 0x0000ffff
-#define wq_pages_WORD word9
-#define wqe_size_SHIFT 8
-#define wqe_size_MASK 0x000000ff
-#define wqe_size_WORD word9
- uint32_t word10;
-#define rq_pages_SHIFT 0
-#define rq_pages_MASK 0x0000ffff
-#define rq_pages_WORD word10
-#define rqe_size_SHIFT 8
-#define rqe_size_MASK 0x000000ff
-#define rqe_size_WORD word10
- uint32_t word11;
-#define hdr_pages_SHIFT 0
-#define hdr_pages_MASK 0x0000000f
-#define hdr_pages_WORD word11
-#define hdr_size_SHIFT 8
-#define hdr_size_MASK 0x0000000f
-#define hdr_size_WORD word11
-#define hdr_pp_align_SHIFT 16
-#define hdr_pp_align_MASK 0x0000ffff
-#define hdr_pp_align_WORD word11
- uint32_t word12;
-#define sgl_pages_SHIFT 0
-#define sgl_pages_MASK 0x0000000f
-#define sgl_pages_WORD word12
-#define sgl_pp_align_SHIFT 16
-#define sgl_pp_align_MASK 0x0000ffff
-#define sgl_pp_align_WORD word12
- uint32_t rsvd_13_63[51];
-};
#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
&(~((SLI4_PAGE_SIZE)-1)))
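
With the two PORT_CAPABILITIES structs removed, SLI4_PAGE_SIZE is defined once beside the other alignment constants, and SLI4_PAGE_ALIGN() above is the usual round-up-to-a-power-of-two idiom. A quick standalone check of what the macro computes:

#include <stdio.h>

#define SLI4_PAGE_SIZE 4096
#define SLI4_PAGE_ALIGN(addr) (((addr) + ((SLI4_PAGE_SIZE) - 1)) \
			       & (~((SLI4_PAGE_SIZE) - 1)))

int main(void)
{
	/* Adding PAGE_SIZE - 1 and masking off the low bits rounds up to
	 * the next 4 KiB boundary; aligned values pass through unchanged. */
	printf("%d\n", SLI4_PAGE_ALIGN(1));	/* 4096 */
	printf("%d\n", SLI4_PAGE_ALIGN(4096));	/* 4096 */
	printf("%d\n", SLI4_PAGE_ALIGN(4097));	/* 8192 */
	return 0;
}
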
@@ -3994,8 +3824,6 @@ struct lpfc_mqe {
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_config query_fw_cfg;
struct lpfc_mbx_set_beacon_config beacon_config;
- struct lpfc_mbx_supp_pages supp_pages;
- struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
struct lpfc_mbx_set_link_diag_state link_diag_state;
struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 71f340dd4fbd..1e4c792bb660 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1043,12 +1043,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
* driver is unloading or reposted if the driver is restarting
* the port.
*/
- spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
- /* scsl_buf_list */
+
/* sgl_list_lock required because worker thread uses this
* list.
*/
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry(sglq_entry,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
sglq_entry->state = SGL_FREED;
@@ -1057,11 +1056,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
&phba->sli4_hba.lpfc_els_sgl_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
/* abts_xxxx_buf_list_lock required because worker thread uses this
* list.
*/
+ spin_lock_irq(&phba->hbalock);
cnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
@@ -3552,7 +3552,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_printf_vlog(vports[i], KERN_INFO,
LOG_NODE | LOG_DISCOVERY,
"0011 Free RPI x%x on "
- "ndlp: %p did x%x\n",
+ "ndlp: x%px did x%x\n",
ndlp->nlp_rpi, ndlp,
ndlp->nlp_DID);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
@@ -3804,12 +3804,10 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
sglq_entry->state = SGL_FREED;
list_add_tail(&sglq_entry->list, &els_sgl_list);
}
- spin_lock_irq(&phba->hbalock);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_splice_init(&els_sgl_list,
&phba->sli4_hba.lpfc_els_sgl_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrunk */
xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3817,8 +3815,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
"3158 ELS xri-sgl count decreased from "
"%d to %d\n", phba->sli4_hba.els_xri_cnt,
els_xri_cnt);
- spin_lock_irq(&phba->hbalock);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
&els_sgl_list);
/* release extra els sgls from list */
@@ -3833,8 +3830,7 @@ lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
}
list_splice_init(&els_sgl_list,
&phba->sli4_hba.lpfc_els_sgl_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
} else
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3163 ELS xri-sgl count unchanged: %d\n",
@@ -6573,8 +6569,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mboxq;
MAILBOX_t *mb;
int rc, i, max_buf_size;
- uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
- struct lpfc_mqe *mqe;
int longs;
int extra;
uint64_t wwn;
@@ -6808,32 +6802,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_nvme_mod_param_dep(phba);
- /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
- lpfc_supported_pages(mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (!rc) {
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
- }
- }
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
- if (rc) {
- mempool_free(mboxq, phba->mbox_mem_pool);
- rc = -EIO;
- goto out_free_bsmbx;
- }
- }
-
/*
* Get sli4 parameters that override parameters from Port capabilities.
* If this call fails, it isn't critical unless the SLI4 parameters come
@@ -7388,11 +7356,9 @@ lpfc_free_els_sgl_list(struct lpfc_hba *phba)
LIST_HEAD(sglq_list);
/* Retrieve all els sgls from driver list */
- spin_lock_irq(&phba->hbalock);
- spin_lock(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
- spin_unlock(&phba->sli4_hba.sgl_list_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
/* Now free the sgl list */
lpfc_free_sgl_list(phba, &sglq_list);
@@ -9660,8 +9626,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"3250 QUERY_FW_CFG mailbox failed with status "
"x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
rc = -ENXIO;
goto out_error;
}
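
This hunk and the following ones free the mailbox unconditionally instead of skipping the free on MBX_TIMEOUT. The old guard deliberately leaked the buffer so a late completion could not touch freed memory; freeing unconditionally is only safe if a timed-out polled command can no longer complete against the buffer, which this series presumably guarantees elsewhere. A small sketch of the resulting ownership rule, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

enum { MBX_SUCCESS, MBX_TIMEOUT };

/* Hypothetical polled mailbox: ownership of buf stays with the caller
 * in every outcome, including timeout. */
static int issue_mbox_poll(char *buf)
{
	buf[0] = 0;
	return MBX_TIMEOUT;	/* pretend the firmware never answered */
}

int main(void)
{
	char *mboxq = malloc(64);
	int rc;

	if (!mboxq)
		return 1;
	rc = issue_mbox_poll(mboxq);

	/* Old shape: "if (rc != MBX_TIMEOUT) free(mboxq);", a leak on
	 * timeout traded against a racing late completion. With no late
	 * completion possible, cleanup becomes unconditional. */
	free(mboxq);
	return rc == MBX_SUCCESS ? 0 : 1;
}
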
@@ -9677,8 +9642,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
/*
* Set up HBA Event Queues (EQs)
@@ -10276,8 +10240,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
&shdr->response);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0495 SLI_FUNCTION_RESET mailbox "
@@ -12075,78 +12038,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
phba->pport->work_port_events = 0;
}
- /**
- * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
- * @phba: Pointer to HBA context object.
- * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
- *
- * This function is called in the SLI4 code path to read the port's
- * sli4 capabilities.
- *
- * This function may be be called from any context that can block-wait
- * for the completion. The expectation is that this routine is called
- * typically from probe_one or from the online routine.
- **/
-int
-lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
-{
- int rc;
- struct lpfc_mqe *mqe;
- struct lpfc_pc_sli4_params *sli4_params;
- uint32_t mbox_tmo;
-
- rc = 0;
- mqe = &mboxq->u.mqe;
-
- /* Read the port's SLI4 Parameters port capabilities */
- lpfc_pc_sli4_params(mboxq);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- }
-
- if (unlikely(rc))
- return 1;
-
- sli4_params = &phba->sli4_hba.pc_sli4_params;
- sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
- sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
- sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
- sli4_params->featurelevel_1 = bf_get(featurelevel_1,
- &mqe->un.sli4_params);
- sli4_params->featurelevel_2 = bf_get(featurelevel_2,
- &mqe->un.sli4_params);
- sli4_params->proto_types = mqe->un.sli4_params.word3;
- sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
- sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
- sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
- sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
- sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
- sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
- sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
- sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
- sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
- sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
- sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
- sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
- sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
- sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
- sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
- sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
- sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
- sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
- sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
- sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
-
- /* Make sure that sge_supp_len can be handled by the driver */
- if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
- sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
-
- return rc;
-}
-
/**
* lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
* @phba: Pointer to HBA context object.
@@ -12205,7 +12096,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
else
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
- sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
+ mbx_sli4_parameters);
sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c03a7f12dd65..1b40a3bbd1cd 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
-/**
- * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
- * mailbox command.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES supported pages mailbox command is issued to
- * retrieve the particular feature pages supported by the port.
- **/
-void
-lpfc_supported_pages(struct lpfcMboxq *mbox)
-{
- struct lpfc_mbx_supp_pages *supp_pages;
-
- memset(mbox, 0, sizeof(*mbox));
- supp_pages = &mbox->u.mqe.un.supp_pages;
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
- bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
-}
-
-/**
- * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
- * retrieve the particular SLI4 features supported by the port.
- **/
-void
-lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
-{
- struct lpfc_mbx_pc_sli4_params *sli4_params;
-
- memset(mbox, 0, sizeof(*mbox));
- sli4_params = &mbox->u.mqe.un.sli4_params;
- bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
- bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
-}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 135d8e8a42ba..bb4e65a32ecc 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -279,106 +279,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
-/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
* @phba: pointer to lpfc hba data structure.
- * @link_mbox: pointer to CONFIG_LINK mailbox object
+ * @login_mbox: pointer to REG_RPI mailbox object
*
- * This routine is only called if we are SLI3, direct connect pt2pt
- * mode and the remote NPort issues the PLOGI after link up.
+ * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
*/
static void
-lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
{
- LPFC_MBOXQ_t *login_mbox;
- MAILBOX_t *mb = &link_mbox->u.mb;
struct lpfc_iocbq *save_iocb;
struct lpfc_nodelist *ndlp;
+ MAILBOX_t *mb = &login_mbox->u.mb;
+
int rc;
- ndlp = link_mbox->ctx_ndlp;
- login_mbox = link_mbox->context3;
+ ndlp = login_mbox->ctx_ndlp;
save_iocb = login_mbox->context3;
- link_mbox->context3 = NULL;
- login_mbox->context3 = NULL;
- /* Check for CONFIG_LINK error */
- if (mb->mbxStatus) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
- mb->mbxStatus);
- mempool_free(login_mbox, phba->mbox_mem_pool);
- mempool_free(link_mbox, phba->mbox_mem_pool);
- kfree(save_iocb);
- return;
- }
-
- /* Now that CONFIG_LINK completed, and our SID is configured,
- * we can now proceed with sending the PLOGI ACC.
- */
- rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
- save_iocb, ndlp, login_mbox);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "4576 PLOGI ACC fails pt2pt discovery: %x\n",
- rc);
- mempool_free(login_mbox, phba->mbox_mem_pool);
+ if (mb->mbxStatus == MBX_SUCCESS) {
+ /* Now that REG_RPI completed successfully,
+ * we can now proceed with sending the PLOGI ACC.
+ */
+ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, NULL);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "4576 PLOGI ACC fails pt2pt discovery: "
+ "DID %x Data: %x\n", ndlp->nlp_DID, rc);
+ }
}
- mempool_free(link_mbox, phba->mbox_mem_pool);
+ /* Now process the REG_RPI cmpl */
+ lpfc_mbx_cmpl_reg_login(phba, login_mbox);
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
kfree(save_iocb);
}
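
lpfc_defer_plogi_acc() transmits the ACC only from the REG_RPI mailbox completion, using the command IOCB saved in context3, then hands the mailbox on to lpfc_mbx_cmpl_reg_login(). A minimal sketch of that defer-until-completion shape; mbox and iocb here are hypothetical types, not the driver structures:

#include <stdio.h>
#include <stdlib.h>

struct iocb { int did; };

struct mbox {
	int status;			/* 0 == MBX_SUCCESS */
	struct iocb *saved;		/* plays context3 */
	void (*cmpl)(struct mbox *);
};

/* Completion handler: only now, with the RPI registered, does the
 * deferred PLOGI ACC go out. */
static void defer_plogi_acc(struct mbox *mb)
{
	if (mb->status == 0)
		printf("send PLOGI ACC to DID x%x\n", mb->saved->did);
	free(mb->saved);		/* the saved command IOCB */
}

static void issue_reg_rpi(struct mbox *mb)
{
	mb->status = 0;			/* pretend the firmware succeeded */
	mb->cmpl(mb);			/* asynchronous in the driver */
}

int main(void)
{
	struct iocb *cmd = malloc(sizeof(*cmd));
	struct mbox mb;

	if (!cmd)
		return 1;
	cmd->did = 0x010300;		/* hypothetical remote N_Port ID */
	mb.saved = cmd;			/* keep the PLOGI for the deferred ACC */
	mb.cmpl = defer_plogi_acc;
	issue_reg_rpi(&mb);
	return 0;
}
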
-/**
- * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
- * @phba: Pointer to HBA context object.
- * @pmb: Pointer to mailbox object.
- *
- * This function provides the unreg rpi mailbox completion handler for a tgt.
- * The routine frees the memory resources associated with the completed
- * mailbox command and transmits the ELS ACC.
- *
- * This routine is only called if we are SLI4, acting in target
- * mode and the remote NPort issues the PLOGI after link up.
- **/
-static void
-lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
-{
- struct lpfc_vport *vport = pmb->vport;
- struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
- LPFC_MBOXQ_t *mbox = pmb->context3;
- struct lpfc_iocbq *piocb = NULL;
- int rc;
-
- if (mbox) {
- pmb->context3 = NULL;
- piocb = mbox->context3;
- mbox->context3 = NULL;
- }
-
- /*
- * Complete the unreg rpi mbx request, and update flags.
- * This will also restart any deferred events.
- */
- lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
-
- if (!piocb) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "4578 PLOGI ACC fail\n");
- if (mbox)
- mempool_free(mbox, phba->mbox_mem_pool);
- return;
- }
-
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
- if (rc) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "4579 PLOGI ACC fail %x\n", rc);
- if (mbox)
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- kfree(piocb);
-}
-
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -395,8 +332,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
- u16 rpi;
- int rc, defer_acc;
+ int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -404,7 +340,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (wwn_to_u64(sp->portName.u.wwn) == 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0140 PLOGI Reject: invalid nname\n");
+ "0140 PLOGI Reject: invalid pname\n");
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
@@ -413,7 +349,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0141 PLOGI Reject: invalid pname\n");
+ "0141 PLOGI Reject: invalid nname\n");
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
@@ -445,7 +381,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
ndlp->nlp_fcp_info |= CLASS3;
- defer_acc = 0;
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -523,6 +458,16 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* rcv'ed PLOGI decides what our NPortId will be */
vport->fc_myDID = icmd->un.rcvels.parmRo;
+ /* If there is an outstanding FLOGI, abort it now.
+ * The remote NPort is not going to ACC our FLOGI
+	 * if it's already issuing a PLOGI for pt2pt mode.
+	 * This indicates our FLOGI was dropped; however, we
+	 * must have ACCed the remote NPort's FLOGI to us
+ * to make it here.
+ */
+ if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
+ lpfc_els_abort_flogi(phba);
+
ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) {
/* E_D_TOV ticks are in nanoseconds */
@@ -539,27 +484,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
- /* Issue config_link / reg_vfi to account for updated TOV's */
-
+ /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
+ * to account for updated TOV's / parameters
+ */
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
else {
- defer_acc = 1;
link_mbox = mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!link_mbox)
goto out;
lpfc_config_link(phba, link_mbox);
- link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+ link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
link_mbox->vport = vport;
link_mbox->ctx_ndlp = ndlp;
- save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
- if (!save_iocb)
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(link_mbox, phba->mbox_mem_pool);
goto out;
- /* Save info from cmd IOCB used in rsp */
- memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
- sizeof(struct lpfc_iocbq));
+ }
}
lpfc_can_disctmo(vport);
@@ -578,59 +522,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!login_mbox)
goto out;
- /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
- if (phba->nvmet_support && !defer_acc) {
- link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!link_mbox)
- goto out;
-
- /* As unique identifiers such as iotag would be overwritten
- * with those from the cmdiocb, allocate separate temporary
- * storage for the copy.
- */
- save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
- if (!save_iocb)
- goto out;
-
- /* Unreg RPI is required for SLI4. */
- rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
- link_mbox->vport = vport;
- link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- if (!link_mbox->ctx_ndlp)
- goto out;
-
- link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
-
- if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- (!(vport->fc_flag & FC_OFFLINE_MODE)))
- ndlp->nlp_flag |= NLP_UNREG_INP;
+ save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+ if (!save_iocb)
+ goto out;
- /* Save info from cmd IOCB used in rsp */
- memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+ /* Save info from cmd IOCB to be used in rsp after all mbox commands complete */
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
- /* Delay sending ACC till unreg RPI completes. */
- defer_acc = 1;
- } else if (phba->sli_rev == LPFC_SLI_REV4)
+ /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_unreg_rpi(vport, ndlp);
+ /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
+ * always be deferring the ACC.
+ */
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
(uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
if (rc)
goto out;
- /* ACC PLOGI rsp command needs to execute first,
- * queue this login_mbox command to be processed later.
- */
login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
- /*
- * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
- * command issued in lpfc_cmpl_els_acc().
- */
login_mbox->vport = vport;
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
- spin_unlock_irq(&ndlp->lock);
/*
* If there is an outstanding PLOGI issued, abort it before
@@ -660,7 +573,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* to register, then unregister the RPI.
*/
spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
+ ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
+ NLP_RCV_PLOGI);
spin_unlock_irq(&ndlp->lock);
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
@@ -670,42 +584,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
}
- if (defer_acc) {
- /* So the order here should be:
- * SLI3 pt2pt
- * Issue CONFIG_LINK mbox
- * CONFIG_LINK cmpl
- * SLI4 tgt
- * Issue UNREG RPI mbx
- * UNREG RPI cmpl
- * Issue PLOGI ACC
- * PLOGI ACC cmpl
- * Issue REG_LOGIN mbox
- */
- /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
- link_mbox->context3 = login_mbox;
- login_mbox->context3 = save_iocb;
+ /* So the order here should be:
+ * SLI3 pt2pt
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * SLI4 pt2pt
+ * Issue REG_VFI mbox
+ * REG_VFI cmpl
+ * SLI4
+ * Issue UNREG RPI mbx
+ * UNREG RPI cmpl
+ * Issue REG_RPI mbox
+ * REG RPI cmpl
+ * Issue PLOGI ACC
+ * PLOGI ACC cmpl
+ */
+ login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
+ login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ login_mbox->context3 = save_iocb; /* For PLOGI ACC */
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+ spin_unlock_irq(&ndlp->lock);
- /* Start the ball rolling by issuing CONFIG_LINK here */
- rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
- goto out;
- return 1;
- }
+ /* Start the ball rolling by issuing REG_LOGIN here */
+ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
- if (rc)
- mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
out:
- if (defer_acc)
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "4577 discovery failure: %p %p %p\n",
- save_iocb, link_mbox, login_mbox);
kfree(save_iocb);
- if (link_mbox)
- mempool_free(link_mbox, phba->mbox_mem_pool);
if (login_mbox)
mempool_free(login_mbox, phba->mbox_mem_pool);
@@ -913,9 +824,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
- !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+ (ndlp->nlp_type & NLP_NVME_TARGET) ||
+ (vport->fc_flag & FC_PT2PT))) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
- /* Only try to re-login if this is NOT a Fabric Node */
+ /* Only try to re-login if this is NOT a Fabric Node
+ * AND the remote NPORT is a FCP/NVME Target or we
+ * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
+ * case for LOGO as a response to ADISC behavior.
+ */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
spin_lock_irq(&ndlp->lock);
@@ -1985,8 +1901,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_issue_els_logo(vport, ndlp, 0);
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -2570,6 +2484,16 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
static uint32_t
+lpfc_device_rm_unmap_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
+{
+ lpfc_drop_node(vport, ndlp);
+ return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
void *arg,
@@ -2633,12 +2557,10 @@ static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */
- lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
@@ -3062,7 +2984,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* CMPL_LOGO */
lpfc_disc_illegal, /* CMPL_ADISC */
lpfc_disc_illegal, /* CMPL_REG_LOGIN */
- lpfc_disc_illegal, /* DEVICE_RM */
+ lpfc_device_rm_unmap_node, /* DEVICE_RM */
lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
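The rework above always routes the pt2pt PLOGI response through the REG_LOGIN mailbox completion. A minimal sketch of the shape such a completion takes, assuming lpfc_defer_plogi_acc() mirrors the removed lpfc_defer_acc_rsp(); the helper body below is illustrative, not the driver's actual implementation:

static void
lpfc_defer_plogi_acc_sketch(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
{
	struct lpfc_vport *vport = login_mbox->vport;
	struct lpfc_nodelist *ndlp = login_mbox->ctx_ndlp;
	struct lpfc_iocbq *save_iocb = login_mbox->context3;

	/* Finish normal REG_LOGIN completion handling first, then send
	 * the deferred PLOGI ACC from the saved copy of the received IOCB.
	 */
	if (save_iocb)
		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, save_iocb, ndlp, NULL);

	kfree(save_iocb);
}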
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 4d819e52496a..41e49f61fac2 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -190,14 +190,14 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
ndlp = rport->ndlp;
if (!ndlp) {
- pr_err("**** %s: NULL ndlp on rport %p remoteport %p\n",
+ pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
__func__, rport, remoteport);
goto rport_err;
}
vport = ndlp->vport;
if (!vport) {
- pr_err("**** %s: Null vport on ndlp %p, ste x%x rport %p\n",
+ pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
__func__, ndlp, ndlp->nlp_state, rport);
goto rport_err;
}
@@ -209,7 +209,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport %p\n",
+ "6146 remoteport delete of remoteport x%px\n",
remoteport);
spin_lock_irq(&ndlp->lock);
@@ -317,7 +317,7 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
+ "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
"status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
"ndlp:x%px\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
@@ -339,7 +339,7 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
else
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6046 NVMEx cmpl without done call back? "
- "Data %px DID %x Xri: %x status %x\n",
+ "Data x%px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status);
if (ndlp) {
@@ -707,7 +707,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
"6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
- "x%p rqstlen:%d rsplen:%d %pad %pad\n",
+ "x%px rqstlen:%d rsplen:%d %pad %pad\n",
pnvme_lsreq, pnvme_lsreq->rqstlen,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
@@ -736,7 +736,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
- "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n",
+ "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
pnvme_lsreq);
return -EINVAL;
}
@@ -1839,7 +1839,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq x%px, "
- "lpfc_ncmd %px xri x%x\n",
+ "lpfc_ncmd x%px xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -2002,7 +2002,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
/**
* lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
- * @vport - the lpfc_vport instance requesting a localport.
+ * @vport: the lpfc_vport instance requesting a localport.
*
* This routine is invoked to create an nvme localport instance to bind
* to the nvme_fc_transport. It is called once during driver load
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index bb2a4a0d1295..f2d9a3580887 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
- * Fibre Channsel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1440,7 +1440,10 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
list_del_init(&ctx_buf->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock(&phba->hbalock);
__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
+ spin_unlock(&phba->hbalock);
+
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
@@ -1787,8 +1790,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
}
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
@@ -1806,10 +1808,10 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}
ctxp->flag &= ~LPFC_NVME_XBUSY;
spin_unlock(&ctxp->ctxlock);
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
rrq_empty = list_empty(&phba->active_rrq_list);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
if (ndlp &&
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -1830,9 +1832,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
lpfc_worker_wake_up(phba);
return;
}
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
if (ctxp) {
/*
@@ -1876,8 +1876,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
sid = sli4_sid_from_fc_hdr(fc_hdr);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
@@ -1886,9 +1885,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
xri = ctxp->ctxbuf->sglq->sli4_xritag;
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+ iflag);
spin_lock_irqsave(&ctxp->ctxlock, iflag);
ctxp->flag |= LPFC_NVME_ABTS_RCV;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
@@ -1907,9 +1905,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
}
- spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
/* check the wait list */
if (phba->sli4_hba.nvmet_io_wait_cnt) {
struct rqb_dmabuf *nvmebuf;
@@ -3304,7 +3300,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
/* Word 10 */
- bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
LPFC_WQE_LENLOC_WORD12);
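The nvmet hunks above flatten the old hbalock/abts_nvmet_buf_list_lock nesting into a single irqsave acquisition of the list lock. The resulting pattern, reduced to its shape (type and field names taken from the surrounding hunks):

	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	unsigned long iflag;

	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		/* match on xri; on a hit the real code unlocks here and
		 * returns before any heavier work such as node lookup
		 */
	}
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);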
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a4d697373c71..eefbb9b22798 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -132,6 +132,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
}
}
+#define LPFC_INVALID_REFTAG ((u32)-1)
+
/**
* lpfc_update_stats - Update statistical data for the command completion
* @vport: The virtual port on which this call is executing.
@@ -734,7 +736,7 @@ lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
/**
- * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
* @phba: The Hba for which this call is being executed.
* @psb: The scsi buffer which is being released.
*
@@ -972,10 +974,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
#define BG_ERR_TGT 0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
-/**
+/*
* Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
* error injection
- **/
+ */
#define BG_ERR_CHECK 0x20
/**
@@ -1000,7 +1002,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t op = scsi_get_prot_op(sc);
uint32_t blksize;
uint32_t numblks;
- sector_t lba;
+ u32 lba;
int rc = 0;
int blockoff = 0;
@@ -1008,7 +1010,9 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
sgpe = scsi_prot_sglist(sc);
- lba = scsi_get_lba(sc);
+ lba = t10_pi_ref_tag(sc->request);
+ if (lba == LPFC_INVALID_REFTAG)
+ return 0;
/* First check if we need to match the LBA */
if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
@@ -1016,11 +1020,11 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
/* Make sure we have the right LBA if one is specified */
- if ((phba->lpfc_injerr_lba < lba) ||
- (phba->lpfc_injerr_lba >= (lba + numblks)))
+ if (phba->lpfc_injerr_lba < (u64)lba ||
+ (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
return 0;
if (sgpe) {
- blockoff = phba->lpfc_injerr_lba - lba;
+ blockoff = phba->lpfc_injerr_lba - (u64)lba;
numblks = sg_dma_len(sgpe) /
sizeof(struct scsi_dif_tuple);
if (numblks < blockoff)
@@ -1589,7 +1593,9 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ reftag = t10_pi_ref_tag(sc->request);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1750,7 +1756,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ reftag = t10_pi_ref_tag(sc->request);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1979,7 +1987,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
goto out;
/* extract some info from the scsi command for pde*/
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ reftag = t10_pi_ref_tag(sc->request);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2178,7 +2188,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
- reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+ reftag = t10_pi_ref_tag(sc->request);
+ if (reftag == LPFC_INVALID_REFTAG)
+ goto out;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2770,7 +2782,9 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
chk_guard = 1;
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+ start_ref_tag = t10_pi_ref_tag(cmd->request);
+ if (start_ref_tag == LPFC_INVALID_REFTAG)
+ goto out;
start_app_tag = src->app_tag;
len = sgpe->length;
while (src && protsegcnt) {
@@ -2861,8 +2875,8 @@ out:
SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
- (unsigned long)scsi_get_lba(cmd),
+ "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
+ t10_pi_ref_tag(cmd->request),
sum, guard_tag);
} else if (err_type == BGS_REFTAG_ERR_MASK) {
@@ -2873,8 +2887,8 @@ out:
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
- (unsigned long)scsi_get_lba(cmd),
+ "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
+ t10_pi_ref_tag(cmd->request),
ref_tag, start_ref_tag);
} else if (err_type == BGS_APPTAG_ERR_MASK) {
@@ -2885,8 +2899,8 @@ out:
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
- (unsigned long)scsi_get_lba(cmd),
+ "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
+ t10_pi_ref_tag(cmd->request),
app_tag, start_app_tag);
}
}
@@ -3062,10 +3076,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9072 BLKGRD: Invalid BG Profile in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9072 BLKGRD: Invalid BG Profile in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
+ t10_pi_ref_tag(cmd->request),
blk_rq_sectors(cmd->request), bgstat, bghm);
ret = (-1);
goto out;
@@ -3074,10 +3088,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9073 BLKGRD: Invalid BG PDIF Block in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9073 BLKGRD: Invalid BG PDIF Block in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
+ t10_pi_ref_tag(cmd->request),
blk_rq_sectors(cmd->request), bgstat, bghm);
ret = (-1);
goto out;
@@ -3092,10 +3106,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
SAM_STAT_CHECK_CONDITION;
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9055 BLKGRD: Guard Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9055 BLKGRD: Guard Tag error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
+ t10_pi_ref_tag(cmd->request),
blk_rq_sectors(cmd->request), bgstat, bghm);
}
@@ -3109,10 +3123,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9056 BLKGRD: Ref Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9056 BLKGRD: Ref Tag error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
+ t10_pi_ref_tag(cmd->request),
blk_rq_sectors(cmd->request), bgstat, bghm);
}
@@ -3126,10 +3140,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9061 BLKGRD: App Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9061 BLKGRD: App Tag error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
+ t10_pi_ref_tag(cmd->request),
blk_rq_sectors(cmd->request), bgstat, bghm);
}
@@ -3170,10 +3184,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (!ret) {
/* No error was reported - problem in FW? */
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9057 BLKGRD: Unknown error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
+ "9057 BLKGRD: Unknown error in cmd "
+ "0x%x reftag 0x%x blk cnt 0x%x "
"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
+ t10_pi_ref_tag(cmd->request),
blk_rq_sectors(cmd->request), bgstat, bghm);
/* Calculate what type of error it was */
@@ -3685,7 +3699,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
/**
* lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
* buffer
- * @phba: The Hba for which this call is being executed.
+ * @vport: Pointer to vport object.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
* @tmo: Timeout value for IO
*
@@ -3707,7 +3721,7 @@ lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
* @phba: Pointer to hba context object.
* @vport: Pointer to vport object.
* @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
- * @rsp_iocb: Pointer to response iocb object which reported error.
+ * @fcpi_parm: FCP Initiator parameter.
*
* This function posts an event when there is a SCSI command reporting
* error from the scsi device.
@@ -3822,10 +3836,10 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
}
/**
- * lpfc_handler_fcp_err - FCP response handler
+ * lpfc_handle_fcp_err - FCP response handler
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_io_buf data structure.
- * @rsp_iocb: The response IOCB which contains FCP error.
+ * @fcpi_parm: FCP Initiator parameter.
*
* This routine is called to process response IOCB with status field
* IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
@@ -4009,7 +4023,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
* lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
* @phba: The hba for which this call is being executed.
* @pwqeIn: The command WQE for the scsi cmnd.
- * @pwqeOut: The response WQE for the scsi cmnd.
+ * @wcqe: Pointer to driver response CQE object.
*
* This routine assigns scsi command result by looking into response WQE
* status field appropriately. This routine handles QUEUE FULL condition as
@@ -4060,7 +4074,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
- if (!cmd || !phba) {
+ if (!cmd) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"9042 I/O completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4278,7 +4292,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
u32 *lp = (u32 *)cmd->sense_buffer;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "9039 Iodone <%d/%llu> cmd x%p, error "
+ "9039 Iodone <%d/%llu> cmd x%px, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3), cmd->retries,
@@ -4605,7 +4619,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/**
* lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
- * @phba: Pointer to vport object for which I/O is executed
+ * @vport: Pointer to vport object.
* @lpfc_cmd: The scsi buffer which is going to be prep'ed.
* @tmo: timeout value for the IO
*
@@ -4682,7 +4696,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
/**
* lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
- * @phba: Pointer to vport object for which I/O is executed
+ * @vport: Pointer to vport object.
* @lpfc_cmd: The scsi buffer which is going to be prep'ed.
* @tmo: timeout value for the IO
*
@@ -4939,7 +4953,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
}
/**
- * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
+ * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
* @phba: The Hba for which this call is being executed.
* @cmdiocbq: Pointer to lpfc_iocbq data structure.
* @rspiocbq: Pointer to lpfc_iocbq data structure.
@@ -4998,7 +5012,7 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "8347 Invalid device found: "
+ "8347 Incapable PCI reset device: "
"0x%04x\n", ptr->device);
return -EBADSLT;
}
@@ -5084,7 +5098,7 @@ buffer_done:
}
/**
- * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
+ * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
* @phba: The Hba for which this call is being executed.
*
* This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
@@ -5252,10 +5266,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_SCSI_CMD,
"9033 BLKGRD: rcvd %s cmd:x%x "
- "sector x%llx cnt %u pt %x\n",
+ "reftag x%x cnt %u pt %x\n",
dif_op_str[scsi_get_prot_op(cmnd)],
cmnd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmnd),
+ t10_pi_ref_tag(cmnd->request),
blk_rq_sectors(cmnd->request),
(cmnd->cmnd[1]>>5));
}
@@ -5265,9 +5279,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_SCSI_CMD,
"9038 BLKGRD: rcvd PROT_NORMAL cmd: "
- "x%x sector x%llx cnt %u pt %x\n",
+ "x%x reftag x%x cnt %u pt %x\n",
cmnd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmnd),
+ t10_pi_ref_tag(cmnd->request),
blk_rq_sectors(cmnd->request),
(cmnd->cmnd[1]>>5));
}
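All of the lpfc_scsi.c DIF hunks above swap the truncated scsi_get_lba() value for the block layer's reference tag, each guarded by the new LPFC_INVALID_REFTAG sentinel. The common pattern, as a self-contained sketch (the helper name is illustrative):

#include <linux/t10-pi.h>
#include <scsi/scsi_cmnd.h>

#define LPFC_INVALID_REFTAG ((u32)-1)

static int lpfc_get_reftag_sketch(struct scsi_cmnd *sc, u32 *reftag)
{
	/* t10_pi_ref_tag() derives the expected PI reference tag from
	 * the request; (u32)-1 signals that no valid tag exists.
	 */
	*reftag = t10_pi_ref_tag(sc->request);
	if (*reftag == LPFC_INVALID_REFTAG)
		return -EINVAL;	/* callers bail out of BG setup */
	return 0;
}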
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fa1a714a78f0..06ccc0157bd8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -935,7 +935,7 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* @phba: Pointer to HBA context object.
* @xritag: XRI value.
*
- * This function clears the sglq pointer from the array of acive
+ * This function clears the sglq pointer from the array of active
* sglq's. The xritag that is passed in is used to index into the
* array. Before the xritag can be used it needs to be adjusted
* by subtracting the xribase.
@@ -957,7 +957,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
* @phba: Pointer to HBA context object.
* @xritag: XRI value.
*
- * This function returns the sglq pointer from the array of acive
+ * This function returns the sglq pointer from the array of active
* sglq's. The xritag that is passed in is used to index into the
* array. Before the xritag can be used it needs to be adjusted
* by subtracting the xribase.
@@ -987,16 +987,10 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
{
struct lpfc_nodelist *ndlp = NULL;
+ /* Lookup did to verify if did is still active on this vport */
if (rrq->vport)
ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
- /* The target DID could have been swapped (cable swap)
- * we should use the ndlp from the findnode if it is
- * available.
- */
- if ((!ndlp) && rrq->ndlp)
- ndlp = rrq->ndlp;
-
if (!ndlp)
goto out;
@@ -1118,9 +1112,14 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
}
spin_lock_irqsave(&phba->hbalock, iflags);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
- if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+ if (rrq->vport != vport)
+ continue;
+
+ if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
list_move(&rrq->list, &rrq_list);
+
+ }
spin_unlock_irqrestore(&phba->hbalock, iflags);
list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
@@ -1213,7 +1212,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
- rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
@@ -1405,7 +1403,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
goto out;
}
- pring = phba->sli4_hba.els_wq->pring;
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
@@ -1428,9 +1425,9 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
&phba->sli4_hba.lpfc_els_sgl_list);
spin_unlock_irqrestore(
&phba->sli4_hba.sgl_list_lock, iflag);
-
+ pring = lpfc_phba_elsring(phba);
/* Check if TXQ queue needs to be serviced */
- if (!list_empty(&pring->txq))
+ if (pring && (!list_empty(&pring->txq)))
lpfc_worker_wake_up(phba);
}
}
@@ -2659,7 +2656,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x %px x%x x%x\n",
+ "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_defer_did,
ndlp, vport->load_flag, kref_read(&ndlp->kref));
@@ -2724,7 +2721,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport, KERN_INFO, LOG_MBOX | LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x defer x%x flg x%x "
- "%px\n",
+ "x%px\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_defer_did,
ndlp->nlp_flag,
@@ -3026,7 +3023,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
goto out_fail;
lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
- "6206 NVMET unsol ls_req ndlp %p "
+ "6206 NVMET unsol ls_req ndlp x%px "
"DID x%x xflags x%x refcnt %d\n",
ndlp, ndlp->nlp_DID,
ndlp->fc4_xpt_flags,
@@ -5683,12 +5680,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
phba->sli4_hba.lnk_info.lnk_no,
phba->BIOSVersion);
out_free_mboxq:
- if (rc != MBX_TIMEOUT) {
- if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- else
- mempool_free(mboxq, phba->mbox_mem_pool);
- }
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ else
+ mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
}
@@ -5789,12 +5784,10 @@ retrieve_ppname:
}
out_free_mboxq:
- if (rc != MBX_TIMEOUT) {
- if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- else
- mempool_free(mboxq, phba->mbox_mem_pool);
- }
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ else
+ mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
}
@@ -9635,7 +9628,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
}
/**
- * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to command iocb.
* @wqe: Pointer to the work queue entry.
@@ -10421,7 +10414,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return 0;
}
-/**
+/*
* lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
*
* This routine wraps the actual fcp i/o function for issuing WQE for sli-4
@@ -11593,7 +11586,7 @@ release_iocb:
* which are aborted. The function frees memory resources used for
* the aborted ELS commands.
**/
-static void
+void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
@@ -11647,7 +11640,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
icmd = &cmdiocb->iocb;
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+ cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
return IOCB_ABORTING;
if (!pring) {
@@ -11945,7 +11938,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
* @vport: Pointer to virtual port.
- * @pring: Pointer to driver SLI ring object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
@@ -11960,18 +11952,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* FCP iocbs associated with SCSI target specified by tgt_id parameter.
* When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
* FCP iocbs associated with virtual port.
+ * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
+ * lpfc_sli4_calc_ring is used.
* This function returns number of iocbs it failed to abort.
* This function is called with no locks held.
**/
int
-lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
- uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
+lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
+ lpfc_ctx_cmd abort_cmd)
{
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring = NULL;
struct lpfc_iocbq *iocbq;
int errcnt = 0, ret_val = 0;
unsigned long iflags;
int i;
+ void *fcp_cmpl = NULL;
/* all I/Os are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH)
@@ -11985,8 +11981,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
spin_lock_irqsave(&phba->hbalock, iflags);
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
+ fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ pring = lpfc_sli4_calc_ring(phba, iocbq);
+ fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
+ }
ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
- lpfc_sli_abort_fcp_cmpl);
+ fcp_cmpl);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ret_val != IOCB_SUCCESS)
errcnt++;
@@ -14170,7 +14173,7 @@ rearm_and_exit:
}
/**
- * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
+ * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
* @cq: pointer to CQ to process
*
* This routine calls the cq processing routine with a handler specific
@@ -14744,7 +14747,7 @@ lpfc_sli4_hba_process_cq(struct work_struct *work)
}
/**
- * lpfc_sli4_hba_process_cq - fast-path work handler when started by timer
+ * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
* @work: pointer to work element
*
* translates from the work handler and calls the fast-path handler.
@@ -14848,7 +14851,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
}
return IRQ_HANDLED;
-} /* lpfc_sli4_fp_intr_handler */
+} /* lpfc_sli4_hba_intr_handler */
/**
* lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
@@ -17072,8 +17075,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
"2509 RQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
- if (rc != MBX_TIMEOUT)
- mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
return -ENXIO;
}
bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
@@ -17170,7 +17172,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
+ if (!phba->sli4_hba.intr_enable)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ else if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -17218,7 +17222,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_free_xri - Release an xri for reuse.
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
* @phba: pointer to lpfc hba data structure.
* @xri: xri to release.
*
@@ -17367,7 +17371,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
+ if (!phba->sli4_hba.intr_enable)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ else if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -17480,7 +17486,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
+ if (!phba->sli4_hba.intr_enable)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ else if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -18064,7 +18072,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
if (cmd_iocbq) {
ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
lpfc_nlp_put(ndlp);
- lpfc_nlp_not_used(ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
}
@@ -18099,7 +18106,7 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
/**
* lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
- * @vport: pointer to a vitural port.
+ * @vport: pointer to a virtual port.
* @fc_hdr: pointer to a FC frame header.
* @aborted: was the partially assembled receive sequence successfully aborted
*
@@ -18831,8 +18838,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
+ mempool_free(mboxq, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2514 POST_RPI_HDR mailbox failed with "
@@ -18938,7 +18944,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * __lpfc_sli4_free_rpi - Release an rpi for reuse.
* @phba: pointer to lpfc hba data structure.
* @rpi: rpi to free
*
@@ -20076,7 +20082,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
break;
}
}
- if (rc != MBX_TIMEOUT)
+ if (!phba->sli4_hba.intr_enable)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ else if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
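Several hunks above replace the bare rc != MBX_TIMEOUT check with an intr_enable test first. A sketch of the resulting free policy, with the rationale stated as an editorial assumption rather than taken from the commit:

	/* Assumption: polled mailbox commands (interrupts disabled)
	 * complete synchronously, so the mailbox can always be freed;
	 * for interrupt-driven commands MBX_TIMEOUT means the
	 * completion handler may still own the buffer.
	 */
	if (!phba->sli4_hba.intr_enable)
		mempool_free(mbox, phba->mbox_mem_pool);
	else if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);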
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index fade044c8f15..4b8e89375644 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.7"
+#define LPFC_DRIVER_VERSION "12.8.0.9"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2020 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2021 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index ccf7b6cd0bd8..da9a1f72d938 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -190,7 +190,7 @@ lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
return 1;
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1822 Invalid %s: %02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x\n",
name_type,
@@ -531,7 +531,7 @@ disable_vport(struct fc_vport *fc_vport)
}
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1826 Vport Disabled.\n");
return VPORT_OK;
}
@@ -579,7 +579,7 @@ enable_vport(struct fc_vport *fc_vport)
}
out:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1827 Vport Enabled.\n");
return VPORT_OK;
}
@@ -725,7 +725,7 @@ skip_logo:
spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->port_list_lock);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1828 Vport Deleted.\n");
scsi_host_put(shost);
return VPORT_OK;
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 9e989776609b..ec9840d322e5 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -346,7 +346,7 @@ static void cmd_done(struct fsc_state *state, int result)
struct scsi_cmnd *cmd;
cmd = state->current_req;
- if (cmd != 0) {
+ if (cmd) {
cmd->result = result;
(*cmd->scsi_done)(cmd);
state->current_req = NULL;
@@ -467,12 +467,13 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
dma_cmd_space = kmalloc_array(host->sg_tablesize + 2,
sizeof(struct dbdma_cmd),
GFP_KERNEL);
- if (dma_cmd_space == 0) {
- printk(KERN_ERR "mac53c94: couldn't allocate dma "
- "command space for %pOF\n", node);
+ if (!dma_cmd_space) {
+ printk(KERN_ERR "mac53c94: couldn't allocate dma "
+ "command space for %pOF\n", node);
rc = -ENOMEM;
- goto out_free;
- }
+ goto out_free;
+ }
+
state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space);
memset(state->dma_cmds, 0, (host->sg_tablesize + 1)
* sizeof(struct dbdma_cmd));
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index d57e93872d7b..b1a2d3536add 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1427,7 +1427,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
/**
- * megaraid_queue_command - generic queue entry point for all LLDs
+ * megaraid_queue_command_lck - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
* @done : callback routine to be called after the cmd has be completed
*
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8df53446641a..abf7b401f5b9 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -490,7 +490,7 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
}
/**
- * mraid_mm_attch_buf - Attach a free dma buffer for required size
+ * mraid_mm_attach_buf - Attach a free dma buffer for required size
* @adp : Adapter softstate
* @kioc : kioc that the buffer needs to be attached to
* @xferlen : required length for buffer
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 0f808d63580e..b5a765b73c76 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2019,10 +2019,12 @@ union megasas_frame {
* struct MR_PRIV_DEVICE - sdev private hostdata
* @is_tm_capable: firmware managed tm_capable flag
* @tm_busy: TM request is in progress
+ * @sdev_priv_busy: pending command per sdev
*/
struct MR_PRIV_DEVICE {
bool is_tm_capable;
bool tm_busy;
+ atomic_t sdev_priv_busy;
atomic_t r1_ldio_hint;
u8 interface_type;
u8 task_abort_tmo;
@@ -2212,6 +2214,7 @@ struct megasas_irq_context {
struct irq_poll irqpoll;
bool irq_poll_scheduled;
bool irq_line_enable;
+ atomic_t in_used;
};
struct MR_DRV_SYSTEM_INFO {
@@ -2446,6 +2449,7 @@ struct megasas_instance {
bool support_pci_lane_margining;
u8 low_latency_index_start;
int perf_mode;
+ int iopoll_q_count;
};
struct MR_LD_VF_MAP {
@@ -2726,5 +2730,6 @@ void megasas_init_debugfs(void);
void megasas_exit_debugfs(void);
void megasas_setup_debugfs(struct megasas_instance *instance);
void megasas_destroy_debugfs(struct megasas_instance *instance);
+int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 63a4f48bdc75..4d4e9dbe5193 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -114,6 +114,15 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
+ "This parameter is effective only if host_tagset_enable=1 &\n\t\t"
+ "It is not applicable for MFI_SERIES. &\n\t\t"
+ "Driver will work in latency mode. &\n\t\t"
+ "High iops queues are not allocated &\n\t\t"
+ );
+
int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
@@ -207,6 +216,7 @@ static bool support_pci_lane_margining;
static spinlock_t poll_aen_lock;
extern struct dentry *megasas_debugfs_root;
+extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
@@ -475,7 +485,7 @@ megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
return readl(&instance->reg_set->outbound_msg_0);
}
/**
- * megasas_clear_interrupt_xscale - Check & clear interrupt
+ * megasas_clear_intr_xscale - Check & clear interrupt
* @instance: Adapter soft state
*/
static int
@@ -658,7 +668,7 @@ megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
}
/**
- * megasas_clear_interrupt_ppc - Check & clear interrupt
+ * megasas_clear_intr_ppc - Check & clear interrupt
* @instance: Adapter soft state
*/
static int
@@ -787,7 +797,7 @@ megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
}
/**
- * megasas_clear_interrupt_skinny - Check & clear interrupt
+ * megasas_clear_intr_skinny - Check & clear interrupt
* @instance: Adapter soft state
*/
static int
@@ -935,7 +945,7 @@ megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
}
/**
- * megasas_clear_interrupt_gen2 - Check & clear interrupt
+ * megasas_clear_intr_gen2 - Check & clear interrupt
* @instance: Adapter soft state
*/
static int
@@ -3127,14 +3137,37 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
static int megasas_map_queues(struct Scsi_Host *shost)
{
struct megasas_instance *instance;
+ int qoff = 0, offset;
+ struct blk_mq_queue_map *map;
instance = (struct megasas_instance *)shost->hostdata;
if (shost->nr_hw_queues == 1)
return 0;
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- instance->pdev, instance->low_latency_index_start);
+ offset = instance->low_latency_index_start;
+
+ /* Setup Default hctx */
+ map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ map->nr_queues = instance->msix_vectors - offset;
+ map->queue_offset = 0;
+ blk_mq_pci_map_queues(map, instance->pdev, offset);
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+
+ /* Setup Poll hctx */
+ map = &shost->tag_set.map[HCTX_TYPE_POLL];
+ map->nr_queues = instance->iopoll_q_count;
+ if (map->nr_queues) {
+ /*
+ * The poll queue(s) doesn't have an IRQ (and hence IRQ
+ * affinity), so use the regular blk-mq cpu mapping
+ */
+ map->queue_offset = qoff;
+ blk_mq_map_queues(map);
+ }
+
+ return 0;
}
static void megasas_aen_polling(struct work_struct *work);
@@ -3446,6 +3479,7 @@ static struct scsi_host_template megasas_template = {
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
.map_queues = megasas_map_queues,
+ .mq_poll = megasas_blk_mq_poll,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -4884,6 +4918,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
}
/**
+ * megasas_host_device_list_query
* dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
* dcmd.mbox - reserved
* dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
@@ -5161,7 +5196,7 @@ void megasas_get_snapdump_properties(struct megasas_instance *instance)
}
/**
- * megasas_get_controller_info - Returns FW's controller structure
+ * megasas_get_ctrl_info - Returns FW's controller structure
* @instance: Adapter soft state
*
* Issues an internal command (DCMD) to get the FW's controller structure.
@@ -5834,13 +5869,16 @@ __megasas_alloc_irq_vectors(struct megasas_instance *instance)
irq_flags = PCI_IRQ_MSIX;
if (instance->smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
else
descp = NULL;
+ /* Do not allocate msix vectors for poll_queues.
+ * msix_vectors is always within a range of FW supported reply queue.
+ */
i = pci_alloc_irq_vectors_affinity(instance->pdev,
instance->low_latency_index_start,
- instance->msix_vectors, irq_flags, descp);
+ instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp);
return i;
}
@@ -5856,10 +5894,30 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
int i;
unsigned int num_msix_req;
+ instance->iopoll_q_count = 0;
+ if ((instance->adapter_type != MFI_SERIES) &&
+ poll_queues) {
+
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ instance->low_latency_index_start = 1;
+
+ /* reserve for default and non-managed pre-vector. */
+ if (instance->msix_vectors > (poll_queues + 2))
+ instance->iopoll_q_count = poll_queues;
+ else
+ instance->iopoll_q_count = 0;
+
+ num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+ instance->msix_vectors = min(num_msix_req,
+ instance->msix_vectors);
+
+ }
+
i = __megasas_alloc_irq_vectors(instance);
- if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
- (i != instance->msix_vectors)) {
+ if (((instance->perf_mode == MR_BALANCED_PERF_MODE)
+ || instance->iopoll_q_count) &&
+ (i != (instance->msix_vectors - instance->iopoll_q_count))) {
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
/* Disable Balanced IOPS mode and try realloc vectors */
@@ -5870,12 +5928,15 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
instance->msix_vectors = min(num_msix_req,
instance->msix_vectors);
+ instance->iopoll_q_count = 0;
i = __megasas_alloc_irq_vectors(instance);
}
dev_info(&instance->pdev->dev,
- "requested/available msix %d/%d\n", instance->msix_vectors, i);
+ "requested/available msix %d/%d poll_queue %d\n",
+ instance->msix_vectors - instance->iopoll_q_count,
+ i, instance->iopoll_q_count);
if (i > 0)
instance->msix_vectors = i;
@@ -6841,12 +6902,18 @@ static int megasas_io_attach(struct megasas_instance *instance)
instance->smp_affinity_enable) {
host->host_tagset = 1;
host->nr_hw_queues = instance->msix_vectors -
- instance->low_latency_index_start;
+ instance->low_latency_index_start + instance->iopoll_q_count;
+ if (instance->iopoll_q_count)
+ host->nr_maps = 3;
+ } else {
+ instance->iopoll_q_count = 0;
}
dev_info(&instance->pdev->dev,
- "Max firmware commands: %d shared with nr_hw_queues = %d\n",
- instance->max_fw_cmds, host->nr_hw_queues);
+ "Max firmware commands: %d shared with default "
+ "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds,
+ host->nr_hw_queues - instance->iopoll_q_count,
+ instance->iopoll_q_count);
/*
* Notify the mid-layer about the new controller
*/
@@ -8859,6 +8926,7 @@ static int __init megasas_init(void)
msix_vectors = 1;
rdpq_enable = 0;
dual_qdepth_disable = 1;
+ poll_queues = 0;
}
/*
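For drivers adopting the same io_uring poll support, the map_queues shape introduced above generalizes as below; the instance type and its fields are hypothetical stand-ins for the megasas ones:

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>

struct example_instance {		/* hypothetical driver state */
	struct pci_dev *pdev;
	int msix_vectors;
	int low_latency_index_start;
	int iopoll_q_count;
};

static int example_map_queues(struct Scsi_Host *shost)
{
	struct example_instance *inst = shost_priv(shost);
	struct blk_mq_queue_map *map;
	int offset = inst->low_latency_index_start, qoff = 0;

	if (shost->nr_hw_queues == 1)
		return 0;

	/* IRQ-driven queues map by PCI MSI-x affinity */
	map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
	map->nr_queues = inst->msix_vectors - offset;
	map->queue_offset = 0;
	blk_mq_pci_map_queues(map, inst->pdev, offset);
	qoff += map->nr_queues;

	/* Poll queues have no IRQ, so fall back to the CPU mapping */
	map = &shost->tag_set.map[HCTX_TYPE_POLL];
	map->nr_queues = inst->iopoll_q_count;
	if (map->nr_queues) {
		map->queue_offset = qoff;
		blk_mq_map_queues(map);
	}
	return 0;
}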
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 38fc9467c625..2221175ae051 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -220,6 +220,40 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
return 1;
}
+static inline void
+megasas_sdev_busy_inc(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ struct MR_PRIV_DEVICE *mr_device_priv_data =
+ scmd->device->hostdata;
+ atomic_inc(&mr_device_priv_data->sdev_priv_busy);
+ }
+}
+
+static inline void
+megasas_sdev_busy_dec(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ struct MR_PRIV_DEVICE *mr_device_priv_data =
+ scmd->device->hostdata;
+ atomic_dec(&mr_device_priv_data->sdev_priv_busy);
+ }
+}
+
+static inline int
+megasas_sdev_busy_read(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ struct MR_PRIV_DEVICE *mr_device_priv_data =
+ scmd->device->hostdata;
+ return atomic_read(&mr_device_priv_data->sdev_priv_busy);
+ }
+ return 0;
+}
+
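These helpers exist because this series stops drivers from peeking at the midlayer's scsi_device->device_busy directly (see the scsi_device_busy() conversions in the mpt3sas hunks below); in balanced-performance mode megaraid_sas now keeps its own per-device count. A hedged sketch of the pattern in isolation, with illustrative names:

	/* private per-device busy counter, stored behind sdev->hostdata */
	struct my_dev_priv {
		atomic_t busy;
	};

	/* bump on submission, drop on completion, read when steering I/O */
	static inline void my_busy_inc(struct my_dev_priv *p) { atomic_inc(&p->busy); }
	static inline void my_busy_dec(struct my_dev_priv *p) { atomic_dec(&p->busy); }
	static inline int my_busy_read(struct my_dev_priv *p) { return atomic_read(&p->busy); }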
/**
* megasas_get_cmd_fusion - Get a command from the free pool
* @instance: Adapter soft state
@@ -357,15 +391,9 @@ megasas_get_msix_index(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd,
u8 data_arms)
{
- int sdev_busy;
-
- /* TBD - if sml remove device_busy in future, driver
- * should track counter in internal structure.
- */
- sdev_busy = atomic_read(&scmd->device->device_busy);
-
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
+ (megasas_sdev_busy_read(instance, scmd) >
+ (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
@@ -685,6 +713,8 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ count += instance->iopoll_q_count;
+
fusion->reply_frames_desc_pool =
dma_pool_create("mr_reply", &instance->pdev->dev,
fusion->reply_alloc_sz * count, 16, 0);
@@ -779,6 +809,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
}
msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ msix_count += instance->iopoll_q_count;
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
&instance->pdev->dev,
@@ -1129,7 +1160,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
- IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+ IOCInitMessage->HostMSIxVectors = instance->msix_vectors + instance->iopoll_q_count;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
time = ktime_get_real();
@@ -1823,6 +1854,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
sizeof(union MPI2_SGE_IO_UNION))/16;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ count += instance->iopoll_q_count;
+
for (i = 0 ; i < count; i++)
fusion->last_reply_idx[i] = 0;
@@ -1835,6 +1868,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
MEGASAS_FUSION_IOCTL_CMDS);
sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++)
+ atomic_set(&fusion->busy_mq_poll[i], 0);
+
if (megasas_alloc_ioc_init_frame(instance))
return 1;
@@ -3390,6 +3426,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
* Issue the command to the FW
*/
+ megasas_sdev_busy_inc(instance, scmd);
megasas_fire_cmd_fusion(instance, req_desc);
if (r1_cmd)
@@ -3450,6 +3487,7 @@ megasas_complete_r1_command(struct megasas_instance *instance,
scmd_local->SCp.ptr = NULL;
megasas_return_cmd_fusion(instance, cmd);
scsi_dma_unmap(scmd_local);
+ megasas_sdev_busy_dec(instance, scmd_local);
scmd_local->scsi_done(scmd_local);
}
}
@@ -3500,6 +3538,9 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
return IRQ_NONE;
+ if (irq_context && !atomic_add_unless(&irq_context->in_used, 1, 1))
+ return 0;
+
num_completed = 0;
while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
@@ -3550,6 +3591,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
scmd_local->SCp.ptr = NULL;
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
+ megasas_sdev_busy_dec(instance, scmd_local);
scmd_local->scsi_done(scmd_local);
} else /* Optimal VD - R1 FP command completion. */
megasas_complete_r1_command(instance, cmd_fusion);
@@ -3613,6 +3655,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
irq_context->irq_line_enable = true;
irq_poll_sched(&irq_context->irqpoll);
}
+ atomic_dec(&irq_context->in_used);
return num_completed;
}
}
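The in_used counter threaded through complete_cmd_fusion() is a lock-free guard: atomic_add_unless(&v, 1, 1) increments only when the value is not already 1, so it acts as a trylock between the hard-IRQ handler, the irq_poll softirq, and the blk-mq poll path, any of which may target the same reply queue. A standalone sketch, names illustrative:

	static atomic_t my_in_use = ATOMIC_INIT(0);

	static int my_drain_queue(void)
	{
		/* returns 0 (no increment) when the counter is already 1 */
		if (!atomic_add_unless(&my_in_use, 1, 1))
			return 0;	/* another context is already draining */

		/* ... process completions here ... */

		atomic_dec(&my_in_use);	/* release the guard */
		return 1;
	}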
@@ -3630,9 +3673,35 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
instance->reply_post_host_index_addr[0]);
megasas_check_and_restore_queue_depth(instance);
}
+
+ if (irq_context)
+ atomic_dec(&irq_context->in_used);
+
return num_completed;
}
+int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct megasas_instance *instance;
+ int num_entries = 0;
+ struct fusion_context *fusion;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ fusion = instance->ctrl_context;
+
+ queue_num = queue_num + instance->low_latency_index_start;
+
+ if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1))
+ return 0;
+
+ num_entries = complete_cmd_fusion(instance, queue_num, NULL);
+ atomic_dec(&fusion->busy_mq_poll[queue_num]);
+
+ return num_entries;
+}
+
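megasas_blk_mq_poll() is the driver's entry point for polled (io_uring IOPOLL) completions; it is wired up through the mq_poll host-template hook this series introduces, together with host->nr_maps = 3 (default, read, and poll hctx maps) set in the megasas_io_attach hunk earlier. A hedged sketch of the wiring, callback names illustrative:

	static struct scsi_host_template my_template = {
		/* ... */
		.map_queues	= my_map_queues,	/* distributes hctxs across the maps */
		.mq_poll	= my_mq_poll,		/* invoked for HCTX_TYPE_POLL queues */
	};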
/**
* megasas_enable_irq_poll() - enable irqpoll
* @instance: Adapter soft state
@@ -4163,6 +4232,8 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
fusion = instance->ctrl_context;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ count += instance->iopoll_q_count;
+
for (i = 0 ; i < count ; i++) {
fusion->last_reply_idx[i] = 0;
reply_desc = fusion->reply_frames_desc[i];
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 30de4b01f703..ce84f811e5e1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1303,6 +1303,8 @@ struct fusion_context {
u8 *sense;
dma_addr_t sense_phys_addr;
+ atomic_t busy_mq_poll[MAX_MSIX_QUEUES_FUSION];
+
dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 43a3bf8ff428..d00431f553e1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -992,7 +992,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
*one and check the value returned for GPIOCount at runtime.
*/
#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
-#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (36)
#endif
typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index ac0eef975f17..5779f313f6f8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2905,23 +2905,22 @@ static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
- int dma_mask;
if (ioc->is_mcpu_endpoint ||
sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
dma_get_required_mask(&pdev->dev) <= 32)
- dma_mask = 32;
+ ioc->dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
- dma_mask = 63;
+ ioc->dma_mask = 63;
else
- dma_mask = 64;
+ ioc->dma_mask = 64;
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
return -ENODEV;
- if (dma_mask > 32) {
+ if (ioc->dma_mask > 32) {
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
} else {
@@ -2931,7 +2930,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
si_meminfo(&s);
ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- dma_mask, convert_to_kb(s.totalram));
+ ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
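Caching the negotiated width in ioc->dma_mask is what lets the try_32bit_dma retry path further down compare before falling back. The two calls in this hunk are the standard mask negotiation; dma_set_mask_and_coherent() is the usual one-call form, sketched here with an illustrative wrapper:

	#include <linux/dma-mapping.h>

	static int my_set_dma(struct device *dev, unsigned int bits)
	{
		/* sets the streaming and the coherent mask together */
		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bits)))
			return -ENODEV;	/* caller may retry with a narrower mask */
		return 0;
	}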
@@ -3678,8 +3677,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
* IOs on the target device is >=8.
*/
- if (atomic_read(&scmd->device->device_busy) >
- MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
+ if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
return base_mod64((
atomic64_add_return(1, &ioc->high_iops_outstanding) /
MPT3SAS_HIGH_IOPS_BATCH_COUNT),
@@ -4173,7 +4171,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * _base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default_atomic - Default, primarily used for config pages
* use Atomic Request Descriptor
* @ioc: per adapter object
* @smid: system request message index
@@ -5232,7 +5230,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
* mpt3sas_free_enclosure_list - release memory
* @ioc: per adapter object
*
- * Free memory allocated during encloure add.
+ * Free memory allocated during enclosure add.
*/
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -5338,10 +5336,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_free(ioc->pcie_sgl_dma_pool,
ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
}
dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
-
if (ioc->config_page) {
dexitprintk(ioc,
ioc_info(ioc, "config_page(0x%p): free\n",
@@ -5400,6 +5398,271 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
}
/**
+ * _base_reduce_hba_queue_depth - Retry with reduced queue depth
+ * @ioc: Adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ **/
+static inline int
+_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
+{
+ int reduce_sz = 64;
+
+ if ((ioc->hba_queue_depth - reduce_sz) >
+ (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+ ioc->hba_queue_depth -= reduce_sz;
+ return 0;
+ } else
+ return -ENOMEM;
+}
+
+/**
+ * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
+ * for pcie sgl pools.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ int i = 0, j = 0;
+ struct chain_tracker *ct;
+
+ ioc->pcie_sgl_dma_pool =
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
+ ioc->page_size, 0);
+ if (!ioc->pcie_sgl_dma_pool) {
+ ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+ return -ENOMEM;
+ }
+
+ ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+ ioc->chains_per_prp_buffer =
+ min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ ioc->pcie_sg_lookup[i].pcie_sgl =
+ dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
+ ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+ return -EAGAIN;
+ }
+
+ if (!mpt3sas_check_same_4gb_region(
+ (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
+ ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ (unsigned long long)
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+
+ for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+ ct = &ioc->chain_lookup[i].chains_per_smid[j];
+ ct->chain_buffer =
+ ioc->pcie_sg_lookup[i].pcie_sgl +
+ (j * ioc->chain_segment_sz);
+ ct->chain_buffer_dma =
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+ (j * ioc->chain_segment_sz);
+ }
+ }
+ dinitprintk(ioc, ioc_info(ioc,
+ "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+ dinitprintk(ioc, ioc_info(ioc,
+ "Number of chains can fit in a PRP page(%d)\n",
+ ioc->chains_per_prp_buffer));
+ return 0;
+}
+
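mpt3sas_check_same_4gb_region(), called by each of these new helpers, enforces the firmware restriction that a pool must not straddle a 4 GiB boundary, i.e. the first and last byte must share the same upper 32 address bits. A hedged sketch of the test itself (the driver's helper happens to take the virtual address; names here are illustrative):

	static bool my_same_4gb_region(dma_addr_t start, u32 sz)
	{
		/* true when start and the last byte share the upper 32 bits */
		return upper_32_bits(start) == upper_32_bits(start + sz - 1);
	}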
+/**
+ * _base_allocate_chain_dma_pool - Allocating DMA'able memory
+ * for chain dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ int i = 0, j = 0;
+ struct chain_tracker *ctr;
+
+ ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
+ ioc->chain_segment_sz, 16, 0);
+ if (!ioc->chain_dma_pool)
+ return -ENOMEM;
+
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ for (j = ioc->chains_per_prp_buffer;
+ j < ioc->chains_needed_per_io; j++) {
+ ctr = &ioc->chain_lookup[i].chains_per_smid[j];
+ ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
+ GFP_KERNEL, &ctr->chain_buffer_dma);
+ if (!ctr->chain_buffer)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region((long)
+ ctr->chain_buffer, ioc->chain_segment_sz)) {
+ ioc_err(ioc,
+ "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
+ ctr->chain_buffer,
+ (unsigned long long)ctr->chain_buffer_dma);
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ }
+ }
+ dinitprintk(ioc, ioc_info(ioc,
+ "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
+ (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
+ ioc->chain_segment_sz))/1024));
+ return 0;
+}
+
+/**
+ * _base_allocate_sense_dma_pool - Allocating DMA'able memory
+ * for sense dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ ioc->sense_dma_pool =
+ dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
+ if (!ioc->sense_dma_pool)
+ return -ENOMEM;
+ ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
+ GFP_KERNEL, &ioc->sense_dma);
+ if (!ioc->sense)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
+ dinitprintk(ioc, pr_err(
+ "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
+ ioc->sense, (unsigned long long) ioc->sense_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ ioc_info(ioc,
+ "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
+ ioc->sense, (unsigned long long)ioc->sense_dma,
+ ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
+ return 0;
+}
+
+/**
+ * _base_allocate_reply_pool - Allocating DMA'able memory
+ * for reply pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ /* reply pool, 4 byte align */
+ ioc->reply_dma_pool = dma_pool_create("reply pool",
+ &ioc->pdev->dev, sz, 4, 0);
+ if (!ioc->reply_dma_pool)
+ return -ENOMEM;
+ ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
+ &ioc->reply_dma);
+ if (!ioc->reply)
+ return -EAGAIN;
+	if (!mpt3sas_check_same_4gb_region((long)ioc->reply, sz)) {
+ dinitprintk(ioc, pr_err(
+ "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
+ ioc->reply, (unsigned long long) ioc->reply_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+ ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+ ioc_info(ioc,
+ "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->reply, (unsigned long long)ioc->reply_dma,
+ ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
+ return 0;
+}
+
+/**
+ * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
+ * for reply free dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+ /* reply free queue, 16 byte align */
+ ioc->reply_free_dma_pool = dma_pool_create(
+ "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
+ if (!ioc->reply_free_dma_pool)
+ return -ENOMEM;
+ ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
+ GFP_KERNEL, &ioc->reply_free_dma);
+ if (!ioc->reply_free)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+ dinitprintk(ioc,
+ pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
+ ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ memset(ioc->reply_free, 0, sz);
+ dinitprintk(ioc, ioc_info(ioc,
+ "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+ dinitprintk(ioc, ioc_info(ioc,
+ "reply_free_dma (0x%llx)\n",
+ (unsigned long long)ioc->reply_free_dma));
+ return 0;
+}
+
+/**
+ * _base_allocate_reply_post_free_array - Allocating DMA'able memory
+ * for reply post free array.
+ * @ioc: Adapter object
+ * @reply_post_free_array_sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
+ u32 reply_post_free_array_sz)
+{
+ ioc->reply_post_free_array_dma_pool =
+ dma_pool_create("reply_post_free_array pool",
+ &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
+ if (!ioc->reply_post_free_array_dma_pool)
+ return -ENOMEM;
+ ioc->reply_post_free_array =
+ dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
+ GFP_KERNEL, &ioc->reply_post_free_array_dma);
+ if (!ioc->reply_post_free_array)
+ return -EAGAIN;
+ if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
+ reply_post_free_array_sz)) {
+		dinitprintk(ioc, pr_err(
+		    "Bad Reply Post Free Array! Array (0x%p) Array dma = (0x%llx)\n",
+		    ioc->reply_post_free_array,
+		    (unsigned long long) ioc->reply_post_free_array_dma));
+ ioc->use_32bit_dma = true;
+ return -EAGAIN;
+ }
+ return 0;
+}
+/**
* base_alloc_rdpq_dma_pool - Allocating DMA'able memory
* for reply queues.
* @ioc: per adapter object
@@ -5492,13 +5755,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz;
- u32 rdpq_sz = 0;
+ u32 rdpq_sz = 0, sense_sz = 0;
u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
- int i, j;
- int ret = 0;
- struct chain_tracker *ct;
+ int i;
+ int ret = 0, rc = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5802,6 +6064,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* be required for NVMe PRP's, only each set of NVMe blocks will be
* contiguous, so a new set is allocated for each possible I/O.
*/
+
ioc->chains_per_prp_buffer = 0;
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
nvme_blocks_needed =
@@ -5816,190 +6079,67 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
sz = nvme_blocks_needed * ioc->page_size;
- ioc->pcie_sgl_dma_pool =
- dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->pcie_sgl_dma_pool) {
- ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
- goto out;
- }
-
- ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
- ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
- ioc->chains_needed_per_io);
-
- for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
- ioc->pcie_sgl_dma_pool, GFP_KERNEL,
- &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
- if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
- ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
- goto out;
- }
- for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
- ct = &ioc->chain_lookup[i].chains_per_smid[j];
- ct->chain_buffer =
- ioc->pcie_sg_lookup[i].pcie_sgl +
- (j * ioc->chain_segment_sz);
- ct->chain_buffer_dma =
- ioc->pcie_sg_lookup[i].pcie_sgl_dma +
- (j * ioc->chain_segment_sz);
- }
- }
-
- dinitprintk(ioc,
- ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->scsiio_depth, sz,
- (sz * ioc->scsiio_depth) / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
- ioc->chains_per_prp_buffer));
+ rc = _base_allocate_pcie_sgl_pool(ioc, sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
total_sz += sz * ioc->scsiio_depth;
}
- ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
- ioc->chain_segment_sz, 16, 0);
- if (!ioc->chain_dma_pool) {
- ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
- goto out;
- }
- for (i = 0; i < ioc->scsiio_depth; i++) {
- for (j = ioc->chains_per_prp_buffer;
- j < ioc->chains_needed_per_io; j++) {
- ct = &ioc->chain_lookup[i].chains_per_smid[j];
- ct->chain_buffer = dma_pool_alloc(
- ioc->chain_dma_pool, GFP_KERNEL,
- &ct->chain_buffer_dma);
- if (!ct->chain_buffer) {
- ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
- goto out;
- }
- }
- total_sz += ioc->chain_segment_sz;
- }
-
+ rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
+ total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
+ ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
dinitprintk(ioc,
- ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->chain_depth, ioc->chain_segment_sz,
- (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
-
+ ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->chain_depth, ioc->chain_segment_sz,
+ (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
/* sense buffers, 4 byte align */
- sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
- ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
- 4, 0);
- if (!ioc->sense_dma_pool) {
- ioc_err(ioc, "sense pool: dma_pool_create failed\n");
- goto out;
- }
- ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
- &ioc->sense_dma);
- if (!ioc->sense) {
- ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
- goto out;
- }
- /* sense buffer requires to be in same 4 gb region.
- * Below function will check the same.
- * In case of failure, new pci pool will be created with updated
- * alignment. Older allocation and pool will be destroyed.
- * Alignment will be used such a way that next allocation if
- * success, will always meet same 4gb region requirement.
- * Actual requirement is not alignment, but we need start and end of
- * DMA address must have same upper 32 bit address.
- */
- if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
- //Release Sense pool & Reallocate
- dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
- dma_pool_destroy(ioc->sense_dma_pool);
- ioc->sense = NULL;
-
- ioc->sense_dma_pool =
- dma_pool_create("sense pool", &ioc->pdev->dev, sz,
- roundup_pow_of_two(sz), 0);
- if (!ioc->sense_dma_pool) {
- ioc_err(ioc, "sense pool: pci_pool_create failed\n");
- goto out;
- }
- ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
- &ioc->sense_dma);
- if (!ioc->sense) {
- ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
- goto out;
- }
- }
+ sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+ rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
+ total_sz += sense_sz;
ioc_info(ioc,
"sense pool(0x%p)- dma(0x%llx): depth(%d),"
"element_size(%d), pool_size(%d kB)\n",
ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
SCSI_SENSE_BUFFERSIZE, sense_sz / 1024);
-
- total_sz += sz;
-
/* reply pool, 4 byte align */
sz = ioc->reply_free_queue_depth * ioc->reply_sz;
- ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
- 4, 0);
- if (!ioc->reply_dma_pool) {
- ioc_err(ioc, "reply pool: dma_pool_create failed\n");
- goto out;
- }
- ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
- &ioc->reply_dma);
- if (!ioc->reply) {
- ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
- goto out;
- }
- ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
- ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
- dinitprintk(ioc,
- ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->reply, ioc->reply_free_queue_depth,
- ioc->reply_sz, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "reply_dma(0x%llx)\n",
- (unsigned long long)ioc->reply_dma));
+ rc = _base_allocate_reply_pool(ioc, sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
total_sz += sz;
/* reply free queue, 16 byte align */
sz = ioc->reply_free_queue_depth * 4;
- ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
- &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_free_dma_pool) {
- ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
- goto out;
- }
- ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
- &ioc->reply_free_dma);
- if (!ioc->reply_free) {
- ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
- goto out;
- }
- dinitprintk(ioc,
- ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->reply_free, ioc->reply_free_queue_depth,
- 4, sz / 1024));
+ rc = _base_allocate_reply_free_dma_pool(ioc, sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
dinitprintk(ioc,
ioc_info(ioc, "reply_free_dma (0x%llx)\n",
(unsigned long long)ioc->reply_free_dma));
total_sz += sz;
-
if (ioc->rdpq_array_enable) {
reply_post_free_array_sz = ioc->reply_queue_count *
sizeof(Mpi2IOCInitRDPQArrayEntry);
- ioc->reply_post_free_array_dma_pool =
- dma_pool_create("reply_post_free_array pool",
- &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
- if (!ioc->reply_post_free_array_dma_pool) {
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
- goto out;
- }
- ioc->reply_post_free_array =
- dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
- GFP_KERNEL, &ioc->reply_post_free_array_dma);
- if (!ioc->reply_post_free_array) {
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
- goto out;
- }
+ rc = _base_allocate_reply_post_free_array(ioc,
+ reply_post_free_array_sz);
+ if (rc == -ENOMEM)
+ return -ENOMEM;
+ else if (rc == -EAGAIN)
+ goto try_32bit_dma;
}
ioc->config_page_sz = 512;
ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
@@ -6022,6 +6162,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->shost->sg_tablesize);
return 0;
+try_32bit_dma:
+ _base_release_memory_pools(ioc);
+ if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
+ /* Change dma coherent mask to 32 bit and reallocate */
+ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+ pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
+ pci_name(ioc->pdev));
+ return -ENODEV;
+ }
+ } else if (_base_reduce_hba_queue_depth(ioc) != 0)
+ return -ENOMEM;
+ goto retry_allocation;
+
out:
return -ENOMEM;
}
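The try_32bit_dma label implements a two-stage fallback: an -EAGAIN from any pool helper (a pool straddled a 4 GiB boundary) releases everything, drops the DMA mask to 32 bits, and retries; once already at 32 bits, the HBA queue depth is reduced instead. The control flow, reduced to a sketch with illustrative helper names:

	static int my_alloc_with_fallback(struct MPT3SAS_ADAPTER *ioc)
	{
		int rc;

	retry_allocation:
		rc = my_alloc_pools(ioc);
		if (rc == -EAGAIN) {
			my_release_pools(ioc);
			if (ioc->dma_mask > 32)
				my_set_32bit_dma(ioc);		/* narrow the mask first */
			else if (my_reduce_queue_depth(ioc))
				return -ENOMEM;			/* cannot shrink further */
			goto retry_allocation;
		}
		return rc;
	}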
@@ -7252,6 +7405,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "sending diag reset !!\n");
+ pci_cfg_access_lock(ioc->pdev);
+
drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
count = 0;
@@ -7342,10 +7497,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
+ pci_cfg_access_unlock(ioc->pdev);
ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
out:
+ pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
return -EFAULT;
}
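pci_cfg_access_lock() is held for the duration of the diag reset so nothing else, including user space via sysfs, can touch the device's config space mid-sequence; note that both the success and the failure paths unlock. The pairing in isolation:

	pci_cfg_access_lock(pdev);	/* block concurrent config-space access */
	/* ... sensitive reset sequence ... */
	pci_cfg_access_unlock(pdev);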
@@ -7682,6 +7839,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->rdpq_array_enable_assigned = 0;
ioc->use_32bit_dma = false;
+ ioc->dma_mask = 64;
if (ioc->is_aero_ioc)
ioc->base_readl = &_base_readl_aero;
else
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 315aee6ef86f..98558d9c8c2d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,9 +77,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "37.100.00.00"
+#define MPT3SAS_DRIVER_VERSION "37.101.00.00"
#define MPT3SAS_MAJOR_VERSION 37
-#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_MINOR_VERSION 101
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1371,6 +1371,7 @@ struct MPT3SAS_ADAPTER {
u16 thresh_hold;
u8 high_iops_queues;
u32 drv_support_bitmap;
+ u32 dma_mask;
bool enable_sdev_max_qd;
bool use_32bit_dma;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 8238843523b5..55cd32908924 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -1781,7 +1781,7 @@ mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_set_driver_trigger_pg0 - write driver trigger page 0
+ * _config_set_driver_trigger_pg0 - write driver trigger page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
@@ -1915,7 +1915,7 @@ mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_set_driver_trigger_pg1 - write driver trigger page 1
+ * _config_set_driver_trigger_pg1 - write driver trigger page 1
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
@@ -2066,7 +2066,7 @@ mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_set_driver_trigger_pg2 - write driver trigger page 2
+ * _config_set_driver_trigger_pg2 - write driver trigger page 2
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
@@ -2226,7 +2226,7 @@ mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_set_driver_trigger_pg3 - write driver trigger page 3
+ * _config_set_driver_trigger_pg3 - write driver trigger page 3
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
@@ -2383,7 +2383,7 @@ mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_set_driver_trigger_pg4 - write driver trigger page 4
+ * _config_set_driver_trigger_pg4 - write driver trigger page 4
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 44f9a05db94e..b66140e4c370 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -454,7 +454,7 @@ out:
}
/**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -486,7 +486,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd.
+ * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -503,7 +503,7 @@ void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -2507,7 +2507,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
__func__, karg.unique_id);
return -EPERM;
}
- memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query));
+ memset(&karg.rel_query, 0, sizeof(karg.rel_query));
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
@@ -2520,8 +2520,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
__func__, buffer_type);
return -EPERM;
}
- memcpy(&karg.buffer_rel_condition, &ioc->htb_rel,
- sizeof(struct htb_rel_query));
+ memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
out:
if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
@@ -2759,7 +2758,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
/**
- *_ ctl_ioctl_compat - main ioctl entry point (compat)
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
* @file: ?
* @cmd: ?
* @arg: ?
@@ -2777,7 +2776,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
}
/**
- *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
+ * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
* @file: ?
* @cmd: ?
* @arg: ?
@@ -3045,7 +3044,7 @@ fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
static DEVICE_ATTR_RO(fw_queue_depth);
/**
- * sas_address_show - sas address
+ * host_sas_address_show - sas address
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3203,7 +3202,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
+ Mpi2IOUnitPage3_t io_unit_pg3;
Mpi2ConfigReply_t mpi_reply;
u16 backup_rail_monitor_status = 0;
u16 ioc_status;
@@ -3220,17 +3219,10 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
if (ioc->pci_error_recovery || ioc->remove_host)
goto out;
- /* allocate upto GPIOVal 36 entries */
- sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
- io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
- if (!io_unit_pg3) {
- rc = -ENOMEM;
- ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
- __func__, sz);
- goto out;
- }
+ sz = sizeof(io_unit_pg3);
+ memset(&io_unit_pg3, 0, sz);
- if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
+ if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
0) {
ioc_err(ioc, "%s: failed reading iounit_pg3\n",
__func__);
@@ -3246,19 +3238,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
goto out;
}
- if (io_unit_pg3->GPIOCount < 25) {
- ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
- __func__, io_unit_pg3->GPIOCount);
+ if (io_unit_pg3.GPIOCount < 25) {
+ ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
+ __func__, io_unit_pg3.GPIOCount);
rc = -EINVAL;
goto out;
}
/* BRM status is in bit zero of GPIOVal[24] */
- backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
+ backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
out:
- kfree(io_unit_pg3);
mutex_unlock(&ioc->pci_access_mutex);
return rc;
}
@@ -3669,7 +3660,7 @@ static DEVICE_ATTR_RW(diag_trigger_scsi);
/**
- * diag_trigger_scsi_show - show the diag_trigger_mpi attribute
+ * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3928,7 +3919,7 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(sas_device_handle);
/**
- * sas_ncq_io_prio_show - send prioritized io commands to device
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
* @dev: pointer to embedded device
* @attr: ?
* @buf: the buffer returned
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index d2ccdafb8df2..8f6ffb40261c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -50,6 +50,8 @@
#include <linux/miscdevice.h>
#endif
+#include "mpt3sas_base.h"
+
#ifndef MPT2SAS_MINOR
#define MPT2SAS_MINOR (MPT_MINOR + 1)
#endif
@@ -436,19 +438,13 @@ struct mpt3_diag_read_buffer {
* struct mpt3_addnl_diag_query - diagnostic buffer release reason
* @hdr - generic header
* @unique_id - unique id associated with this buffer.
- * @buffer_rel_condition - Release condition ioctl/sysfs/reset
- * @reserved1
- * @trigger_type - Master/Event/scsi/MPI
- * @trigger_info_dwords - Data Correspondig to trigger type
+ * @rel_query - release query.
* @reserved2
*/
struct mpt3_addnl_diag_query {
struct mpt3_ioctl_header hdr;
uint32_t unique_id;
- uint16_t buffer_rel_condition;
- uint16_t reserved1;
- uint32_t trigger_type;
- uint32_t trigger_info_dwords[2];
+ struct htb_rel_query rel_query;
uint32_t reserved2[2];
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6aa6de729187..d00aca3c77ce 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -749,9 +749,10 @@ __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
+ * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
* sas address from sas_device_list list
* @ioc: per adapter object
+ * @sas_address: device sas address
* @port: port number
*
* Search for _sas_device object corresponding to provided sas address,
@@ -3423,7 +3424,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
- if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+ if (r == SUCCESS && scsi_device_busy(scmd->device))
r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
@@ -4518,7 +4519,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * _scsih_check_for_pending_internal_cmds - check for pending internal messages
+ * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
* @ioc: per adapter object
* @smid: system request message index
*
@@ -6174,10 +6175,10 @@ enum hba_port_matched_codes {
* _scsih_look_and_get_matched_port_entry - Get matched hba port entry
* from HBA port table
* @ioc: per adapter object
- * @port_entry - hba port entry from temporary port table which needs to be
+ * @port_entry: hba port entry from temporary port table which needs to be
* searched for matched entry in the HBA port table
- * @matched_port_entry - save matched hba port entry here
- * @count - count of matched entries
+ * @matched_port_entry: save matched hba port entry here
+ * @count: count of matched entries
*
* return type of matched entry found.
*/
@@ -6483,6 +6484,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
if (!vphy)
return NULL;
+ if (!port->vphys_mask)
+ INIT_LIST_HEAD(&port->vphys_list);
+
/*
* Enable bit corresponding to HBA phy number on its
* parent hba_port object's vphys_mask field.
@@ -6490,7 +6494,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
port->vphys_mask |= (1 << phy_num);
vphy->phy_mask |= (1 << phy_num);
- INIT_LIST_HEAD(&port->vphys_list);
list_add_tail(&vphy->list, &port->vphys_list);
ioc_info(ioc,
@@ -6952,6 +6955,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_expander_remove - removing expander object
* @ioc: per adapter object
* @sas_address: expander sas_address
+ * @port: hba port entry
*/
void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -10219,8 +10223,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
Mpi2ExpanderPage0_t expander_pg0;
Mpi2SasDevicePage0_t sas_device_pg0;
Mpi26PCIeDevicePage0_t pcie_device_pg0;
- Mpi2RaidVolPage1_t volume_pg1;
- Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidVolPage1_t *volume_pg1;
+ Mpi2RaidVolPage0_t *volume_pg0;
Mpi2RaidPhysDiskPage0_t pd_pg0;
Mpi2EventIrConfigElement_t element;
Mpi2ConfigReply_t mpi_reply;
@@ -10235,6 +10239,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u8 retry_count;
unsigned long flags;
+ volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
+ if (!volume_pg0)
+ return;
+
+ volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
+ if (!volume_pg1) {
+ kfree(volume_pg0);
+ return;
+ }
+
ioc_info(ioc, "scan devices: start\n");
_scsih_sas_host_refresh(ioc);
@@ -10344,7 +10358,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
/* volumes */
handle = 0xFFFF;
while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
- &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
@@ -10352,15 +10366,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
- handle = le16_to_cpu(volume_pg1.DevHandle);
+ handle = le16_to_cpu(volume_pg1->DevHandle);
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_wwid(ioc,
- le64_to_cpu(volume_pg1.WWID));
+ le64_to_cpu(volume_pg1->WWID));
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (raid_device)
continue;
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
- &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t)))
continue;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
@@ -10370,17 +10384,17 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
- if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
- volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
- volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
- element.VolDevHandle = volume_pg1.DevHandle;
+ element.VolDevHandle = volume_pg1->DevHandle;
ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
- volume_pg1.DevHandle);
+ volume_pg1->DevHandle);
_scsih_sas_volume_add(ioc, &element);
ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
- volume_pg1.DevHandle);
+ volume_pg1->DevHandle);
}
}
@@ -10468,12 +10482,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
}
+
+ kfree(volume_pg0);
+ kfree(volume_pg1);
+
ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
ioc_info(ioc, "scan devices: complete\n");
}
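Mpi2RaidVolPage0_t/Mpi2RaidVolPage1_t are large enough that keeping them on the kernel stack (only a few pages deep) is risky, which is what this change fixes by moving them to kzalloc()'d memory freed on every exit path. The shape of the fix, as a sketch:

	Mpi2RaidVolPage1_t *volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);

	if (!volume_pg1)
		return;
	/* ... hand volume_pg1 (already a pointer) to the config-page helpers ... */
	kfree(volume_pg1);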
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -10514,7 +10532,7 @@ mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
@@ -10802,7 +10820,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
pr_notice("cannot be powered and devices connected\n");
pr_notice("to this active cable will not be seen\n");
pr_notice("This active cable requires %d mW of power\n",
- ActiveCableEventData->ActiveCablePowerRequirement);
+ le32_to_cpu(
+ ActiveCableEventData->ActiveCablePowerRequirement));
break;
case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
@@ -12281,7 +12300,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
}
/**
- * scsih__ncq_prio_supp - Check for NCQ command priority support
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
* @sdev: scsi device struct
*
* This is called when a user indicates they would like to enable
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 6f4708224755..0681daee6c14 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -62,7 +62,7 @@
/**
* _transport_get_port_id_by_sas_phy - get zone's port id that Phy belong to
- * @phy - sas_phy object
+ * @phy: sas_phy object
*
* Return Port number
*/
@@ -339,10 +339,11 @@ struct rep_manu_reply {
};
/**
- * transport_expander_report_manufacture - obtain SMP report_manufacture
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
+ * @port_id: Port ID number
*
* Fills in the sas_expander_device object when SMP port is created.
*
@@ -671,7 +672,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
* @ioc: per adapter object
* @handle: handle of attached device
* @sas_address: sas address of parent expander or sas host
- * @port: hba port entry
+ * @hba_port: hba port entry
* Context: This function will acquire ioc->sas_node_lock.
*
* Adding new port object to the sas_node->sas_port_list.
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 327fdd5ee962..8ff976c9967e 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -40,7 +40,7 @@
#define mv_dprintk(format, arg...) \
printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
#else
-#define mv_dprintk(format, arg...)
+#define mv_dprintk(format, arg...) no_printk(format, ## arg)
#endif
#define MV_MAX_U32 0xffffffff
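Defining the disabled variant as no_printk() rather than an empty macro keeps the compiler's printf-format checking and marks the arguments as used, so format bugs and unused-variable warnings in rarely-built debug paths still surface. The general pattern:

	#ifdef MY_DEBUG
	#define my_dbg(format, arg...)	printk(KERN_DEBUG format, ## arg)
	#else
	#define my_dbg(format, arg...)	no_printk(format, ## arg)
	#endif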
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 71b6a1f834cd..9d5743627604 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -66,9 +66,9 @@ static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
static bool tag_is_empty(struct mvumi_tag *st)
{
if (st->top == 0)
- return 1;
+ return true;
else
- return 0;
+ return false;
}
static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
@@ -182,7 +182,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
* @mhba: Adapter soft state
* @scmd: SCSI command from the mid-layer
* @sgl_p: SGL to be filled in
- * @sg_count return the number of SG elements
+ * @sg_count: return the number of SG elements
*
* If successful, this function returns 0. otherwise, it returns -1.
*/
@@ -1295,6 +1295,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba)
* mvumi_complete_cmd - Completes a command
* @mhba: Adapter soft state
* @cmd: Command to be completed
+ * @ob_frame: Command response
*/
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
struct mvumi_rsp_frame *ob_frame)
@@ -2076,8 +2077,8 @@ error:
/**
* mvumi_queue_command - Queue entry point
+ * @shost: Scsi host to queue command on
* @scmd: SCSI command to be queued
- * @done: Callback entry point
*/
static int mvumi_queue_command(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 3d8e91c07dc7..d9c82e211ae7 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -82,7 +82,7 @@ static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
return NULL;
}
-/**
+/*
* myrb_create_mempools - allocates auxiliary data structures
*
* Return: true on success, false otherwise.
@@ -134,7 +134,7 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
return true;
}
-/**
+/*
* myrb_destroy_mempools - tears down the memory pools for the controller
*/
static void myrb_destroy_mempools(struct myrb_hba *cb)
@@ -146,7 +146,7 @@ static void myrb_destroy_mempools(struct myrb_hba *cb)
dma_pool_destroy(cb->dcdb_pool);
}
-/**
+/*
* myrb_reset_cmd - reset command block
*/
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
@@ -157,7 +157,7 @@ static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
cmd_blk->status = 0;
}
-/**
+/*
* myrb_qcmd - queues command block for execution
*/
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
@@ -177,7 +177,7 @@ static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
cb->next_cmd_mbox = next_mbox;
}
-/**
+/*
* myrb_exec_cmd - executes command block and waits for completion.
*
* Return: command status
@@ -198,7 +198,7 @@ static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
return cmd_blk->status;
}
-/**
+/*
* myrb_exec_type3 - executes a type 3 command and waits for completion.
*
* Return: command status
@@ -220,7 +220,7 @@ static unsigned short myrb_exec_type3(struct myrb_hba *cb,
return status;
}
-/**
+/*
* myrb_exec_type3D - executes a type 3D command and waits for completion.
*
* Return: command status
@@ -332,7 +332,7 @@ static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
ev_buf, ev_addr);
}
-/**
+/*
* myrb_get_errtable - retrieves the error table from the controller
*
* Executes a type 3 command and logs the error table from the controller.
@@ -377,7 +377,7 @@ static void myrb_get_errtable(struct myrb_hba *cb)
}
}
-/**
+/*
* myrb_get_ldev_info - retrieves the logical device table from the controller
*
* Executes a type 3 command and updates the logical device table.
@@ -427,7 +427,7 @@ static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
return status;
}
-/**
+/*
* myrb_get_rbld_progress - get rebuild progress information
*
* Executes a type 3 command and returns the rebuild progress
@@ -462,11 +462,10 @@ static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
return status;
}
-/**
+/*
* myrb_update_rbld_progress - updates the rebuild status
*
* Updates the rebuild status for the attached logical devices.
- *
*/
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
@@ -523,7 +522,7 @@ static void myrb_update_rbld_progress(struct myrb_hba *cb)
cb->last_rbld_status = status;
}
-/**
+/*
* myrb_get_cc_progress - retrieve the rebuild status
*
* Execute a type 3 Command and fetch the rebuild / consistency check
@@ -571,7 +570,7 @@ static void myrb_get_cc_progress(struct myrb_hba *cb)
rbld_buf, rbld_addr);
}
-/**
+/*
* myrb_bgi_control - updates background initialisation status
*
* Executes a type 3B command and updates the background initialisation status
@@ -660,7 +659,7 @@ static void myrb_bgi_control(struct myrb_hba *cb)
bgi, bgi_addr);
}
-/**
+/*
* myrb_hba_enquiry - updates the controller status
*
* Executes a DAC_V1_Enquiry command and updates the controller status.
@@ -772,7 +771,7 @@ static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
return MYRB_STATUS_SUCCESS;
}
-/**
+/*
* myrb_set_pdev_state - sets the device state for a physical device
*
* Return: command status
@@ -796,7 +795,7 @@ static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
return status;
}
-/**
+/*
* myrb_enable_mmio - enables the Memory Mailbox Interface
*
* PD and P controller types have no memory mailbox, but still need the
@@ -901,7 +900,7 @@ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
return true;
}
-/**
+/*
* myrb_get_hba_config - reads the configuration information
*
* Reads the configuration information from the controller and
@@ -1193,7 +1192,7 @@ out_free:
return ret;
}
-/**
+/*
* myrb_unmap - unmaps controller structures
*/
static void myrb_unmap(struct myrb_hba *cb)
@@ -1229,7 +1228,7 @@ static void myrb_unmap(struct myrb_hba *cb)
}
}
-/**
+/*
* myrb_cleanup - cleanup controller structures
*/
static void myrb_cleanup(struct myrb_hba *cb)
@@ -2243,7 +2242,7 @@ static struct scsi_host_template myrb_template = {
/**
* myrb_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
*/
static int myrb_is_raid(struct device *dev)
{
@@ -2254,7 +2253,7 @@ static int myrb_is_raid(struct device *dev)
/**
* myrb_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void myrb_get_resync(struct device *dev)
{
@@ -2281,7 +2280,7 @@ static void myrb_get_resync(struct device *dev)
/**
* myrb_get_state - get raid volume status
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void myrb_get_state(struct device *dev)
{
@@ -2480,7 +2479,7 @@ static void myrb_monitor(struct work_struct *work)
queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
-/**
+/*
* myrb_err_status - reports controller BIOS messages
*
* Controller BIOS messages are passed through the Error Status Register
@@ -2553,11 +2552,6 @@ static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}
-static inline void DAC960_LA_gen_intr(void __iomem *base)
-{
- writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
-}
-
static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
@@ -2587,11 +2581,6 @@ static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}
-static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
-{
- writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
-}
-
static inline void DAC960_LA_ack_intr(void __iomem *base)
{
writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
@@ -2605,13 +2594,6 @@ static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
-{
- unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
-
- return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_LA_enable_intr(void __iomem *base)
{
unsigned char odb = 0xFF;
@@ -2628,13 +2610,6 @@ static inline void DAC960_LA_disable_intr(void __iomem *base)
writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
-static inline bool DAC960_LA_intr_enabled(void __iomem *base)
-{
- unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
-
- return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
union myrb_cmd_mbox *mbox)
{
@@ -2657,11 +2632,6 @@ static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}
-static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
-{
- return readb(base + DAC960_LA_STSID_OFFSET);
-}
-
static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
return readw(base + DAC960_LA_STS_OFFSET);
@@ -2810,7 +2780,7 @@ static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_LA_privdata = {
+static struct myrb_privdata DAC960_LA_privdata = {
.hw_init = DAC960_LA_hw_init,
.irq_handler = DAC960_LA_intr_handler,
.mmio_size = DAC960_LA_mmio_size,
@@ -2829,11 +2799,6 @@ static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}
-static inline void DAC960_PG_gen_intr(void __iomem *base)
-{
- writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
-}
-
static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
@@ -2863,11 +2828,6 @@ static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}
-static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
-{
- writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
-}
-
static inline void DAC960_PG_ack_intr(void __iomem *base)
{
writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
@@ -2881,13 +2841,6 @@ static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
-{
- unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
-
- return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_PG_enable_intr(void __iomem *base)
{
unsigned int imask = (unsigned int)-1;
@@ -2903,13 +2856,6 @@ static inline void DAC960_PG_disable_intr(void __iomem *base)
writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
-static inline bool DAC960_PG_intr_enabled(void __iomem *base)
-{
- unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
-
- return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
union myrb_cmd_mbox *mbox)
{
@@ -2932,12 +2878,6 @@ static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}
-static inline unsigned char
-DAC960_PG_read_status_cmd_ident(void __iomem *base)
-{
- return readb(base + DAC960_PG_STSID_OFFSET);
-}
-
static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
@@ -3086,7 +3026,7 @@ static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_PG_privdata = {
+static struct myrb_privdata DAC960_PG_privdata = {
.hw_init = DAC960_PG_hw_init,
.irq_handler = DAC960_PG_intr_handler,
.mmio_size = DAC960_PG_mmio_size,
@@ -3107,11 +3047,6 @@ static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}
-static inline void DAC960_PD_gen_intr(void __iomem *base)
-{
- writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
-}
-
static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
@@ -3153,13 +3088,6 @@ static inline void DAC960_PD_disable_intr(void __iomem *base)
writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
-static inline bool DAC960_PD_intr_enabled(void __iomem *base)
-{
- unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
-
- return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
-}
-
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
union myrb_cmd_mbox *mbox)
{
@@ -3289,7 +3217,7 @@ static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_PD_privdata = {
+static struct myrb_privdata DAC960_PD_privdata = {
.hw_init = DAC960_PD_hw_init,
.irq_handler = DAC960_PD_intr_handler,
.mmio_size = DAC960_PD_mmio_size,
@@ -3487,7 +3415,7 @@ static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrb_privdata DAC960_P_privdata = {
+static struct myrb_privdata DAC960_P_privdata = {
.hw_init = DAC960_P_hw_init,
.irq_handler = DAC960_P_intr_handler,
.mmio_size = DAC960_PD_mmio_size,
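The four DAC960_*_privdata conversions above are the same fix: each dispatch table is referenced only from within myrb.c, so `static` gives it internal linkage, drops a needless global symbol, and quiets missing-prototype warnings. A minimal sketch of the pattern, with hypothetical names:

	/* a file-local dispatch table; 'static' means no link-time clash
	 * with another translation unit that uses the same table name */
	struct hw_ops {
		int (*hw_init)(void __iomem *base);
		unsigned long mmio_size;
	};

	static int variant_a_hw_init(void __iomem *base)
	{
		return 0;		/* stub for the sketch */
	}

	static struct hw_ops variant_a_privdata = {
		.hw_init	= variant_a_hw_init,
		.mmio_size	= 0x80,
	};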
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 329fd025c718..3b68c68d1716 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1190,7 +1190,6 @@ static ssize_t consistency_check_show(struct device *dev,
struct myrs_hba *cs = shost_priv(sdev->host);
struct myrs_ldev_info *ldev_info;
unsigned short ldev_num;
- unsigned char status;
if (sdev->channel < cs->ctlr_info->physchan_present)
return snprintf(buf, 32, "physical device - not checking\n");
@@ -1199,7 +1198,7 @@ static ssize_t consistency_check_show(struct device *dev,
if (!ldev_info)
return -ENXIO;
ldev_num = ldev_info->ldev_num;
- status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
if (ldev_info->cc_active)
return snprintf(buf, 32, "checking block %zu of %zu\n",
(size_t)ldev_info->cc_lba,
@@ -1959,7 +1958,7 @@ static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
/**
* myrs_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
*/
static int
myrs_is_raid(struct device *dev)
@@ -1972,7 +1971,7 @@ myrs_is_raid(struct device *dev)
/**
* myrs_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
myrs_get_resync(struct device *dev)
@@ -1981,14 +1980,13 @@ myrs_get_resync(struct device *dev)
struct myrs_hba *cs = shost_priv(sdev->host);
struct myrs_ldev_info *ldev_info = sdev->hostdata;
u64 percent_complete = 0;
- u8 status;
if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
return;
if (ldev_info->rbld_active) {
unsigned short ldev_num = ldev_info->ldev_num;
- status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
percent_complete = ldev_info->rbld_lba * 100;
do_div(percent_complete, ldev_info->cfg_devsize);
}
@@ -1997,7 +1995,7 @@ myrs_get_resync(struct device *dev)
/**
* myrs_get_state - get raid volume status
- * @dev the device struct object
+ * @dev: the device struct object
*/
static void
myrs_get_state(struct device *dev)
@@ -2412,13 +2410,6 @@ static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
}
-static inline void DAC960_GEM_gen_intr(void __iomem *base)
-{
- __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
-
- writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
-}
-
static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
{
__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
@@ -2456,13 +2447,6 @@ static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}
-static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
-{
- __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
-
- writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
-}
-
static inline void DAC960_GEM_ack_intr(void __iomem *base)
{
__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
@@ -2479,14 +2463,6 @@ static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
-{
- __le32 val;
-
- val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
- return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_GEM_enable_intr(void __iomem *base)
{
__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
@@ -2501,16 +2477,6 @@ static inline void DAC960_GEM_disable_intr(void __iomem *base)
writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
}
-static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
-{
- __le32 val;
-
- val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
- return !((le32_to_cpu(val) >> 24) &
- (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
- DAC960_GEM_IRQMASK_MMBOX_IRQ));
-}
-
static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
union myrs_cmd_mbox *mbox)
{
@@ -2529,11 +2495,6 @@ static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
}
-static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
-{
- return readw(base + DAC960_GEM_CMDSTS_OFFSET);
-}
-
static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
{
return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
@@ -2658,7 +2619,7 @@ static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrs_privdata DAC960_GEM_privdata = {
+static struct myrs_privdata DAC960_GEM_privdata = {
.hw_init = DAC960_GEM_hw_init,
.irq_handler = DAC960_GEM_intr_handler,
.mmio_size = DAC960_GEM_mmio_size,
@@ -2678,11 +2639,6 @@ static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
}
-static inline void DAC960_BA_gen_intr(void __iomem *base)
-{
- writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
-}
-
static inline void DAC960_BA_reset_ctrl(void __iomem *base)
{
writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
@@ -2714,11 +2670,6 @@ static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
}
-static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
-{
- writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
-}
-
static inline void DAC960_BA_ack_intr(void __iomem *base)
{
writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
@@ -2733,14 +2684,6 @@ static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_BA_ODB_OFFSET);
- return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_BA_enable_intr(void __iomem *base)
{
writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
@@ -2751,14 +2694,6 @@ static inline void DAC960_BA_disable_intr(void __iomem *base)
writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
}
-static inline bool DAC960_BA_intr_enabled(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_BA_IRQMASK_OFFSET);
- return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
union myrs_cmd_mbox *mbox)
{
@@ -2778,11 +2713,6 @@ static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
}
-static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
-{
- return readw(base + DAC960_BA_CMDSTS_OFFSET);
-}
-
static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
{
return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
@@ -2908,7 +2838,7 @@ static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrs_privdata DAC960_BA_privdata = {
+static struct myrs_privdata DAC960_BA_privdata = {
.hw_init = DAC960_BA_hw_init,
.irq_handler = DAC960_BA_intr_handler,
.mmio_size = DAC960_BA_mmio_size,
@@ -2928,11 +2858,6 @@ static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
}
-static inline void DAC960_LP_gen_intr(void __iomem *base)
-{
- writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
-}
-
static inline void DAC960_LP_reset_ctrl(void __iomem *base)
{
writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
@@ -2964,11 +2889,6 @@ static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
}
-static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
-{
- writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
-}
-
static inline void DAC960_LP_ack_intr(void __iomem *base)
{
writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
@@ -2983,14 +2903,6 @@ static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
}
-static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_LP_ODB_OFFSET);
- return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
-}
-
static inline void DAC960_LP_enable_intr(void __iomem *base)
{
writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
@@ -3001,14 +2913,6 @@ static inline void DAC960_LP_disable_intr(void __iomem *base)
writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
}
-static inline bool DAC960_LP_intr_enabled(void __iomem *base)
-{
- u8 val;
-
- val = readb(base + DAC960_LP_IRQMASK_OFFSET);
- return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
-}
-
static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
union myrs_cmd_mbox *mbox)
{
@@ -3027,11 +2931,6 @@ static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
}
-static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
-{
- return readw(base + DAC960_LP_CMDSTS_OFFSET);
-}
-
static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
{
return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
@@ -3158,7 +3057,7 @@ static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct myrs_privdata DAC960_LP_privdata = {
+static struct myrs_privdata DAC960_LP_privdata = {
.hw_init = DAC960_LP_hw_init,
.irq_handler = DAC960_LP_intr_handler,
.mmio_size = DAC960_LP_mmio_size,
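Both myrs hunks above drop `status` variables that were assigned but never read; the nsp32 hunks below instead keep a variable in two places where it documents the read, tagging it `__maybe_unused`. A sketch of the two ways to quiet -Wunused-but-set-variable warnings:

	static void quiet_unused_warnings(void __iomem *base)
	{
		unsigned short __maybe_unused latched;

		/* 1) value truly unneeded: drop the variable and keep the
		 *    call only for its side effect (here, a register read
		 *    that clears a latch) */
		readw(base);

		/* 2) keep the variable for readability, but tell the
		 *    compiler the missing later read is intentional */
		latched = readw(base);
	}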
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index e44b1a0f6709..134bbd2d8b66 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -309,6 +309,7 @@ static struct scsi_host_template nsp32_template = {
#define NSP32_DEBUG_BUF_LEN 100
+__printf(4, 5)
static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
{
va_list args;
@@ -580,7 +581,6 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
int status;
unsigned short command = 0;
unsigned int msgout = 0;
- unsigned short execph;
int i;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
@@ -604,7 +604,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
/*
* clear execph
*/
- execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);
+ nsp32_read2(base, SCSI_EXECUTE_PHASE);
/*
* clear FIFO counter to set CDBs
@@ -876,7 +876,7 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
if (le32_to_cpu(sgt[i].len) > 0x10000) {
nsp32_msg(KERN_ERR,
- "can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
+ "can't transfer over 64KB at a time, size=0x%x", le32_to_cpu(sgt[i].len));
return FALSE;
}
nsp32_dbg(NSP32_DEBUG_SGLIST,
@@ -1780,8 +1780,6 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
- //unsigned short command;
- long new_sgtp;
int i;
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
@@ -1796,14 +1794,6 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
}
/*
- * Set SGTP ADDR current entry for restarting AUTOSCSI,
- * because SGTP is incremented next point.
- * There is few statement in the specification...
- */
- new_sgtp = data->cur_lunt->sglun_paddr +
- (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
-
- /*
* send messages
*/
for (i = 0; i < data->msgout_len; i++) {
@@ -2219,17 +2209,12 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target = data->cur_target;
- nsp32_sync_table *synct;
unsigned char get_period = data->msginbuf[3];
unsigned char get_offset = data->msginbuf[4];
int entry;
- int syncnum;
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
- synct = data->synct;
- syncnum = data->syncnum;
-
/*
* If this initiator sent the SDTR message, then the target responds with
* SDTR and the initiator sets SYNCREG and ACKWIDTH from the SDTR
* parameters.
@@ -2731,7 +2716,7 @@ static int nsp32_detect(struct pci_dev *pdev)
res = request_region(host->io_port, host->n_io_port, "nsp32");
if (res == NULL) {
nsp32_msg(KERN_ERR,
- "I/O region 0x%lx+0x%lx is already used",
+ "I/O region 0x%x+0x%x is already used",
data->BaseAddress, data->NumAddress);
goto free_irq;
}
@@ -2837,8 +2822,8 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
static void nsp32_do_bus_reset(nsp32_hw_data *data)
{
unsigned int base = data->BaseAddress;
- unsigned short intrdat;
int i;
+ unsigned short __maybe_unused intrdat;
nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");
@@ -2908,7 +2893,8 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
{
int vendor = data->pci_devid->vendor;
int device = data->pci_devid->device;
- int ret, val, i;
+ int ret, i;
+ int __maybe_unused val;
/*
* EEPROM checking.
@@ -3278,7 +3264,8 @@ static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
- nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host);
+ nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state.event=%x, slot=%s, host=0x%p",
+ pdev, state.event, pci_name(pdev), host);
pci_save_state (pdev);
pci_disable_device (pdev);
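The `__printf(4, 5)` annotation added to nsp32_message() is what lets the compiler verify the format-string fixes in this file (a u32 had been printed with %lx): argument 4 is the format, arguments from 5 on are the values. A standalone illustration of the underlying attribute:

	#include <stdarg.h>
	#include <stdio.h>

	/* same idea as the kernel's __printf(4, 5) */
	__attribute__((format(printf, 4, 5)))
	static void demo_message(const char *func, int line, const char *type,
				 const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		fprintf(stderr, "%s:%d [%s] ", func, line, type);
		vfprintf(stderr, fmt, args);
		va_end(args);
	}

	int main(void)
	{
		unsigned int len = 0x20000;

		demo_message(__func__, __LINE__, "err", "size=0x%x\n", len);
		/* "size=0x%lx" with the same argument would now warn */
		return 0;
	}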
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 12035baf0997..0b8802beb7ce 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_ctl.h"
+#include "pm8001_chips.h"
/* scsi host attributes */
@@ -299,7 +300,7 @@ static DEVICE_ATTR(sas_spec_support, S_IRUGO,
pm8001_ctl_sas_spec_support_show, NULL);
/**
- * pm8001_ctl_sas_address_show - sas address
+ * pm8001_ctl_host_sas_address_show - sas address
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
@@ -369,24 +370,22 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ u8 *ptr = (u8 *)pm8001_ha->memoryMap.region[AAP1].virt_ptr;
int i;
-#define AAP1_MEMMAP(r, c) \
- (*(u32 *)((u8*)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \
- + (c)))
char *str = buf;
int max = 2;
for (i = 0; i < max; i++) {
str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
"0x%08x 0x%08x\n",
- AAP1_MEMMAP(i, 0),
- AAP1_MEMMAP(i, 4),
- AAP1_MEMMAP(i, 8),
- AAP1_MEMMAP(i, 12),
- AAP1_MEMMAP(i, 16),
- AAP1_MEMMAP(i, 20),
- AAP1_MEMMAP(i, 24),
- AAP1_MEMMAP(i, 28));
+ pm8001_ctl_aap1_memmap(ptr, i, 0),
+ pm8001_ctl_aap1_memmap(ptr, i, 4),
+ pm8001_ctl_aap1_memmap(ptr, i, 8),
+ pm8001_ctl_aap1_memmap(ptr, i, 12),
+ pm8001_ctl_aap1_memmap(ptr, i, 16),
+ pm8001_ctl_aap1_memmap(ptr, i, 20),
+ pm8001_ctl_aap1_memmap(ptr, i, 24),
+ pm8001_ctl_aap1_memmap(ptr, i, 28));
}
return str - buf;
@@ -518,7 +517,7 @@ static ssize_t event_log_size_show(struct device *cdev,
}
static DEVICE_ATTR_RO(event_log_size);
/**
- * pm8001_ctl_aap_log_show - IOP event log
+ * pm8001_ctl_iop_log_show - IOP event log
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
@@ -647,8 +646,7 @@ struct flash_command {
int code;
};
-static struct flash_command flash_command_table[] =
-{
+static const struct flash_command flash_command_table[] = {
{"set_nvmd", FLASH_CMD_SET_NVMD},
{"update", FLASH_CMD_UPDATE},
{"", FLASH_CMD_NONE} /* Last entry should be NULL. */
@@ -659,8 +657,7 @@ struct error_fw {
int err_code;
};
-static struct error_fw flash_error_table[] =
-{
+static const struct error_fw flash_error_table[] = {
{"Failed to open fw image file", FAIL_OPEN_BIOS_FILE},
{"image header mismatch", FLASH_UPDATE_HDR_ERR},
{"image offset mismatch", FLASH_UPDATE_OFFSET_ERR},
@@ -883,9 +880,122 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
flash_error_table[i].err_code,
flash_error_table[i].reason);
}
-
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
+
+/**
+ * ctl_mpi_state_show - controller MPI state check
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static const char *const mpiStateText[] = {
+ "MPI is not initialized",
+ "MPI is successfully initialized",
+ "MPI termination is in progress",
+ "MPI initialization failed with error in [31:16]"
+};
+
+static ssize_t ctl_mpi_state_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int mpidw0;
+
+ mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0);
+ return sysfs_emit(buf, "%s\n", mpiStateText[mpidw0 & 0x0003]);
+}
+static DEVICE_ATTR_RO(ctl_mpi_state);
+
+/**
+ * ctl_hmi_error_show - controller MPI initialization failure status
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t ctl_hmi_error_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int mpidw0;
+
+ mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0);
+ return sysfs_emit(buf, "0x%08x\n", (mpidw0 >> 16));
+}
+static DEVICE_ATTR_RO(ctl_hmi_error);
+
+/**
+ * ctl_raae_count_show - controller raae count check
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t ctl_raae_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int raaecnt;
+
+ raaecnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 12);
+ return sysfs_emit(buf, "0x%08x\n", raaecnt);
+}
+static DEVICE_ATTR_RO(ctl_raae_count);
+
+/**
+ * ctl_iop0_count_show - controller iop0 count check
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t ctl_iop0_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int iop0cnt;
+
+ iop0cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 16);
+ return sysfs_emit(buf, "0x%08x\n", iop0cnt);
+}
+static DEVICE_ATTR_RO(ctl_iop0_count);
+
+/**
+ * ctl_iop1_count_show - controller iop1 count check
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t ctl_iop1_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ unsigned int iop1cnt;
+
+ iop1cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 20);
+ return sysfs_emit(buf, "0x%08x\n", iop1cnt);
+}
+static DEVICE_ATTR_RO(ctl_iop1_count);
+
struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_interface_rev,
&dev_attr_controller_fatal_error,
@@ -909,6 +1019,11 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_ob_log,
&dev_attr_ila_version,
&dev_attr_inc_fw_ver,
+ &dev_attr_ctl_mpi_state,
+ &dev_attr_ctl_hmi_error,
+ &dev_attr_ctl_raae_count,
+ &dev_attr_ctl_iop0_count,
+ &dev_attr_ctl_iop1_count,
NULL,
};
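All five new attributes share one shape: read a word from the general status table, format it with sysfs_emit(). sysfs_emit() is preferred over raw sprintf() in show callbacks because it knows sysfs hands it a full page and checks the output stays within it. The shared skeleton, reduced to a hypothetical helper (the table offset is the only per-attribute variation, per the hunks above):

	static ssize_t ctl_word_show(struct device *cdev, u32 offset, char *buf)
	{
		struct Scsi_Host *shost = class_to_shost(cdev);
		struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
		struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;

		/* one 32-bit read from the general status table */
		return sysfs_emit(buf, "0x%08x\n",
				  pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
					      offset));
	}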
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index d0d43a250b9e..4743f0de223e 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -59,5 +59,10 @@
#define SYSFS_OFFSET 1024
#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024)
#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024)
+
+static inline u32 pm8001_ctl_aap1_memmap(u8 *ptr, int idx, int off)
+{
+ return *(u32 *)(ptr + idx * 32 + off);
+}
#endif /* PM8001_CTL_H_INCLUDED */
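Replacing the old AAP1_MEMMAP() macro (removed from pm8001_ctl.c above) with this inline keeps the same byte layout, 32 bytes per row, but gains a typed pointer parameter, single evaluation of its arguments, and something a debugger can step into. The general macro-to-inline move, sketched outside the driver (u8/u32 are the kernel fixed-width types):

	/* before: textual substitution, no type checking of 'base' */
	#define ROW_WORD(base, r, c) \
		(*(u32 *)((u8 *)(base) + (r) * 32 + (c)))

	/* after: same arithmetic as an ordinary, inlinable function;
	 * an incompatible pointer argument now draws a diagnostic */
	static inline u32 row_word(u8 *ptr, int row, int col)
	{
		return *(u32 *)(ptr + row * 32 + col);
	}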
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 31e5455d280c..ecd06d2d7e81 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -240,6 +240,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].ci_virt =
pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
offsetib = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(addressib,
@@ -268,6 +269,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
0 | (10 << 16) | (i << 24);
pm8001_ha->outbnd_q_tbl[i].pi_virt =
pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
offsetob = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(addressob,
@@ -643,7 +645,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
*/
static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
{
- u8 i = 0;
+ u32 i = 0;
u16 deviceid;
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
/* 8081 controllers need BAR shift to access MPI space
@@ -1175,7 +1177,7 @@ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
#ifndef PM8001_USE_MSIX
/**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
@@ -1248,7 +1250,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
}
/**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
* @vec: unused
*/
@@ -1499,12 +1501,14 @@ void pm8001_work_fn(struct work_struct *work)
* was cancelled. This nullification happens when the device
* goes away.
*/
- pm8001_dev = pw->data; /* Most stash device structure */
- if ((pm8001_dev == NULL)
- || ((pw->handler != IO_XFER_ERROR_BREAK)
- && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
- kfree(pw);
- return;
+ if (pw->handler != IO_FATAL_ERROR) {
+ pm8001_dev = pw->data; /* most handlers stash the device structure */
+ if ((pm8001_dev == NULL)
+ || ((pw->handler != IO_XFER_ERROR_BREAK)
+ && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
+ kfree(pw);
+ return;
+ }
}
switch (pw->handler) {
@@ -1668,6 +1672,58 @@ void pm8001_work_fn(struct work_struct *work)
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
+ case IO_FATAL_ERROR:
+ {
+ struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ struct pm8001_ccb_info *ccb;
+ struct task_status_struct *ts;
+ struct sas_task *task;
+ int i;
+ u32 tag, device_id;
+
+ for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ task = ccb->task;
+ ts = &task->task_status;
+ tag = ccb->ccb_tag;
+ /* check if tag is NULL */
+ if (!tag) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "tag Null\n");
+ continue;
+ }
+ if (task != NULL) {
+ dev = task->dev;
+ if (!dev) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "dev is NULL\n");
+ continue;
+ }
+ /*complete sas task and update to top layer */
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ ts->resp = SAS_TASK_COMPLETE;
+ task->task_done(task);
+ } else if (tag != 0xFFFFFFFF) {
+ /* complete the internal commands/non-sas task */
+ pm8001_dev = ccb->device;
+ if (pm8001_dev->dcompletion) {
+ complete(pm8001_dev->dcompletion);
+ pm8001_dev->dcompletion = NULL;
+ }
+ complete(pm8001_ha->nvmd_completion);
+ pm8001_tag_free(pm8001_ha, tag);
+ }
+ }
+ /* Deregister all the device ids */
+ for (i = 0; i < PM8001_MAX_DEVICES; i++) {
+ pm8001_dev = &pm8001_ha->devices[i];
+ device_id = pm8001_dev->device_id;
+ if (device_id) {
+ PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+ pm8001_free_dev(pm8001_dev);
+ }
+ }
+ }
+ break;
}
kfree(pw);
}
@@ -1826,7 +1882,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
* that the task has been finished.
*/
static void
-mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
@@ -2058,7 +2114,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
/*See the comments for mpi_ssp_completion */
-static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
unsigned long flags;
@@ -2294,9 +2350,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(dev_is_expander(t->dev->parent->dev_type)))) {
- for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
+ for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
- for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
+ for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
sata_addr_hi[i] = pm8001_ha->sas_addr[j];
memcpy(&temp_sata_addr_low, sata_addr_low,
sizeof(sata_addr_low));
@@ -2625,7 +2681,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/*See the comments for mpi_ssp_completion */
-static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct task_status_struct *ts;
@@ -3219,7 +3275,7 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
}
/**
- * asd_get_attached_sas_addr -- extract/generate attached SAS address
+ * pm8001_get_attached_sas_addr - extract/generate attached SAS address
* @phy: pointer to asd_phy
* @sas_addr: pointer to buffer where the SAS address is to be written
*
@@ -3546,7 +3602,7 @@ int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/**
- * fw_flash_update_resp - Response from FW for flash update command.
+ * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
@@ -3602,7 +3658,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
return 0;
}
-int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
u32 status;
int i;
@@ -3685,7 +3741,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
-static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
unsigned long flags;
struct hw_event_resp *pPayload =
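One subtlety in the pm8001_work_fn() rework above: IO_FATAL_ERROR is queued for the whole HBA from the outbound-queue path with no device attached, so pw->data must not be validated as a pm8001_device in that case; every other handler keeps the old NULL/SAS_PHY_UNUSED check. The guard, factored into a hypothetical predicate:

	/* sketch only; assumes the pm8001_work layout used in the hunks */
	static bool pm8001_work_dev_ok(struct pm8001_work *pw)
	{
		struct pm8001_device *pm8001_dev;

		if (pw->handler == IO_FATAL_ERROR)
			return true;	/* HBA-wide event, no device */

		pm8001_dev = pw->data;
		if (!pm8001_dev)
			return false;
		return pw->handler == IO_XFER_ERROR_BREAK ||
		       pm8001_dev->dev_type != SAS_PHY_UNUSED;
	}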
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 6d91e2446542..d1f3aa93325b 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -805,6 +805,7 @@ struct set_dev_state_resp {
#define IO_ABORT_IN_PROGRESS 0x40
#define IO_ABORT_DELAYED 0x41
#define IO_INVALID_LENGTH 0x42
+#define IO_FATAL_ERROR 0x51
/* WARNING: This error code must always be the last number.
* If you add error code, modify this code also
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index bd626ef876da..390c33df0357 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -184,7 +184,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
#ifdef PM8001_USE_TASKLET
/**
- * tasklet for 64 msi-x interrupt handler
+ * pm8001_tasklet() - tasklet for 64 msi-x interrupt handler
* @opaque: the passed general host adapter struct
* Note: pm8001_tasklet is common for pm8001 & pm80xx
*/
@@ -267,7 +267,8 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
{
int i, count = 0, rc = 0;
u32 ci_offset, ib_offset, ob_offset, pi_offset;
- struct inbound_queue_table *circularQ;
+ struct inbound_queue_table *ibq;
+ struct outbound_queue_table *obq;
spin_lock_init(&pm8001_ha->lock);
spin_lock_init(&pm8001_ha->bitmap_lock);
@@ -315,8 +316,8 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[IOP].alignment = 32;
for (i = 0; i < count; i++) {
- circularQ = &pm8001_ha->inbnd_q_tbl[i];
- spin_lock_init(&circularQ->iq_lock);
+ ibq = &pm8001_ha->inbnd_q_tbl[i];
+ spin_lock_init(&ibq->iq_lock);
/* MPI Memory region 3 for consumer Index of inbound queues */
pm8001_ha->memoryMap.region[ci_offset+i].num_elements = 1;
pm8001_ha->memoryMap.region[ci_offset+i].element_size = 4;
@@ -345,6 +346,8 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
}
for (i = 0; i < count; i++) {
+ obq = &pm8001_ha->outbnd_q_tbl[i];
+ spin_lock_init(&obq->oq_lock);
/* MPI Memory region 4 for producer Index of outbound queues */
pm8001_ha->memoryMap.region[pi_offset+i].num_elements = 1;
pm8001_ha->memoryMap.region[pi_offset+i].element_size = 4;
@@ -864,7 +867,7 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
}
/**
- * pm8001_set_phy_settings_ven_117c_12Gb : Configure ATTO 12Gb PHY settings
+ * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings
* @pm8001_ha : our adapter
*/
static
@@ -963,6 +966,7 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
{
u32 i = 0, j = 0;
int flag = 0, rc = 0;
+ int nr_irqs = pm8001_ha->number_of_intr;
if (pm8001_ha->chip_id != chip_8001)
flag &= ~IRQF_SHARED;
@@ -971,7 +975,10 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
"pci_enable_msix request number of intr %d\n",
pm8001_ha->number_of_intr);
- for (i = 0; i < pm8001_ha->number_of_intr; i++) {
+ if (nr_irqs > ARRAY_SIZE(pm8001_ha->intr_drvname))
+ nr_irqs = ARRAY_SIZE(pm8001_ha->intr_drvname);
+
+ for (i = 0; i < nr_irqs; i++) {
snprintf(pm8001_ha->intr_drvname[i],
sizeof(pm8001_ha->intr_drvname[0]),
"%s-%d", pm8001_ha->name, i);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index a98d4496ff8b..d28af413b93a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -590,7 +590,7 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
return NULL;
}
-static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
+void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
u32 id = pm8001_dev->id;
memset(pm8001_dev, 0, sizeof(*pm8001_dev));
@@ -738,7 +738,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
if (pm8001_ha->chip_id != chip_8001) {
pm8001_dev->setds_completion = &completion_setstate;
PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
+ pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion_setstate);
}
res = -TMF_RESP_FUNC_FAILED;
@@ -877,8 +877,8 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
pm8001_dev->device_id, pm8001_dev->dev_type);
if (atomic_read(&pm8001_dev->running_req)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ dev, 1, 0);
while (atomic_read(&pm8001_dev->running_req))
msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
@@ -981,6 +981,7 @@ void pm8001_open_reject_retry(
}
/**
+ * pm8001_I_T_nexus_reset()
* Standard mandates link reset for ATA (type 0) and hard reset for
* SSP (type 1), only for RECOVERY.
* @dev: the device structure for the device to reset.
@@ -1013,8 +1014,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
goto out;
}
msleep(2000);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ dev, 1, 0);
if (rc) {
pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
"with rc %d\n", pm8001_dev->device_id, rc);
@@ -1059,8 +1060,8 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
goto out;
}
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ dev, 1, 0);
msleep(100);
/* deregister the target device */
@@ -1075,8 +1076,8 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
wait_for_completion(&completion_setstate);
} else {
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ dev, 1, 0);
msleep(100);
/* deregister the target device */
@@ -1104,13 +1105,13 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (dev_is_sata(dev)) {
struct sas_phy *phy = sas_get_local_phy(dev);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
- dev, 1, 0);
+ rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ dev, 1, 0);
rc = sas_phy_reset(phy, 1);
sas_put_local_phy(phy);
pm8001_dev->setds_completion = &completion_setstate;
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
+ pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion_setstate);
} else {
tmf_task.tmf = TMF_LU_RESET;
@@ -1229,7 +1230,7 @@ int pm8001_abort_task(struct sas_task *task)
/* 1. Set Device state as Recovery */
pm8001_dev->setds_completion = &completion;
PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x03);
+ pm8001_dev, DS_IN_RECOVERY);
wait_for_completion(&completion);
/* 2. Send Phy Control Hard Reset */
@@ -1300,7 +1301,7 @@ int pm8001_abort_task(struct sas_task *task)
reinit_completion(&completion);
pm8001_dev->setds_completion = &completion;
PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, 0x01);
+ pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion);
} else {
rc = pm8001_exec_internal_task_abort(pm8001_ha,
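The 0x01 and 0x03 arguments to set_dev_state_req() were raw MPI device-state codes; DS_OPERATIONAL and DS_IN_RECOVERY spell out the same values. Sketched here as an enum with the values read directly off the replacements above (the driver's own definitions live in its headers):

	enum pm8001_device_state {
		DS_OPERATIONAL	= 0x01,
		DS_IN_RECOVERY	= 0x03,
	};

	/* a call site now states intent instead of a magic number:
	 *   PM8001_CHIP_DISP->set_dev_state_req(ha, dev, DS_IN_RECOVERY);
	 */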
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 039ed91e9841..62d08b535a4b 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -281,7 +281,6 @@ struct pm8001_prd {
* CCB(Command Control Block)
*/
struct pm8001_ccb_info {
- struct list_head entry;
struct sas_task *task;
u32 n_elem;
u32 ccb_tag;
@@ -457,6 +456,7 @@ struct outbound_queue_table {
u32 dinterrup_to_pci_offset;
__le32 producer_index;
u32 consumer_idx;
+ spinlock_t oq_lock;
};
struct pm8001_hba_memspace {
void __iomem *memvirtaddr;
@@ -705,7 +705,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
-int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
struct sas_task *pm8001_alloc_task(void);
void pm8001_task_done(struct sas_task *task);
@@ -727,6 +727,7 @@ ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf);
ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha);
+void pm8001_free_dev(struct pm8001_device *pm8001_dev);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 84315560e8e1..4e980830f9f5 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -90,7 +90,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
- u32 accum_len , reg_val, index, *temp;
+ u32 accum_len, reg_val, index, *temp;
u32 status = 1;
unsigned long start;
u8 *direct_data;
@@ -787,6 +787,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].ci_virt =
pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
offsetib = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(addressib,
@@ -820,6 +821,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
pm8001_ha->outbnd_q_tbl[i].pi_virt =
pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
+ pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
offsetob = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(addressob,
@@ -1420,7 +1422,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
+ * pm80xx_chip_init - the main init function that initializes the whole PM8001 chip.
* @pm8001_ha: our hba card information
*/
static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
@@ -1502,12 +1504,12 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
- max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
- max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
value &= SPCv_MSGU_CFG_TABLE_RESET;
} while ((value != 0) && (--max_wait_count));
@@ -1519,9 +1521,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
/* check the MPI-State for termination in progress */
/* wait until Inbound DoorBell Clear Register toggled */
- max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
+ max_wait_count = 100; /* 2 sec for spcv/ve */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
gst_len_mpistate =
pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
GST_GSTLEN_MPIS_OFFSET);
@@ -1574,7 +1576,7 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
+ * pm80xx_chip_soft_rst - soft reset the PM8001 chip, clearing all
* the FW register status back to its original state.
* @pm8001_ha: our hba card information
*/
@@ -1703,7 +1705,7 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
@@ -1714,7 +1716,7 @@ pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
*/
static void
@@ -1724,7 +1726,7 @@ pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
* @vec: interrupt number to enable
*/
@@ -1743,7 +1745,7 @@ pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
}
/**
- * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt
+ * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt
* @pm8001_ha: our hba card information
* @vec: interrupt number to disable
*/
@@ -1904,7 +1906,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
* that the task has been finished.
*/
static void
-mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
@@ -2194,7 +2196,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
/*See the comments for mpi_ssp_completion */
-static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
unsigned long flags;
@@ -2444,9 +2446,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(dev_is_expander(t->dev->parent->dev_type)))) {
- for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
+ for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
- for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
+ for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
sata_addr_hi[i] = pm8001_ha->sas_addr[j];
memcpy(&temp_sata_addr_low, sata_addr_low,
sizeof(sata_addr_low));
@@ -2788,7 +2790,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
/*See the comments for mpi_ssp_completion */
-static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct task_status_struct *ts;
@@ -4126,12 +4128,13 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
pm8001_dbg(pm8001_ha, FAIL,
"Firmware Fatal error! Regval:0x%x\n",
regval);
+ pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR);
print_scratchpad_registers(pm8001_ha);
return ret;
}
}
- spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+ spin_lock_irqsave(&circularQ->oq_lock, flags);
do {
/* spurious interrupt during setup if kexec-ing and
* driver doing a doorbell access w/ the pre-kexec oq
@@ -4157,7 +4160,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
break;
}
} while (1);
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock, flags);
return ret;
}
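Moving process_oq() from the adapter-wide pm8001_ha->lock to the per-queue oq_lock initialized in pm8001_alloc() means each MSI-X vector now drains its own outbound queue without serializing against the other vectors; only outbnd_q_tbl[vec] is touched under the lock. The split, sketched with simplified types:

	/* one lock per queue instead of one per adapter */
	struct out_queue {
		spinlock_t oq_lock;	/* guards this queue's indices only */
		u32 consumer_idx;
	};

	static void drain_one_queue(struct out_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->oq_lock, flags);
		/* walk producer/consumer indices for this vector only;
		 * vectors servicing other queues proceed in parallel */
		spin_unlock_irqrestore(&q->oq_lock, flags);
	}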
@@ -4183,7 +4186,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag,
}
/**
- * pm8001_chip_smp_req - send a SMP task to FW
+ * pm80xx_chip_smp_req - send a SMP task to FW
* @pm8001_ha: our hba card information.
* @ccb: the ccb information this request used.
*/
@@ -4766,7 +4769,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
}
/**
- * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
* @pm8001_ha: our hba card information.
* @phy_id: the phy id which we want to stop.
*/
@@ -4898,7 +4901,7 @@ static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm8001_chip_isr - PM8001 isr handler.
+ * pm80xx_chip_isr - PM8001 isr handler.
* @pm8001_ha: our hba card information.
* @vec: irq number.
*/
@@ -4918,7 +4921,7 @@ static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
u32 operation, u32 phyid,
u32 length, u32 *buf)
{
- u32 tag , i, j = 0;
+ u32 tag, i, j = 0;
int rc;
struct set_phy_profile_req payload;
struct inbound_queue_table *circularQ;
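The mpi_uninit_check() hunks earlier in this file also change how the wait is performed: a udelay(1) busy-wait iterated millions of times becomes a sleeping poll every FW_READY_INTERVAL, with max_wait_count scaled down to keep the same overall budget (the retained '2 sec' comment against a count of 100 suggests a 20 ms interval, though the constant is defined elsewhere and that reading is an inference). The bounded sleep-poll shape:

	/* values here are the assumption stated above, not the header's */
	#define POLL_INTERVAL_MS	20
	#define POLL_COUNT		100	/* 100 * 20 ms = 2 s budget */

	static int wait_for_bits_clear(void __iomem *reg, u32 mask)
	{
		int tries = POLL_COUNT;

		do {
			msleep(POLL_INTERVAL_MS);	/* sleep, don't spin */
			if (!(readl(reg) & mask))
				return 0;
		} while (--tries);

		return -ETIMEDOUT;
	}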
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 2c8e85cfdbc4..c7e5d93bea92 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -1272,6 +1272,7 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48
#define IO_DS_INVALID 0x49
+#define IO_FATAL_ERROR 0x51
/* WARNING: the value is not contiguous from here */
#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52
#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 834556ea21d2..bffd9a9349e7 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -443,15 +443,14 @@ static void pmcraid_disable_interrupts(
* pmcraid_enable_interrupts - Enables specified interrupts
*
* @pinstance: pointer to per adapter instance structure
- * @intr: interrupts to enable
+ * @intrs: interrupts to enable
*
* Return Value
* None
*/
static void pmcraid_enable_interrupts(
struct pmcraid_instance *pinstance,
- u32 intrs
-)
+ u32 intrs)
{
u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
@@ -533,15 +532,13 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
pinstance->ioa_unit_check = 1;
}
+static void pmcraid_ioa_reset(struct pmcraid_cmd *);
/**
* pmcraid_bist_done - completion function for PCI BIST
- * @cmd: pointer to reset command
+ * @t: pointer to reset command
* Return Value
* none
*/
-
-static void pmcraid_ioa_reset(struct pmcraid_cmd *);
-
static void pmcraid_bist_done(struct timer_list *t)
{
struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
@@ -595,7 +592,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
/**
* pmcraid_reset_alert_done - completion routine for reset_alert
- * @cmd: pointer to command block used in reset sequence
+ * @t: pointer to command block used in reset sequence
* Return value
* None
*/
@@ -626,16 +623,16 @@ static void pmcraid_reset_alert_done(struct timer_list *t)
}
}
+static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
/**
* pmcraid_reset_alert - alerts IOA for a possible reset
- * @cmd : command block to be used for reset sequence.
+ * @cmd: command block to be used for reset sequence.
*
* Return Value
* returns 0 if pci config-space is accessible and RESET_DOORBELL is
* successfully written to IOA. Returns non-zero in case pci_config_space
* is not accessible
*/
-static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -676,7 +673,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
/**
* pmcraid_timeout_handler - Timeout handler for internally generated ops
*
- * @cmd : pointer to command structure, that got timedout
+ * @t: pointer to the command structure that timed out
*
* This function blocks host requests and initiates an adapter reset.
*
@@ -844,7 +841,7 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
}
/**
- * pmcraid_fire_command - sends an IOA command to adapter
+ * _pmcraid_fire_command - sends an IOA command to adapter
*
* This function adds the given block into pending command list
* and returns without waiting
@@ -961,6 +958,7 @@ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
pmcraid_timeout_handler);
}
+static void pmcraid_querycfg(struct pmcraid_cmd *);
/**
* pmcraid_get_fwversion_done - completion function for get_fwversion
*
@@ -969,8 +967,6 @@ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
* Return Value
* none
*/
-static void pmcraid_querycfg(struct pmcraid_cmd *);
-
static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -1382,10 +1378,9 @@ static void pmcraid_netlink_release(void)
genl_unregister_family(&pmcraid_event_family);
}
-/**
+/*
* pmcraid_notify_aen - sends event msg to user space application
* @pinstance: pointer to adapter instance structure
- * @type: HCAM type
*
* Return value:
* 0 if success, error value in case of any failure.
@@ -1393,8 +1388,7 @@ static void pmcraid_netlink_release(void)
static int pmcraid_notify_aen(
struct pmcraid_instance *pinstance,
struct pmcraid_aen_msg *aen_msg,
- u32 data_size
-)
+ u32 data_size)
{
struct sk_buff *skb;
void *msg_header;
@@ -1771,6 +1765,8 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
}
}
+static void pmcraid_initiate_reset(struct pmcraid_instance *);
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
/**
* pmcraid_process_ldn - op done function for an LDN
* @cmd: pointer to command block
@@ -1778,9 +1774,6 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
* Return value
* none
*/
-static void pmcraid_initiate_reset(struct pmcraid_instance *);
-static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
-
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -1878,14 +1871,14 @@ static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
pmcraid_cancel_ldn(cmd);
}
+static void pmcraid_reinit_buffers(struct pmcraid_instance *);
+
/**
* pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
* @pinstance: pointer to adapter instance structure
* Return Value
* 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
*/
-static void pmcraid_reinit_buffers(struct pmcraid_instance *);
-
static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
{
u32 intrs;
@@ -2687,6 +2680,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
* pmcraid_reset_device - device reset handler functions
*
* @scsi_cmd: scsi command struct
+ * @timeout: command timeout
* @modifier: reset modifier indicating the reset sequence to be performed
*
* This function issues a device reset to the affected device.
@@ -2699,8 +2693,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
static int pmcraid_reset_device(
struct scsi_cmnd *scsi_cmd,
unsigned long timeout,
- u8 modifier
-)
+ u8 modifier)
{
struct pmcraid_cmd *cmd;
struct pmcraid_instance *pinstance;
@@ -3008,7 +3001,7 @@ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
}
/**
- * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
+ * pmcraid_eh_device_reset_handler - bus/target/device reset handler callbacks
*
* @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
*
@@ -3307,7 +3300,7 @@ static int pmcraid_copy_sglist(
}
/**
- * pmcraid_queuecommand - Queue a mid-layer request
+ * pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
* @done: done function
*
@@ -3430,7 +3423,7 @@ static int pmcraid_queuecommand_lck(
static DEF_SCSI_QCMD(pmcraid_queuecommand)
-/**
+/*
* pmcraid_open -char node "open" entry, allowed only users with admin access
*/
static int pmcraid_chr_open(struct inode *inode, struct file *filep)
@@ -3447,7 +3440,7 @@ static int pmcraid_chr_open(struct inode *inode, struct file *filep)
return 0;
}
-/**
+/*
* pmcraid_fasync - Async notifier registration from applications
*
* This function adds the calling process to a driver global queue. When an
@@ -3559,7 +3552,8 @@ static void pmcraid_release_passthrough_ioadls(
* pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
*
* @pinstance: pointer to adapter instance structure
- * @cmd: ioctl code
+ * @ioctl_cmd: ioctl code
+ * @buflen: unused
* @arg: pointer to pmcraid_passthrough_buffer user buffer
*
* Return value
@@ -3894,7 +3888,7 @@ static int pmcraid_check_ioctl_buffer(
return 0;
}
-/**
+/*
* pmcraid_ioctl - char node ioctl entry point
*/
static long pmcraid_chr_ioctl(
@@ -3963,7 +3957,7 @@ static long pmcraid_chr_ioctl(
return retval;
}
-/**
+/*
* File operations structure for management interface
*/
static const struct file_operations pmcraid_fops = {
@@ -3981,6 +3975,7 @@ static const struct file_operations pmcraid_fops = {
/**
* pmcraid_show_log_level - Display adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -4000,6 +3995,7 @@ static ssize_t pmcraid_show_log_level(
/**
* pmcraid_store_log_level - Change the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
* @count: not used
*
@@ -4042,6 +4038,7 @@ static struct device_attribute pmcraid_log_level_attr = {
/**
* pmcraid_show_drv_version - Display driver version
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -4066,8 +4063,9 @@ static struct device_attribute pmcraid_driver_version_attr = {
};
/**
- * pmcraid_show_io_adapter_id - Display driver assigned adapter id
+ * pmcraid_show_adapter_id - Display driver assigned adapter id
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -4589,7 +4587,7 @@ pmcraid_release_control_blocks(
/**
* pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
- * @pinstance - pointer to per adapter instance structure
+ * @pinstance: pointer to per adapter instance structure
*
* Allocates memory for command blocks using kernel slab allocator.
*
@@ -5134,7 +5132,7 @@ static void pmcraid_shutdown(struct pci_dev *pdev)
}
-/**
+/*
* pmcraid_get_minor - returns unused minor number from minor number bitmap
*/
static unsigned short pmcraid_get_minor(void)
@@ -5146,7 +5144,7 @@ static unsigned short pmcraid_get_minor(void)
return minor;
}
-/**
+/*
* pmcraid_release_minor - releases given minor back to minor number bitmap
*/
static void pmcraid_release_minor(unsigned short minor)
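Most of the pmcraid churn above is kernel-doc hygiene: comments that are not kernel-doc lose the '/**' opener so the documentation build stops parsing them, parameter names are corrected to match the real signatures (timer callbacks take a struct timer_list *, described as @t), and forward declarations are hoisted from between a kernel-doc block and its function, where they break the doc-to-function association. In miniature:

	/* hoisted: must not sit between kernel-doc and its function */
	static void helper(void);

	/**
	 * do_work - one-line summary, parsed by kernel-doc
	 * @arg: parameters use 'name: text', not 'name - text'
	 */
	static void do_work(int arg)
	{
		if (arg)
			helper();
	}

	/* plain comment: single asterisk, invisible to kernel-doc */
	static void helper(void)
	{
	}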
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 88a592d09433..ba94413fe2ea 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -11,8 +11,6 @@
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>
-#include <linux/version.h>
-
/* qedf_hsi.h needs to before included any qed includes */
#include "qedf_hsi.h"
@@ -335,6 +333,7 @@ struct qedf_ctx {
unsigned int curr_conn_id;
struct workqueue_struct *ll2_recv_wq;
struct workqueue_struct *link_update_wq;
+ struct devlink *devlink;
struct delayed_work link_update;
struct delayed_work link_recovery;
struct completion flogi_compl;
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 2386bfb73c46..f4d81127239e 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <scsi/scsi_transport.h>
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index cec27f2ef70d..69f7784233f9 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3408,6 +3408,14 @@ retry_probe:
goto err2;
}
+ if (mode != QEDF_MODE_RECOVERY) {
+ qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
+ if (IS_ERR(qedf->devlink)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
+ qedf->devlink = NULL;
+ }
+ }
+
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
@@ -3789,6 +3797,11 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
QEDF_ERR(&(qedf->dbg_ctx),
"Failed to send drv state to MFW.\n");
+ if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
+ qed_ops->common->devlink_unregister(qedf->devlink);
+ qedf->devlink = NULL;
+ }
+
qed_ops->common->slowpath_stop(qedf->cdev);
qed_ops->common->remove(qedf->cdev);
@@ -3846,8 +3859,9 @@ void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
/* Prevent HW attentions from being reasserted */
qed_ops->common->attn_clr_enable(qedf->cdev, true);
- if (qedf_enable_recovery)
- qed_ops->common->recovery_process(qedf->cdev);
+ if (qedf_enable_recovery && qedf->devlink)
+ qed_ops->common->report_fatal_error(qedf->devlink,
+ err_type);
break;
default:
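Taken together, the qedf hunks wire the driver into qed's devlink instance with a best-effort lifecycle: register once at initial probe (skipped on recovery re-probe), keep running if registration fails, report fatal hardware errors through devlink, and unregister on the first-time remove path. Condensed from the hunks above (error logging elided):

	/* probe, non-recovery only */
	qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
	if (IS_ERR(qedf->devlink))
		qedf->devlink = NULL;	/* degrade gracefully, run without devlink */

	/* fatal HW error path */
	if (qedf_enable_recovery && qedf->devlink)
		qed_ops->common->report_fatal_error(qedf->devlink, err_type);

	/* remove, non-recovery only */
	if (qedf->devlink) {
		qed_ops->common->devlink_unregister(qedf->devlink);
		qedf->devlink = NULL;
	}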
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 69c5b5ee2169..2455d1448a7e 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -276,10 +276,8 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
}
udev = kzalloc(sizeof(*udev), GFP_KERNEL);
- if (!udev) {
- rc = -ENOMEM;
+ if (!udev)
goto err_udev;
- }
udev->uio_dev = -1;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 46de2541af25..8f35174a1f9a 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -633,13 +633,13 @@ static int qla1280_read_nvram(struct scsi_qla_host *ha)
* to be read a word (two bytes) at a time.
*
* The net result of this would be that the word (and
- * doubleword) quantites in the firmware would be correct, but
+ * doubleword) quantities in the firmware would be correct, but
* the bytes would be pairwise reversed. Since most of the
- * firmware quantites are, in fact, bytes, we do an extra
+ * firmware quantities are, in fact, bytes, we do an extra
* le16_to_cpu() in the firmware read routine.
*
* The upshot of all this is that the bytes in the firmware
- * are in the correct places, but the 16 and 32 bit quantites
+ * are in the correct places, but the 16 and 32 bit quantities
* are still in little endian format. We fix that up below by
* doing extra reverses on them */
nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
@@ -687,7 +687,7 @@ qla1280_info(struct Scsi_Host *host)
* The mid-level driver tries to ensure that queuecommand never gets invoked
* concurrently with itself or the interrupt handler (although the
* interrupt handler may call this routine as part of request-completion
- * handling). Unfortunely, it sometimes calls the scheduler in interrupt
+ * handling). Unfortunately, it sometimes calls the scheduler in interrupt
* context which is a big NO! NO!.
**************************************************************************/
static int
@@ -3054,7 +3054,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
/* Check for empty slot in outstanding command list. */
for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
- (ha->outstanding_cmds[cnt] != 0); cnt++) ;
+ ha->outstanding_cmds[cnt]; cnt++);
if (cnt >= MAX_OUTSTANDING_COMMANDS) {
status = SCSI_MLQUEUE_HOST_BUSY;
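A concrete instance of the pairwise reversal described in the comment above, with illustrative values on a big-endian host: suppose the flash holds the byte pair {0x12, 0x34}.

	/* Word read: memory holds {0x34, 0x12}; bytes are pairwise
	 * swapped, but a true u16 field reads back its intended value.
	 * After le16_to_cpu(): memory holds {0x12, 0x34}; byte fields
	 * are correct, but u16/u32 fields are now stored little-endian
	 * and need the per-field fixups that follow, e.g.: */
	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);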
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 63391c9be05d..3aa9869f6fae 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2864,6 +2864,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
if (IS_FWI2_CAPABLE(ha)) {
+ int rval;
+
stats = dma_alloc_coherent(&ha->pdev->dev,
sizeof(*stats), &stats_dma, GFP_KERNEL);
if (!stats) {
@@ -2873,7 +2875,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
}
/* reset firmware statistics */
- qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x70de,
+ "Resetting ISP statistics failed: rval = %d\n",
+ rval);
dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
stats, stats_dma);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bee8cf9f8123..d42b2ad84049 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -25,10 +25,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ sp->free(sp);
+
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- sp->free(sp);
}
void qla2x00_bsg_sp_free(srb_t *sp)
@@ -2583,6 +2584,10 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
}
data = kzalloc(response_len, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto host_stat_out;
+ }
ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
data, response_len);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 144a893e7335..f2d05592c1e2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -113,8 +113,13 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
uint32_t stat;
ulong i, j, timer = 6000000;
int rval = QLA_FUNCTION_FAILED;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+
for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
@@ -138,6 +143,9 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
while (timer--) {
udelay(5);
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+
stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
@@ -192,9 +200,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+
for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
@@ -216,8 +228,10 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = rd_reg_dword(&reg->host_status);
+ if (qla_pci_disconnected(vha, reg))
+ return rval;
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
continue;
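The guards added to both dump loops rest on a PCIe property: MMIO reads from a device that has fallen off the bus complete with all-ones data, so a host-status value of 0xffffffff is interpreted as a disconnect rather than a real register state. The idiom in isolation (helper name hypothetical):

	static bool mmio_looks_disconnected(void __iomem *status_reg)
	{
		/* A surprise-removed PCIe device returns ~0 on reads. */
		return readl(status_reg) == 0xffffffffU;
	}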
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2e59e75c62b5..9eb708e5e22e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -308,7 +308,7 @@ struct qla2xxx_fw_dump {
};
#define QL_MSGHDR "qla2xxx"
-#define QL_DBG_DEFAULT1_MASK 0x1e400000
+#define QL_DBG_DEFAULT1_MASK 0x1e600000
#define ql_log_fatal 0 /* display fatal errors */
#define ql_log_warn 1 /* display critical errors */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 49b42b430df4..def4d99f80e9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -396,6 +396,7 @@ typedef union {
} b;
} port_id_t;
#define INVALID_PORT_ID 0xFFFFFF
+#define ISP_REG16_DISCONNECT 0xFFFF
static inline le_id_t be_id_to_le(be_id_t id)
{
@@ -1527,7 +1528,7 @@ struct init_sf_cb {
* BIT_12 = Remote Write Optimization (1 - Enabled, 0 - Disabled)
* BIT 11-0 = Reserved
*/
- uint16_t flags;
+ __le16 flags;
uint8_t reserved1[32];
uint16_t discard_OHRB_timeout_value;
uint16_t remote_write_opt_queue_num;
@@ -3815,7 +3816,7 @@ struct qlt_hw_data {
__le32 __iomem *atio_q_in;
__le32 __iomem *atio_q_out;
- struct qla_tgt_func_tmpl *tgt_ops;
+ const struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
@@ -3857,6 +3858,13 @@ struct qla_hw_data_stat {
u32 num_mpi_reset;
};
+/* refer to pcie_do_recovery() for the stages these states mirror */
+typedef enum {
+ QLA_PCI_RESUME,
+ QLA_PCI_ERR_DETECTED,
+ QLA_PCI_MMIO_ENABLED,
+ QLA_PCI_SLOT_RESET,
+} pci_error_state_t;
/*
* Qlogic host adapter specific data structure.
*/
@@ -4607,6 +4615,7 @@ struct qla_hw_data {
#define DEFAULT_ZIO_THRESHOLD 5
struct qla_hw_data_stat stat;
+ pci_error_state_t pci_error_state;
};
struct active_regions {
@@ -4727,7 +4736,7 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
#define QPAIR_ONLINE_CHECK_NEEDED 27
-#define SET_NVME_ZIO_THRESHOLD_NEEDED 28
+#define DO_EEH_RECOVERY 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
#define IOCB_WORK_ACTIVE 31
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6486f97d649e..fae5cae6f0a8 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -224,6 +224,7 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla_eeh_work(struct work_struct *);
extern void qla2x00_sp_compl(srb_t *sp, int);
extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
@@ -235,6 +236,8 @@ int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
struct purex_item *pkt);
+void qla_pci_set_eeh_busy(struct scsi_qla_host *);
+void qla_schedule_eeh_work(struct scsi_qla_host *);
/*
* Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 517d358b0031..5b6e04a91a18 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1247,7 +1247,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
}
/**
- * qla2x00_snd_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
+ * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
* @vha: HA context
*
* This command uses the old Execute SNS Command mailbox routine.
@@ -1479,7 +1479,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
}
/**
- * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
+ * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query.
* @p: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
@@ -1582,7 +1582,7 @@ qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
}
/**
- * qla2x00_hba_attributes() perform HBA attributes registration
+ * qla2x00_hba_attributes() - perform HBA attributes registration
* @vha: HA context
* @entries: number of entries to use
* @callopt: Option to issue extended or standard FDMI
@@ -1837,7 +1837,7 @@ done:
}
/**
- * qla2x00_port_attributes() perform Port attributes registration
+ * qla2x00_port_attributes() - perform Port attributes registration
* @vha: HA context
* @entries: number of entries to use
* @callopt: Option to issue extended or standard FDMI
@@ -2272,7 +2272,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
}
/**
- * qla2x00_fdmi_rprt() perform RPRT registration
+ * qla2x00_fdmi_rprt() - perform RPRT registration
* @vha: HA context
* @callopt: Option to issue extended or standard FDMI
* command parameter
@@ -3443,6 +3443,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
fcport->scan_state = QLA_FCPORT_SCAN;
+ if (fcport->loop_id == FC_NO_LOOP_ID)
+ fcport->logout_on_delete = 0;
+ else
+ fcport->logout_on_delete = 1;
}
}
goto login_logout;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f01f07116bd3..9c5782e946e0 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -718,6 +718,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e0,
"%s %8phC login gen changed\n",
__func__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
}
@@ -2766,6 +2767,49 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
return qla81xx_write_mpi_register(vha, mb);
}
+static int
+qla_chk_risc_recovery(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ __le16 __iomem *mbptr = &reg->mailbox0;
+ int i;
+ u16 mb[32];
+ int rc = QLA_SUCCESS;
+
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return rc;
+
+ /* this check is only valid after RISC reset */
+ mb[0] = rd_reg_word(mbptr);
+ mbptr++;
+ if (mb[0] == 0xf) {
+ rc = QLA_FUNCTION_FAILED;
+
+ for (i = 1; i < 32; i++) {
+ mb[i] = rd_reg_word(mbptr);
+ mbptr++;
+ }
+
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
+ mb[15]);
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
+ mb[23]);
+ ql_log(ql_log_warn, vha, 0x1015,
+ "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+ mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
+ mb[31]);
+ }
+ return rc;
+}
+
/**
* qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
* @vha: HA context
@@ -2782,6 +2826,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
uint16_t wd;
static int abts_cnt; /* ISP abort retry counts */
int rval = QLA_SUCCESS;
+ int print = 1;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -2870,17 +2915,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
rd_reg_dword(&reg->hccr);
wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ mdelay(10);
rd_reg_dword(&reg->hccr);
- rd_reg_word(&reg->mailbox0);
- for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
- rval == QLA_SUCCESS; cnt--) {
+ wd = rd_reg_word(&reg->mailbox0);
+ for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
barrier();
- if (cnt)
- udelay(5);
- else
+ if (cnt) {
+ mdelay(1);
+ if (print && qla_chk_risc_recovery(vha))
+ print = 0;
+
+ wd = rd_reg_word(&reg->mailbox0);
+ } else {
rval = QLA_FUNCTION_TIMEOUT;
+
+ ql_log(ql_log_warn, vha, 0x015e,
+ "RISC reset timeout\n");
+ }
}
+
if (rval == QLA_SUCCESS)
set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
@@ -5512,13 +5566,14 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (fcport->port_type & FCT_NVME_DISCOVERY)
rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
+ fc_remote_port_rolechg(rport, rport_ids.roles);
+
ql_dbg(ql_dbg_disc, vha, 0x20ee,
- "%s %8phN. rport %p is %s mode\n",
- __func__, fcport->port_name, rport,
+ "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
+ __func__, fcport->port_name, vha->host_no,
+ rport->scsi_target_id, rport,
(fcport->port_type == FCT_TARGET) ? "tgt" :
((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
-
- fc_remote_port_rolechg(rport, rport_ids.roles);
}
/*
@@ -6877,22 +6932,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!ha->flags.eeh_busy) {
- /* Make sure for ISP 82XX IO DMA is complete */
- if (IS_P3P_TYPE(ha)) {
- qla82xx_chip_reset_cleanup(vha);
- ql_log(ql_log_info, vha, 0x00b4,
- "Done chip reset cleanup.\n");
-
- /* Done waiting for pending commands.
- * Reset the online flag.
- */
- vha->flags.online = 0;
- }
+ /* Make sure for ISP 82XX IO DMA is complete */
+ if (IS_P3P_TYPE(ha)) {
+ qla82xx_chip_reset_cleanup(vha);
+ ql_log(ql_log_info, vha, 0x00b4,
+ "Done chip reset cleanup.\n");
- /* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ /* Done waiting for pending commands. Reset online flag */
+ vha->flags.online = 0;
}
+
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
/* memory barrier */
wmb();
}
@@ -6923,6 +6974,12 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->hw->flags.port_isolated)
return status;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803f,
+ "ISP Abort - ISP reg disconnect, exiting.\n");
+ return status;
+ }
+
if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
ha->flags.chip_reset_done = 1;
vha->flags.online = 1;
@@ -6962,8 +7019,18 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
ha->isp_ops->get_flash_version(vha, req->ring);
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803f,
+ "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
+ return status;
+ }
ha->isp_ops->nvram_config(vha);
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803f,
+ "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
+ return status;
+ }
if (!qla2x00_restart_isp(vha)) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index e80e41b6c9e1..82937c6bd9c4 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -432,3 +432,49 @@ qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
}
iores->res_type = RESOURCE_NONE;
}
+
+#define ISP_REG_DISCONNECT 0xffffffffU
+/**************************************************************************
+ * qla2x00_isp_reg_stat
+ *
+ * Description:
+ * Read the host status register of ISP before aborting the command.
+ *
+ * Input:
+ * ha = pointer to host adapter structure.
+ *
+ *
+ * Returns:
+ * Either true or false.
+ *
+ * Note: Return true if there is register disconnect.
+ **************************************************************************/
+static inline
+uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
+{
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+
+ if (IS_P3P_TYPE(ha))
+ return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ else
+ return ((rd_reg_dword(&reg->host_status)) ==
+ ISP_REG_DISCONNECT);
+}
+
+static inline
+bool qla_pci_disconnected(struct scsi_qla_host *vha,
+ struct device_reg_24xx __iomem *reg)
+{
+ uint32_t stat;
+ bool ret = false;
+
+ stat = rd_reg_dword(&reg->host_status);
+ if (stat == 0xffffffff) {
+ ql_log(ql_log_info, vha, 0x8041,
+ "detected PCI disconnect.\n");
+ qla_schedule_eeh_work(vha);
+ ret = true;
+ }
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8b41cbaf8535..38b5bdde2405 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -491,7 +491,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
}
/**
- * qla2x00_marker() - Send a marker IOCB to the firmware.
+ * __qla2x00_marker() - Send a marker IOCB to the firmware.
* @vha: HA context
* @qpair: queue pair pointer
* @loop_id: loop ID
@@ -1600,12 +1600,14 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct req_que *req = NULL;
+ struct rsp_que *rsp;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
req = vha->req;
+ rsp = req->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1643,8 +1645,14 @@ qla24xx_start_scsi(srb_t *sp)
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- rd_reg_dword_relaxed(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1707,6 +1715,11 @@ qla24xx_start_scsi(srb_t *sp)
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1835,8 +1848,13 @@ qla24xx_dif_start_scsi(srb_t *sp)
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- rd_reg_dword_relaxed(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1897,6 +1915,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1910,6 +1933,7 @@ queuing_error:
qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return QLA_FUNCTION_FAILED;
}
@@ -1931,6 +1955,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct req_que *req = NULL;
+ struct rsp_que *rsp;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
@@ -1941,6 +1966,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Setup qpair pointers */
req = qpair->req;
+ rsp = qpair->rsp;
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
@@ -1977,8 +2003,14 @@ qla2xxx_start_scsi_mq(srb_t *sp)
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- rd_reg_dword_relaxed(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2041,6 +2073,11 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2184,8 +2221,14 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- rd_reg_dword_relaxed(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2262,6 +2305,7 @@ queuing_error:
qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
return QLA_FUNCTION_FAILED;
}
@@ -2306,6 +2350,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
+ if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
+ qla_schedule_eeh_work(vha);
+ return NULL;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2379,7 +2428,8 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
if (sp->vha->flags.nvme2_enabled) {
/* Set service parameter BIT_7 for NVME CONF support */
- logio->io_parameter[0] |= NVME_PRLI_SP_CONF;
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_CONF);
/* Set service parameter BIT_8 for SLER support */
logio->io_parameter[0] |=
cpu_to_le32(NVME_PRLI_SP_SLER);
@@ -3720,6 +3770,9 @@ qla2x00_start_sp(srb_t *sp)
void *pkt;
unsigned long flags;
+ if (vha->hw->flags.eeh_busy)
+ return -EIO;
+
spin_lock_irqsave(qp->qp_lock_ptr, flags);
pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
@@ -3937,8 +3990,14 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- rd_reg_dword_relaxed(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -3977,5 +4036,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
qla2x00_start_iocbs(vha, req);
queuing_error:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5e188375c871..6e8f737a4af3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -270,12 +270,7 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
- /*
- * Schedule this (only once) on the default system
- * workqueue so that all the adapter workqueues and the
- * DPC thread can be shutdown cleanly.
- */
- schedule_work(&vha->hw->board_disable);
+ qla_schedule_eeh_work(vha);
}
return true;
} else
@@ -1657,8 +1652,6 @@ global_port_update:
case MBA_TEMPERATURE_ALERT:
ql_dbg(ql_dbg_async, vha, 0x505e,
"TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
- if (mb[1] == 0x12)
- schedule_work(&ha->board_disable);
break;
case MBA_TRANS_INSERT:
@@ -3440,7 +3433,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le16_to_cpu(pkt->comp_status);
+ abt->u.abt.comp_status = pkt->comp_status;
orig_sp = sp->cmd_sp;
/* Need to pass original sp */
if (orig_sp)
@@ -4005,11 +3998,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
/* user wants to control IRQ setting for target mode */
ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
- min((u16)ha->msix_count, (u16)num_online_cpus()),
+ min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
PCI_IRQ_MSIX);
} else
ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
- min((u16)ha->msix_count, (u16)num_online_cpus()),
+ min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&desc);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 06c99963b2c9..0bcd8afdc0ff 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -102,7 +102,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
int rval, i;
unsigned long flags = 0;
device_reg_t *reg;
- uint8_t abort_active;
+ uint8_t abort_active, eeh_delay;
uint8_t io_lock_on;
uint16_t command = 0;
uint16_t *iptr;
@@ -136,7 +136,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"PCI error, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
-
+ eeh_delay = 0;
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -159,10 +159,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
/* check if ISP abort is active and return cmd with timeout */
- if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
- !is_rom_cmd(mcp->mb[0])) {
+ if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+ !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
ql_log(ql_log_info, vha, 0x1005,
"Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
mcp->mb[0]);
@@ -185,7 +185,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
atomic_dec(&ha->num_pend_mbx_stage1);
- if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+ ha->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0xd035,
+ "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+ ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
rval = QLA_ABORTED;
goto premature_exit;
}
@@ -265,6 +269,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
if (chip_reset != ha->chip_reset) {
+ eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -282,6 +288,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -323,6 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
while (!ha->flags.mbox_int) {
if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -531,7 +541,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* Allow next mbx cmd to come in. */
complete(&ha->mbx_cmd_comp);
- if (ha->isp_ops->abort_isp(vha)) {
+ if (ha->isp_ops->abort_isp(vha) &&
+ !ha->flags.eeh_busy) {
/* Failed. retry later. */
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
@@ -584,6 +595,17 @@ mbx_done:
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
+ i = 500;
+ while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
+ /*
+ * The caller of this mailbox command encountered a PCI error.
+ * Hold the thread until the PCIe link reset completes to make
+ * sure the caller does not unmap DMA while recovery is in
+ * progress.
+ */
+ msleep(1);
+ i--;
+ }
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index ca7306685325..6e920da64863 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -516,7 +516,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
}
/**
- * qlafx00_warm_reset() - Perform warm reset of iSA(CPUs being reset on SOC).
+ * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
* @vha: HA context
*
*/
@@ -2860,7 +2860,7 @@ qlafx00_async_event(scsi_qla_host_t *vha)
}
/**
- * qlafx00x_mbx_completion() - Process mailbox command completions.
+ * qlafx00_mbx_completion() - Process mailbox command completions.
* @vha: SCSI driver HA context
* @mb0: value to be written into mailbox register 0
*/
@@ -3266,8 +3266,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.req_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.req_len);
put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
- &fx_iocb.dseg_rq.address);
- fx_iocb.dseg_rq.length =
+ &fx_iocb.dseg_rq[0].address);
+ fx_iocb.dseg_rq[0].length =
cpu_to_le32(fxio->u.fxiocb.req_len);
}
@@ -3276,8 +3276,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.rsp_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.rsp_len);
put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
- &fx_iocb.dseg_rsp.address);
- fx_iocb.dseg_rsp.length =
+ &fx_iocb.dseg_rsp[0].address);
+ fx_iocb.dseg_rsp[0].length =
cpu_to_le32(fxio->u.fxiocb.rsp_len);
}
@@ -3314,7 +3314,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
cpu_to_le16(bsg_job->request_payload.sg_cnt);
tot_dsds =
bsg_job->request_payload.sg_cnt;
- cur_dsd = &fx_iocb.dseg_rq;
+ cur_dsd = &fx_iocb.dseg_rq[0];
avail_dsds = 1;
for_each_sg(bsg_job->request_payload.sg_list, sg,
tot_dsds, index) {
@@ -3369,7 +3369,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
fx_iocb.rsp_dsdcnt =
cpu_to_le16(bsg_job->reply_payload.sg_cnt);
tot_dsds = bsg_job->reply_payload.sg_cnt;
- cur_dsd = &fx_iocb.dseg_rsp;
+ cur_dsd = &fx_iocb.dseg_rsp[0];
avail_dsds = 1;
for_each_sg(bsg_job->reply_payload.sg_list, sg,
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 73be8348402a..4f63aff333db 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -176,8 +176,12 @@ struct fxdisc_entry_fx00 {
uint8_t flags;
uint8_t reserved_1;
- struct dsd64 dseg_rq;
- struct dsd64 dseg_rsp;
+ /*
+ * Use an array size of 1 below to prevent Coverity from complaining
+ * about the append_dsd64() calls for the two arrays below.
+ */
+ struct dsd64 dseg_rq[1];
+ struct dsd64 dseg_rsp[1];
__le32 dataword;
__le32 adapid;
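Changing a scalar member into a one-element array is a standard way to placate static analysis when helpers treat the member as the head of a longer DSD list: indexing element [0] is provably in bounds, whereas appending past a scalar's address looks like a buffer overrun. The shape in isolation (struct name hypothetical):

	struct example_iocb {
		struct dsd64 dseg_rq[1];	/* was: struct dsd64 dseg_rq; */
	};

	/* Same address as before, but now a well-typed array element: */
	struct dsd64 *cur_dsd = &fx_iocb.dseg_rq[0];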
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 0237588f48b0..0cacb667a88b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -398,8 +398,13 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- rd_reg_dword_relaxed(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -536,6 +541,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 68a16c95dcb7..5ceecc9642fc 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -2028,7 +2028,7 @@ exit_error:
}
/**
- * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * qla8044_check_temp - Check the ISP82XX temperature.
* @vha: adapter block pointer.
*
* Note: The caller should not hold the idc lock.
@@ -2226,19 +2226,16 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
if (opcode & QLA82XX_DBG_OPCODE_WR) {
qla8044_wr_reg_indirect(vha, crb_addr,
crb_entry->value_1);
- opcode &= ~QLA82XX_DBG_OPCODE_WR;
}
if (opcode & QLA82XX_DBG_OPCODE_RW) {
qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
qla8044_wr_reg_indirect(vha, crb_addr, read_value);
- opcode &= ~QLA82XX_DBG_OPCODE_RW;
}
if (opcode & QLA82XX_DBG_OPCODE_AND) {
qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
read_value &= crb_entry->value_2;
- opcode &= ~QLA82XX_DBG_OPCODE_AND;
if (opcode & QLA82XX_DBG_OPCODE_OR) {
read_value |= crb_entry->value_3;
opcode &= ~QLA82XX_DBG_OPCODE_OR;
@@ -2249,7 +2246,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
read_value |= crb_entry->value_3;
qla8044_wr_reg_indirect(vha, crb_addr, read_value);
- opcode &= ~QLA82XX_DBG_OPCODE_OR;
}
if (opcode & QLA82XX_DBG_OPCODE_POLL) {
poll_time = crb_entry->crb_strd.poll_timeout;
@@ -2269,7 +2265,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
crb_addr, &read_value);
}
} while (1);
- opcode &= ~QLA82XX_DBG_OPCODE_POLL;
}
if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
@@ -2283,7 +2278,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
qla8044_rd_reg_indirect(vha, addr, &read_value);
index = crb_entry->crb_ctrl.state_index_v;
tmplt_hdr->saved_state_array[index] = read_value;
- opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
}
if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
@@ -2303,7 +2297,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
}
qla8044_wr_reg_indirect(vha, addr, read_value);
- opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
}
if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
@@ -2316,7 +2309,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
read_value |= crb_entry->value_3;
read_value += crb_entry->value_1;
tmplt_hdr->saved_state_array[index] = read_value;
- opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
}
crb_addr += crb_entry->crb_strd.addr_stride;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 074392560f3d..d74c32f84ef5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -971,6 +971,13 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
goto qc24_fail_command;
}
+ if (!qpair->online) {
+ ql_dbg(ql_dbg_io, vha, 0x3077,
+ "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
if (!fcport || fcport->deleted) {
cmd->result = DID_IMM_RETRY << 16;
goto qc24_fail_command;
@@ -1013,8 +1020,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
- if (rval == QLA_INTERFACE_ERROR)
- goto qc24_free_sp_fail_command;
goto qc24_host_busy_free_sp;
}
@@ -1026,11 +1031,6 @@ qc24_host_busy_free_sp:
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
-qc24_free_sp_fail_command:
- sp->free(sp);
- CMD_SP(cmd) = NULL;
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
-
qc24_fail_command:
cmd->scsi_done(cmd);
@@ -1207,35 +1207,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-#define ISP_REG_DISCONNECT 0xffffffffU
-/**************************************************************************
-* qla2x00_isp_reg_stat
-*
-* Description:
-* Read the host status register of ISP before aborting the command.
-*
-* Input:
-* ha = pointer to host adapter structure.
-*
-*
-* Returns:
-* Either true or false.
-*
-* Note: Return true if there is register disconnect.
-**************************************************************************/
-static inline
-uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
-{
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-
- if (IS_P3P_TYPE(ha))
- return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
- else
- return ((rd_reg_dword(&reg->host_status)) ==
- ISP_REG_DISCONNECT);
-}
-
/**************************************************************************
* qla2xxx_eh_abort
*
@@ -1269,6 +1240,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
"PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
return FAILED;
}
@@ -1462,6 +1434,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x803e,
"PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
return FAILED;
}
@@ -1478,6 +1451,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x803f,
"PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
return FAILED;
}
@@ -1513,6 +1487,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8040,
"PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
return FAILED;
}
@@ -1590,7 +1565,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8041,
"PCI/Register disconnect, exiting.\n");
- schedule_work(&ha->board_disable);
+ qla_pci_set_eeh_busy(vha);
return SUCCESS;
}
@@ -4238,11 +4213,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
/* Get consistent memory allocated for Special Features-CB. */
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- ha->sf_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
&ha->sf_init_cb_dma);
if (!ha->sf_init_cb)
goto fail_sf_init_cb;
- memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}
@@ -4644,8 +4618,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev,
EFT_SIZE, ha->eft, ha->eft_dma);
- if (ha->fw_dump)
- vfree(ha->fw_dump);
+ vfree(ha->fw_dump);
ha->fce = NULL;
ha->fce_dma = 0;
@@ -4659,8 +4632,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->fw_dump_len = 0;
for (j = 0; j < 2; j++, fwdt++) {
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
}
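These hunks, and the matching qla4xxx ones further down, all rely on the same guarantee: like kfree(), vfree() is a no-op when handed NULL, so the surrounding check adds nothing.

	if (ha->fw_dump)	/* redundant guard */
		vfree(ha->fw_dump);

	vfree(ha->fw_dump);	/* equivalent: vfree(NULL) does nothing */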
@@ -6676,6 +6648,9 @@ qla2x00_do_dpc(void *data)
schedule();
+ if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
+ qla_pci_set_eeh_busy(base_vha);
+
if (!base_vha->flags.init_done || ha->flags.mbox_busy)
goto end_loop;
@@ -6969,28 +6944,23 @@ intr_on_check:
mutex_unlock(&ha->mq_lock);
}
- if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
- &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
+ u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
+
+ if (threshold > ha->orig_fw_xcb_count)
+ threshold = ha->orig_fw_xcb_count;
+
ql_log(ql_log_info, base_vha, 0xffffff,
- "nvme: SET ZIO Activity exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
- if (qla27xx_set_zio_threshold(base_vha,
- ha->nvme_last_rptd_aen)) {
+ "SET ZIO Activity exchange threshold to %d.\n",
+ threshold);
+ if (qla27xx_set_zio_threshold(base_vha, threshold)) {
ql_log(ql_log_info, base_vha, 0xffffff,
- "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "Unable to SET ZIO Activity exchange threshold to %d.\n",
+ threshold);
}
}
- if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
- &base_vha->dpc_flags)) {
- ql_log(ql_log_info, base_vha, 0xffffff,
- "SET ZIO Activity exchange threshold to %d.\n",
- ha->last_zio_threshold);
- qla27xx_set_zio_threshold(base_vha,
- ha->last_zio_threshold);
- }
-
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -7218,14 +7188,13 @@ qla2x00_timer(struct timer_list *t)
index = atomic_read(&ha->nvme_active_aen_cnt);
if (!vha->vp_idx &&
(index != ha->nvme_last_rptd_aen) &&
- (index >= DEFAULT_ZIO_THRESHOLD) &&
ha->zio_mode == QLA_ZIO_MODE_6 &&
!ha->flags.host_shutting_down) {
+ ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
ql_log(ql_log_info, vha, 0x3002,
"nvme: Sched: Set ZIO exchange threshold to %d.\n",
ha->nvme_last_rptd_aen);
- ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
- set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+ set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
@@ -7398,6 +7367,8 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
int i;
unsigned long flags;
+ ql_dbg(ql_dbg_aer, vha, 0x9000,
+ "%s\n", __func__);
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
@@ -7407,28 +7378,16 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
ha->base_qpair->chip_reset;
}
- /* purge MBox commands */
- if (atomic_read(&ha->num_pend_mbx_stage3)) {
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- complete(&ha->mbx_intr_comp);
- }
-
- i = 0;
-
- while (atomic_read(&ha->num_pend_mbx_stage3) ||
- atomic_read(&ha->num_pend_mbx_stage2) ||
- atomic_read(&ha->num_pend_mbx_stage1)) {
- msleep(20);
- i++;
- if (i > 50)
- break;
- }
-
- ha->flags.purge_mbox = 0;
+ /*
+ * Purging the mailbox might take a while; Slot Reset/chip reset
+ * will take care of the purge.
+ */
mutex_lock(&ha->mq_lock);
+ ha->base_qpair->online = 0;
list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
qpair->online = 0;
+ wmb();
mutex_unlock(&ha->mq_lock);
qla2x00_mark_all_devices_lost(vha);
@@ -7465,14 +7424,17 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
scsi_qla_host_t *vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = vha->hw;
+ pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
- ql_dbg(ql_dbg_aer, vha, 0x9000,
- "PCI error detected, state %x.\n", state);
+ ql_log(ql_log_warn, vha, 0x9000,
+ "PCI error detected, state %x.\n", state);
+ ha->pci_error_state = QLA_PCI_ERR_DETECTED;
if (!atomic_read(&pdev->enable_cnt)) {
ql_log(ql_log_info, vha, 0xffff,
"PCI device is disabled,state %x\n", state);
- return PCI_ERS_RESULT_NEED_RESET;
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ goto out;
}
switch (state) {
@@ -7482,11 +7444,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- return PCI_ERS_RESULT_CAN_RECOVER;
+ ret = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
case pci_channel_io_frozen:
- ha->flags.eeh_busy = 1;
- qla_pci_error_cleanup(vha);
- return PCI_ERS_RESULT_NEED_RESET;
+ qla_pci_set_eeh_busy(vha);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
@@ -7494,9 +7457,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- return PCI_ERS_RESULT_DISCONNECT;
+ ret = PCI_ERS_RESULT_DISCONNECT;
}
- return PCI_ERS_RESULT_NEED_RESET;
+out:
+ ql_dbg(ql_dbg_aer, vha, 0x600d,
+ "PCI error detected returning [%x].\n", ret);
+ return ret;
}
static pci_ers_result_t
@@ -7510,6 +7476,10 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+ ql_log(ql_log_warn, base_vha, 0x9000,
+ "mmio enabled\n");
+
+ ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
if (IS_QLA82XX(ha))
return PCI_ERS_RESULT_RECOVERED;
@@ -7533,10 +7503,11 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
ql_log(ql_log_info, base_vha, 0x9003,
"RISC paused -- mmio_enabled, Dumping firmware.\n");
qla2xxx_dump_fw(base_vha);
-
- return PCI_ERS_RESULT_NEED_RESET;
- } else
- return PCI_ERS_RESULT_RECOVERED;
+ }
+ /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
+ ql_dbg(ql_dbg_aer, base_vha, 0x600d,
+ "mmio enabled returning.\n");
+ return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t
@@ -7548,9 +7519,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
int rc;
struct qla_qpair *qpair = NULL;
- ql_dbg(ql_dbg_aer, base_vha, 0x9004,
- "Slot Reset.\n");
+ ql_log(ql_log_warn, base_vha, 0x9004,
+ "Slot Reset.\n");
+ ha->pci_error_state = QLA_PCI_SLOT_RESET;
/* Workaround: the qla2xxx driver, which accesses hardware earlier,
* needs the error state to be pci_channel_io_online.
* Otherwise the mailbox command times out.
@@ -7584,16 +7556,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
qpair->online = 1;
mutex_unlock(&ha->mq_lock);
+ ha->flags.eeh_busy = 0;
base_vha->flags.online = 1;
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
- ret = PCI_ERS_RESULT_RECOVERED;
+ ha->isp_ops->abort_isp(base_vha);
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ if (qla2x00_isp_reg_stat(ha)) {
+ ha->flags.eeh_busy = 1;
+ qla_pci_error_cleanup(base_vha);
+ ql_log(ql_log_warn, base_vha, 0x9005,
+ "Device unable to recover from PCI error.\n");
+ } else {
+ ret = PCI_ERS_RESULT_RECOVERED;
+ }
exit_slot_reset:
ql_dbg(ql_dbg_aer, base_vha, 0x900e,
- "slot_reset return %x.\n", ret);
+ "Slot Reset returning %x.\n", ret);
return ret;
}
@@ -7605,16 +7585,55 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int ret;
- ql_dbg(ql_dbg_aer, base_vha, 0x900f,
- "pci_resume.\n");
+ ql_log(ql_log_warn, base_vha, 0x900f,
+ "Pci Resume.\n");
- ha->flags.eeh_busy = 0;
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x9002,
"The device failed to resume I/O from slot/link_reset.\n");
}
+ ha->pci_error_state = QLA_PCI_RESUME;
+ ql_dbg(ql_dbg_aer, base_vha, 0x600d,
+ "Pci Resume returning.\n");
+}
+
+void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ bool do_cleanup = false;
+ unsigned long flags;
+
+ if (ha->flags.eeh_busy)
+ return;
+
+ spin_lock_irqsave(&base_vha->work_lock, flags);
+ if (!ha->flags.eeh_busy) {
+ ha->flags.eeh_busy = 1;
+ do_cleanup = true;
+ }
+ spin_unlock_irqrestore(&base_vha->work_lock, flags);
+
+ if (do_cleanup)
+ qla_pci_error_cleanup(base_vha);
+}
+
+/*
+ * This routine schedules a task to pause I/O from interrupt context
+ * if the caller sees a PCIe error event (register reads returning all 0xf's).
+ */
+void qla_schedule_eeh_work(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (ha->flags.eeh_busy)
+ return;
+
+ set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(base_vha);
}
static void
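qla_pci_set_eeh_busy() above follows the check/lock/re-check pattern so that, when several contexts spot the error at once, exactly one of them runs the heavy cleanup and none does so under the spinlock. The skeleton, with hypothetical names:

	bool do_cleanup = false;

	if (flag_is_set)			/* cheap unlocked fast path */
		return;

	spin_lock_irqsave(&lock, flags);
	if (!flag_is_set) {			/* re-check under the lock */
		flag_is_set = true;
		do_cleanup = true;		/* only one caller wins */
	}
	spin_unlock_irqrestore(&lock, flags);

	if (do_cleanup)
		heavy_cleanup();		/* outside the spinlock */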
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index f771fabcba59..060c89237777 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2621,10 +2621,11 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
}
static int
-qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
+qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, __le32 *buf,
uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf)
{
- uint32_t *p, check_sum = 0;
+ uint32_t check_sum = 0;
+ __le32 *p;
int i;
p = buf + buf_size_without_sfub;
@@ -2790,8 +2791,8 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
goto done;
}
- rval = qla28xx_extract_sfub_and_verify(vha, dwptr, dwords,
- buf_size_without_sfub, (uint8_t *)sfub);
+ rval = qla28xx_extract_sfub_and_verify(vha, (__le32 *)dwptr,
+ dwords, buf_size_without_sfub, (uint8_t *)sfub);
if (rval != QLA_SUCCESS)
goto done;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 480e7d2dcf3e..b2008fb1dd38 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1029,7 +1029,12 @@ void qlt_free_session_done(struct work_struct *work)
}
msleep(100);
cnt++;
- if (cnt > 200)
+ /*
+ * Driver timeout is set to 22 sec; update the count value to loop
+ * long enough (230 x 100 ms = 23 s) for the log-out to complete
+ * before advancing. Otherwise a straddling logout can interfere
+ * with the re-login attempt.
+ */
+ if (cnt > 230)
break;
}
@@ -6459,7 +6464,7 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
}
/**
- * qla_tgt_lport_register - register lport with external module
+ * qlt_lport_register - register lport with external module
*
* @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
* @phys_wwpn: physical port WWPN
@@ -6535,7 +6540,7 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
EXPORT_SYMBOL(qlt_lport_register);
/**
- * qla_tgt_lport_deregister - Degister lport
+ * qlt_lport_deregister - Deregister lport
*
* @vha: Registered scsi_qla_host pointer
*/
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 72c648442e8d..da11829fa12d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.00.105-k"
+#define QLA2XXX_VERSION "10.02.00.106-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 105
+#define QLA_DRIVER_BETA_VER 106
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8b4890cdd4ca..03de1bcf1461 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -451,7 +451,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
struct se_portal_group *se_tpg;
struct tcm_qla2xxx_tpg *tpg;
#endif
- int target_flags = TARGET_SCF_ACK_KREF;
+ int rc, target_flags = TARGET_SCF_ACK_KREF;
unsigned long flags;
if (bidi)
@@ -486,9 +486,18 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
list_add_tail(&cmd->sess_cmd_list, &sess->sess_cmd_list);
spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
- return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
- cmd->unpacked_lun, data_length, fcp_task_attr,
- data_dir, target_flags);
+ rc = target_init_cmd(se_cmd, se_sess, &cmd->sense_buffer[0],
+ cmd->unpacked_lun, data_length, fcp_task_attr,
+ data_dir, target_flags);
+ if (rc)
+ return rc;
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+ GFP_KERNEL))
+ return 0;
+
+ target_submit(se_cmd);
+ return 0;
}
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
@@ -1569,7 +1578,7 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
/*
* Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
*/
-static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag,
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
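The tcm_qla2xxx conversion adopts the target core's split submission path: target_init_cmd() can fail outright (the command was never set up), a nonzero return from target_submit_prep() means the core has already handled the failure and queued a response, and only a fully prepared command reaches target_submit(). Condensed from the hunk above:

	rc = target_init_cmd(se_cmd, se_sess, &cmd->sense_buffer[0],
			     cmd->unpacked_lun, data_length, fcp_task_attr,
			     data_dir, target_flags);
	if (rc)
		return rc;		/* initialization failed */

	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return 0;		/* core already responded */

	target_submit(se_cmd);		/* hand off for execution */
	return 0;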
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 17b719a8b6fb..187d78aa4f67 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -44,7 +44,7 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
}
/**
- * qla4xxx_is_intr_poll_mode – Are we allowed to poll for interrupts?
+ * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
* @ha: Pointer to host adapter structure.
* returns: 1=polling mode, 0=non-polling mode
**/
@@ -933,7 +933,7 @@ int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
}
/**
- * qla4xxx_set_fwddb_entry - sets a ddb entry.
+ * qla4xxx_set_ddb_entry - sets a ddb entry.
* @ha: Pointer to host adapter structure.
* @fw_ddb_index: Firmware's device database index
* @fw_ddb_entry_dma: dma address of ddb entry
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e6e35e6958f6..66a487795c53 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3634,12 +3634,6 @@ flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
return hw->flash_conf_off | faddr;
}
-static inline uint32_t
-flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
-{
- return hw->flash_data_off | faddr;
-}
-
static uint32_t *
qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
uint32_t faddr, uint32_t length)
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 7bd9a4a04ad5..ad3afe30f617 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -618,7 +618,7 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
}
/**
- * qla4xxx_create chap_list - Create CHAP list from FLASH
+ * qla4xxx_create_chap_list - Create CHAP list from FLASH
* @ha: pointer to adapter structure
*
* Read flash and make a list of CHAP entries, during login when a CHAP entry
@@ -4171,8 +4171,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
- if (ha->fw_dump)
- vfree(ha->fw_dump);
+ vfree(ha->fw_dump);
ha->queues_len = 0;
ha->queues = NULL;
@@ -4192,8 +4191,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_pool_destroy(ha->chap_dma_pool);
- if (ha->chap_list)
- vfree(ha->chap_list);
+ vfree(ha->chap_list);
ha->chap_list = NULL;
dma_pool_destroy(ha->fw_ddb_dma_pool);
@@ -4211,8 +4209,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
iounmap(ha->reg);
}
- if (ha->reset_tmplt.buff)
- vfree(ha->reset_tmplt.buff);
+ vfree(ha->reset_tmplt.buff);
pci_release_regions(ha->pdev);
}
@@ -6396,10 +6393,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
}
exit_check:
- if (fw_tddb)
- vfree(fw_tddb);
- if (tmp_tddb)
- vfree(tmp_tddb);
+ vfree(fw_tddb);
+ vfree(tmp_tddb);
return ret;
}
@@ -6551,10 +6546,8 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
}
exit_check:
- if (fw_tddb)
- vfree(fw_tddb);
- if (tmp_tddb)
- vfree(tmp_tddb);
+ vfree(fw_tddb);
+ vfree(tmp_tddb);
return ret;
}
@@ -6961,7 +6954,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
if (is_reset == RESET_ADAPTER) {
iscsi_block_session(cls_sess);
/* Use the relogin path to discover new devices
- * by short-circuting the logic of setting
+ * by short-circuiting the logic of setting
* timer to relogin - instead set the flags
* to initiate login right away.
*/
@@ -7834,10 +7827,8 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
ret = -ESRCH;
exit_ddb_logout:
- if (flash_tddb)
- vfree(flash_tddb);
- if (tmp_tddb)
- vfree(tmp_tddb);
+ vfree(flash_tddb);
+ vfree(tmp_tddb);
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
@@ -9633,7 +9624,7 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
}
/**
- * qla4xxx_pci_mmio_enabled() gets called if
+ * qla4xxx_pci_mmio_enabled() - gets called if
* qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
* and read/write to the device still works.
* @pdev: PCI device pointer
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 24619c3bebd5..e9e2f0e15ac8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -214,6 +214,15 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
scsi_io_completion(cmd, good_bytes);
}
+
+/*
+ * 1024 is big enough for saturating the fast scsi LUN now
+ */
+int scsi_device_max_queue_depth(struct scsi_device *sdev)
+{
+ return max_t(int, sdev->host->can_queue, 1024);
+}
+
/**
* scsi_change_queue_depth - change a device's queue depth
* @sdev: SCSI Device in question
@@ -223,6 +232,8 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
*/
int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
+ depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
+
if (depth > 0) {
sdev->queue_depth = depth;
wmb();
@@ -231,6 +242,8 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
if (sdev->request_queue)
blk_set_queue_depth(sdev->request_queue, depth);
+ sbitmap_resize(&sdev->budget_map, sdev->queue_depth);
+
return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
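The new scsi_device_max_queue_depth() caps queue depths at the larger of the host's can_queue and 1024, and scsi_change_queue_depth() now clamps requests to that bound before resizing the budget sbitmap. A runnable userspace model of just the clamp arithmetic (local names; the kernel helpers are elided):

```c
#include <stdio.h>

static int max_queue_depth(int can_queue)	/* ~ scsi_device_max_queue_depth() */
{
	return can_queue > 1024 ? can_queue : 1024;	/* max_t(int, ...) */
}

static int change_queue_depth(int requested, int can_queue)
{
	int depth = requested;
	int limit = max_queue_depth(can_queue);

	if (depth > limit)
		depth = limit;	/* the min_t() clamp added by this series */
	/* the kernel then calls sbitmap_resize(&sdev->budget_map, depth) */
	return depth;
}

int main(void)
{
	printf("%d\n", change_queue_depth(4096, 256));	/* -> 1024 */
	printf("%d\n", change_queue_depth(64, 256));	/* -> 64 */
	return 0;
}
```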
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3cdeaeb92933..70165be10f00 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -322,17 +322,19 @@ struct sdeb_store_info {
container_of(d, struct sdebug_host_info, dev)
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
- SDEB_DEFER_WQ = 2};
+ SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
+ ktime_t cmpl_ts;/* time since boot to complete this cmd */
int sqa_idx; /* index of sdebug_queue array */
int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
int hc_idx; /* hostwide tag index */
int issuing_cpu;
bool init_hrt;
bool init_wq;
+ bool init_poll;
bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -357,6 +359,7 @@ static atomic_t sdebug_completions; /* count of deferred completions */
static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
+static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
struct opcode_info_t {
u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
@@ -829,6 +832,7 @@ static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
+static int poll_queues; /* iouring iopoll interface.*/
static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
@@ -4729,7 +4733,6 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
- sd_dp->defer_t = SDEB_DEFER_NONE;
if (unlikely(aborted))
sd_dp->aborted = false;
qc_idx = sd_dp->qc_idx;
@@ -4744,6 +4747,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
return;
}
spin_lock_irqsave(&sqp->qc_lock, iflags);
+ sd_dp->defer_t = SDEB_DEFER_NONE;
sqcp = &sqp->qc_arr[qc_idx];
scp = sqcp->a_cmnd;
if (unlikely(scp == NULL)) {
@@ -5363,6 +5367,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
{
bool new_sd_dp;
bool inject = false;
+ bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI);
int k, num_in_q, qdepth;
unsigned long iflags;
u64 ns_from_boot = 0;
@@ -5432,6 +5437,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
cmnd->host_scribble = (unsigned char *)sqcp;
sd_dp = sqcp->sd_dp;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+
if (!sd_dp) {
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (!sd_dp) {
@@ -5448,7 +5454,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_host_max_queue)
sd_dp->hc_idx = get_tag(cmnd);
- if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
+ if (hipri)
ns_from_boot = ktime_get_boottime_ns();
/* one of the resp_*() response functions is called here */
@@ -5508,40 +5514,66 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt -= d;
}
}
- if (!sd_dp->init_hrt) {
- sd_dp->init_hrt = true;
- sqcp->sd_dp = sd_dp;
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
+ if (hipri) {
+ sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (!sd_dp->init_poll) {
+ sd_dp->init_poll = true;
+ sqcp->sd_dp = sd_dp;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ }
+ sd_dp->defer_t = SDEB_DEFER_POLL;
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ } else {
+ if (!sd_dp->init_hrt) {
+ sd_dp->init_hrt = true;
+ sqcp->sd_dp = sd_dp;
+ hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ }
+ sd_dp->defer_t = SDEB_DEFER_HRT;
+ /* schedule the invocation of scsi_done() for a later time */
+ hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
- sd_dp->defer_t = SDEB_DEFER_HRT;
- /* schedule the invocation of scsi_done() for a later time */
- hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
- if (!sd_dp->init_wq) {
- sd_dp->init_wq = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
- }
- if (sdebug_statistics)
- sd_dp->issuing_cpu = raw_smp_processor_id();
- sd_dp->defer_t = SDEB_DEFER_WQ;
if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
atomic_read(&sdeb_inject_pending)))
sd_dp->aborted = true;
- schedule_work(&sd_dp->ew.work);
- if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
- atomic_read(&sdeb_inject_pending))) {
+ if (hipri) {
+ sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (!sd_dp->init_poll) {
+ sd_dp->init_poll = true;
+ sqcp->sd_dp = sd_dp;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ }
+ sd_dp->defer_t = SDEB_DEFER_POLL;
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ } else {
+ if (!sd_dp->init_wq) {
+ sd_dp->init_wq = true;
+ sqcp->sd_dp = sd_dp;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+ }
+ sd_dp->defer_t = SDEB_DEFER_WQ;
+ schedule_work(&sd_dp->ew.work);
+ }
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
+ if (unlikely(sd_dp->aborted)) {
sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
blk_abort_request(cmnd->request);
atomic_set(&sdeb_inject_pending, 0);
+ sd_dp->aborted = false;
}
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
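The reworked schedule_resp() adds a third deferral mode: polled (REQ_HIPRI) commands record an absolute completion timestamp in cmpl_ts rather than arming an hrtimer or queueing work, leaving completion to the new poll callback. A condensed, runnable model of that three-way choice (illustrative names; the real function also handles zero-delay and abort-injection paths):

```c
#include <stdio.h>

enum defer_type { DEFER_HRT, DEFER_WQ, DEFER_POLL };

static enum defer_type pick_defer(int hipri, long delay_ns)
{
	if (hipri)
		return DEFER_POLL;	/* record cmpl_ts; poll loop completes it */
	return delay_ns >= 0 ? DEFER_HRT : DEFER_WQ;	/* jdelay < 0 -> workqueue */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_defer(1, 100),	/* DEFER_POLL */
	       pick_defer(0, 100),	/* DEFER_HRT  */
	       pick_defer(0, -1));	/* DEFER_WQ   */
	return 0;
}
```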
@@ -5615,6 +5647,7 @@ module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
+module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
@@ -5677,6 +5710,7 @@ MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
@@ -5768,11 +5802,12 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
dix_reads, dix_writes, dif_errors);
seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
sdebug_statistics);
- seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+ seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
atomic_read(&sdebug_cmnd_count),
atomic_read(&sdebug_completions),
"miss_cpus", atomic_read(&sdebug_miss_cpus),
- atomic_read(&sdebug_a_tsf));
+ atomic_read(&sdebug_a_tsf),
+ atomic_read(&sdeb_mq_poll_count));
seq_printf(m, "submit_queues=%d\n", submit_queues);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
@@ -7202,6 +7237,121 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
+static int sdebug_map_queues(struct Scsi_Host *shost)
+{
+ int i, qoff;
+
+ if (shost->nr_hw_queues == 1)
+ return 0;
+
+ for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+ map->nr_queues = 0;
+
+ if (i == HCTX_TYPE_DEFAULT)
+ map->nr_queues = submit_queues - poll_queues;
+ else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = poll_queues;
+
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ map->queue_offset = qoff;
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ }
+
+ return 0;
+
+}
+
+static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ bool first;
+ bool retiring = false;
+ int num_entries = 0;
+ unsigned int qc_idx = 0;
+ unsigned long iflags;
+ ktime_t kt_from_boot = ktime_get_boottime();
+ struct sdebug_queue *sqp;
+ struct sdebug_queued_cmd *sqcp;
+ struct scsi_cmnd *scp;
+ struct sdebug_dev_info *devip;
+ struct sdebug_defer *sd_dp;
+
+ sqp = sdebug_q_arr + queue_num;
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+
+ for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
+ if (first) {
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ first = false;
+ } else {
+ qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
+ }
+ if (unlikely(qc_idx >= sdebug_max_queue))
+ break;
+
+ sqcp = &sqp->qc_arr[qc_idx];
+ sd_dp = sqcp->sd_dp;
+ if (unlikely(!sd_dp))
+ continue;
+ scp = sqcp->a_cmnd;
+ if (unlikely(scp == NULL)) {
+ pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
+ queue_num, qc_idx, __func__);
+ break;
+ }
+ if (sd_dp->defer_t == SDEB_DEFER_POLL) {
+ if (kt_from_boot < sd_dp->cmpl_ts)
+ continue;
+
+ } else /* ignoring non REQ_HIPRI requests */
+ continue;
+ devip = (struct sdebug_dev_info *)scp->device->hostdata;
+ if (likely(devip))
+ atomic_dec(&devip->num_in_q);
+ else
+ pr_err("devip=NULL from %s\n", __func__);
+ if (unlikely(atomic_read(&retired_max_queue) > 0))
+ retiring = true;
+
+ sqcp->a_cmnd = NULL;
+ if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
+ pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
+ sqp, queue_num, qc_idx, __func__);
+ break;
+ }
+ if (unlikely(retiring)) { /* user has reduced max_queue */
+ int k, retval;
+
+ retval = atomic_read(&retired_max_queue);
+ if (qc_idx >= retval) {
+ pr_err("index %d too large\n", retval);
+ break;
+ }
+ k = find_last_bit(sqp->in_use_bm, retval);
+ if ((k < sdebug_max_queue) || (k == retval))
+ atomic_set(&retired_max_queue, 0);
+ else
+ atomic_set(&retired_max_queue, k + 1);
+ }
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ scp->scsi_done(scp); /* callback to mid level */
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ num_entries++;
+ }
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (num_entries > 0)
+ atomic_add(num_entries, &sdeb_mq_poll_count);
+ return num_entries;
+}
+
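sdebug_blk_mq_poll() walks the per-queue in-use bitmap under qc_lock and completes every polled command whose cmpl_ts has passed, returning the count so the block layer knows whether the poll made progress. A single-threaded userspace sketch of the same walk (a plain uint64_t stands in for the bitmap; the locking and retired_max_queue bookkeeping are omitted):

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUE 64

/* Scan the in-use bitmap; complete entries whose deadline has passed. */
static int poll_queue(uint64_t *in_use, const uint64_t cmpl_ts[], uint64_t now)
{
	int completed = 0;

	for (int idx = 0; idx < MAX_QUEUE; idx++) {
		if (!(*in_use & (1ULL << idx)))
			continue;			/* slot free */
		if (now < cmpl_ts[idx])
			continue;			/* not due yet; next poll */
		*in_use &= ~(1ULL << idx);		/* ~ test_and_clear_bit() */
		completed++;				/* scsi_done() runs here */
	}
	return completed;	/* caller adds this to sdeb_mq_poll_count */
}

int main(void)
{
	uint64_t bm = 0x0b;				/* slots 0, 1 and 3 in use */
	uint64_t ts[MAX_QUEUE] = { [0] = 10, [1] = 50, [3] = 30 };

	printf("%d\n", poll_queue(&bm, ts, 35));	/* completes 0 and 3 -> 2 */
	return 0;
}
```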
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -7381,6 +7531,8 @@ static struct scsi_host_template sdebug_driver_template = {
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
.change_queue_depth = sdebug_change_qdepth,
+ .map_queues = sdebug_map_queues,
+ .mq_poll = sdebug_blk_mq_poll,
.eh_abort_handler = scsi_debug_abort,
.eh_device_reset_handler = scsi_debug_device_reset,
.eh_target_reset_handler = scsi_debug_target_reset,
@@ -7428,6 +7580,25 @@ static int sdebug_driver_probe(struct device *dev)
if (sdebug_host_max_queue)
hpnt->host_tagset = 1;
+ /* poll queues are possible for nr_hw_queues > 1 */
+ if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
+ pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
+ my_name, poll_queues, hpnt->nr_hw_queues);
+ poll_queues = 0;
+ }
+
+ /*
+ * Poll queues don't need interrupts, but we need at least one I/O queue
+ * left over for non-polled I/O.
+ * If condition not met, trim poll_queues to 1 (just for simplicity).
+ */
+ if (poll_queues >= submit_queues) {
+ pr_warn("%s: trim poll_queues to 1\n", my_name);
+ poll_queues = 1;
+ }
+ if (poll_queues)
+ hpnt->nr_maps = 3;
+
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
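sdebug_map_queues() splits submit_queues between the default and poll hctx types, giving each map a contiguous queue range via queue_offset, and sdebug_driver_probe() sets nr_maps = 3 once poll queues are enabled. A runnable model of the split arithmetic (blk_mq_map_queues() itself is elided):

```c
#include <stdio.h>

struct queue_map { int nr_queues; int queue_offset; };

static void map_queues(int submit_queues, int poll_queues,
		       struct queue_map *def, struct queue_map *poll)
{
	int qoff = 0;

	def->nr_queues = submit_queues - poll_queues;	/* HCTX_TYPE_DEFAULT */
	def->queue_offset = qoff;
	qoff += def->nr_queues;

	poll->nr_queues = poll_queues;	/* HCTX_TYPE_POLL; may be 0 */
	poll->queue_offset = qoff;
}

int main(void)
{
	struct queue_map d, p;

	map_queues(8, 2, &d, &p);
	printf("default: %d@%d, poll: %d@%d\n",
	       d.nr_queues, d.queue_offset, p.nr_queues, p.queue_offset);
	/* default: 6@0, poll: 2@6 */
	return 0;
}
```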
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 6f41e4b5a2b8..7b56e00c7df6 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * SCSI device handler infrastruture.
+ * SCSI device handler infrastructure.
*
* Copyright IBM Corporation, 2007
* Authors:
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 08c06c56331c..d8fafe77dbbe 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -60,8 +60,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd);
#define HOST_RESET_SETTLE_TIME (10)
static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
-static int scsi_try_to_abort_cmd(struct scsi_host_template *,
- struct scsi_cmnd *);
+static enum scsi_disposition scsi_try_to_abort_cmd(struct scsi_host_template *,
+ struct scsi_cmnd *);
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
@@ -151,7 +151,7 @@ scmd_eh_abort_handler(struct work_struct *work)
struct scsi_cmnd *scmd =
container_of(work, struct scsi_cmnd, abort_work.work);
struct scsi_device *sdev = scmd->device;
- int rtn;
+ enum scsi_disposition rtn;
if (scsi_host_eh_past_deadline(sdev->host)) {
SCSI_LOG_ERROR_RECOVERY(3,
@@ -498,7 +498,7 @@ static void scsi_report_sense(struct scsi_device *sdev,
* When a deferred error is detected the current command has
* not been executed and needs retrying.
*/
-int scsi_check_sense(struct scsi_cmnd *scmd)
+enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
{
struct scsi_device *sdev = scmd->device;
struct scsi_sense_hdr sshdr;
@@ -512,7 +512,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
return NEEDS_RETRY;
if (sdev->handler && sdev->handler->check_sense) {
- int rc;
+ enum scsi_disposition rc;
rc = sdev->handler->check_sense(sdev, &sshdr);
if (rc != SCSI_RETURN_NOT_HANDLED)
@@ -723,7 +723,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev)
* don't allow for the possibility of retries here, and we are a lot
* more restrictive about what we consider acceptable.
*/
-static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
/*
* first check the host byte, to see if there is anything in there
@@ -804,10 +804,10 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
* scsi_try_host_reset - ask host adapter to reset itself
* @scmd: SCSI cmd to send host reset.
*/
-static int scsi_try_host_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_host_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
- int rtn;
+ enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
@@ -834,10 +834,10 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
* scsi_try_bus_reset - ask host to perform a bus reset
* @scmd: SCSI cmd to send bus reset.
*/
-static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
- int rtn;
+ enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
@@ -876,10 +876,10 @@ static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
* timer on it, and set the host back to a consistent state prior to
* returning.
*/
-static int scsi_try_target_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
- int rtn;
+ enum scsi_disposition rtn;
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
@@ -907,9 +907,9 @@ static int scsi_try_target_reset(struct scsi_cmnd *scmd)
* timer on it, and set the host back to a consistent state prior to
* returning.
*/
-static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
- int rtn;
+ enum scsi_disposition rtn;
struct scsi_host_template *hostt = scmd->device->host->hostt;
if (!hostt->eh_device_reset_handler)
@@ -938,8 +938,8 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
* if the device is temporarily unavailable (eg due to a
* link down on FibreChannel)
*/
-static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
- struct scsi_cmnd *scmd)
+static enum scsi_disposition
+scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
{
if (!hostt->eh_abort_handler)
return FAILED;
@@ -1072,8 +1072,8 @@ EXPORT_SYMBOL(scsi_eh_restore_cmnd);
* Return value:
* SUCCESS or FAILED or NEEDS_RETRY
*/
-static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
- int cmnd_size, int timeout, unsigned sense_bytes)
+static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd,
+ unsigned char *cmnd, int cmnd_size, int timeout, unsigned sense_bytes)
{
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
@@ -1109,7 +1109,7 @@ retry:
if (sdev->sdev_state != SDEV_BLOCK)
rtn = shost->hostt->queuecommand(shost, scmd);
else
- rtn = SCSI_MLQUEUE_DEVICE_BUSY;
+ rtn = FAILED;
mutex_unlock(&sdev->state_mutex);
if (rtn) {
@@ -1180,12 +1180,13 @@ retry:
* that we obtain it on our own. This function will *not* return until
* the command either times out, or it completes.
*/
-static int scsi_request_sense(struct scsi_cmnd *scmd)
+static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd)
{
return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
}
-static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
+static enum scsi_disposition
+scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn)
{
if (!blk_rq_is_passthrough(scmd->request)) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
@@ -1238,7 +1239,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
{
struct scsi_cmnd *scmd, *next;
struct Scsi_Host *shost;
- int rtn;
+ enum scsi_disposition rtn;
/*
* If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO,
@@ -1316,7 +1317,8 @@ EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
- int retry_cnt = 1, rtn;
+ int retry_cnt = 1;
+ enum scsi_disposition rtn;
retry_tur:
rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
@@ -1404,7 +1406,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
if (scmd->device->allow_restart) {
- int i, rtn = NEEDS_RETRY;
+ int i;
+ enum scsi_disposition rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
@@ -1498,7 +1501,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *bdr_scmd, *next;
struct scsi_device *sdev;
- int rtn;
+ enum scsi_disposition rtn;
shost_for_each_device(sdev, shost) {
if (scsi_host_eh_past_deadline(shost)) {
@@ -1565,7 +1568,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
while (!list_empty(&tmp_list)) {
struct scsi_cmnd *next, *scmd;
- int rtn;
+ enum scsi_disposition rtn;
unsigned int id;
if (scsi_host_eh_past_deadline(shost)) {
@@ -1623,7 +1626,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
struct scsi_cmnd *scmd, *chan_scmd, *next;
LIST_HEAD(check_list);
unsigned int channel;
- int rtn;
+ enum scsi_disposition rtn;
/*
* we really want to loop over the various channels, and do this on
@@ -1694,7 +1697,7 @@ static int scsi_eh_host_reset(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *next;
LIST_HEAD(check_list);
- int rtn;
+ enum scsi_disposition rtn;
if (!list_empty(work_q)) {
scmd = list_entry(work_q->next,
@@ -1800,9 +1803,9 @@ check_type:
* doesn't require the error handler read (i.e. we don't need to
* abort/reset), this function should return SUCCESS.
*/
-int scsi_decide_disposition(struct scsi_cmnd *scmd)
+enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
{
- int rtn;
+ enum scsi_disposition rtn;
/*
* if the device is offline, then we clearly just pass the result back
@@ -2365,7 +2368,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
struct Scsi_Host *shost = dev->host;
struct request *rq;
unsigned long flags;
- int error = 0, rtn, val;
+ int error = 0, val;
+ enum scsi_disposition rtn;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
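These scsi_error.c hunks retype every error-handling return path from int to the new enum scsi_disposition; the SDEV_BLOCK branch in scsi_send_eh_cmnd() changes from SCSI_MLQUEUE_DEVICE_BUSY to FAILED because a queueing code is not a valid disposition under the new type. A compilable sketch of the enum and a typed helper (the values mirror the long-standing scsi.h #defines, quoted from memory, so treat them as illustrative):

```c
#include <stdio.h>

enum scsi_disposition {
	NEEDS_RETRY	= 0x2001,
	SUCCESS		= 0x2002,
	FAILED		= 0x2003,
	QUEUED		= 0x2004,
	SOFT_ERROR	= 0x2005,
	ADD_TO_MLQUEUE	= 0x2006,
	TIMEOUT_ERROR	= 0x2007,
};

/* With an enum return type, handing back an unrelated int constant is
 * something the compiler and static checkers can now flag. */
static enum scsi_disposition classify(int completed_ok, int retryable)
{
	if (completed_ok)
		return SUCCESS;
	return retryable ? NEEDS_RETRY : FAILED;
}

int main(void)
{
	printf("0x%x\n", classify(0, 1));	/* 0x2001 */
	return 0;
}
```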
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c289991ffaed..532304d42f00 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -294,7 +294,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
- atomic_dec(&sdev->device_busy);
+ sbitmap_put(&sdev->budget_map, cmd->budget_token);
+ cmd->budget_token = -1;
}
static void scsi_kick_queue(struct request_queue *q)
@@ -350,7 +351,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
- if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+ if (scsi_device_busy(sdev) >= sdev->queue_depth)
return true;
if (atomic_read(&sdev->device_blocked) > 0)
return true;
@@ -964,8 +965,11 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
}
/**
- * scsi_alloc_sgtables - allocate S/G tables for a command
- * @cmd: command descriptor we wish to initialize
+ * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
+ * @cmd: SCSI command data structure to initialize.
+ *
+ * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
+ * for @cmd.
*
* Returns:
* * BLK_STS_OK - on success
@@ -1109,6 +1113,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
unsigned long jiffies_at_alloc;
int retries, to_clear;
bool in_flight;
+ int budget_token = cmd->budget_token;
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
flags |= SCMD_INITIALIZED;
@@ -1137,6 +1142,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
cmd->retries = retries;
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+ cmd->budget_token = budget_token;
}
@@ -1220,19 +1226,20 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
}
/*
- * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
- * return 0.
- *
- * Called with the queue_lock held.
+ * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
+ * and return the token else return -1.
*/
static inline int scsi_dev_queue_ready(struct request_queue *q,
struct scsi_device *sdev)
{
- unsigned int busy;
+ int token;
- busy = atomic_inc_return(&sdev->device_busy) - 1;
+ token = sbitmap_get(&sdev->budget_map);
if (atomic_read(&sdev->device_blocked)) {
- if (busy)
+ if (token < 0)
+ goto out;
+
+ if (scsi_device_busy(sdev) > 1)
goto out_dec;
/*
@@ -1244,13 +1251,12 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
"unblocking device at zero depth\n"));
}
- if (busy >= sdev->queue_depth)
- goto out_dec;
-
- return 1;
+ return token;
out_dec:
- atomic_dec(&sdev->device_busy);
- return 0;
+ if (token >= 0)
+ sbitmap_put(&sdev->budget_map, token);
+out:
+ return -1;
}
/*
@@ -1394,10 +1400,14 @@ static bool scsi_mq_lld_busy(struct request_queue *q)
return false;
}
-static void scsi_softirq_done(struct request *rq)
+/*
+ * Block layer request completion callback. May be called from interrupt
+ * context.
+ */
+static void scsi_complete(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- int disposition;
+ enum scsi_disposition disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
@@ -1571,19 +1581,20 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
-static void scsi_mq_put_budget(struct request_queue *q)
+static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
struct scsi_device *sdev = q->queuedata;
- atomic_dec(&sdev->device_busy);
+ sbitmap_put(&sdev->budget_map, budget_token);
}
-static bool scsi_mq_get_budget(struct request_queue *q)
+static int scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
+ int token = scsi_dev_queue_ready(q, sdev);
- if (scsi_dev_queue_ready(q, sdev))
- return true;
+ if (token >= 0)
+ return token;
atomic_inc(&sdev->restarts);
@@ -1602,10 +1613,24 @@ static bool scsi_mq_get_budget(struct request_queue *q)
* the .restarts flag, and the request queue will be run for handling
* this request, see scsi_end_request().
*/
- if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
+ if (unlikely(scsi_device_busy(sdev) == 0 &&
!scsi_device_blocked(sdev)))
blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
- return false;
+ return -1;
+}
+
+static void scsi_mq_set_rq_budget_token(struct request *req, int token)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+ cmd->budget_token = token;
+}
+
+static int scsi_mq_get_rq_budget_token(struct request *req)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+ return cmd->budget_token;
}
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1619,6 +1644,8 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;
int reason;
+ WARN_ON_ONCE(cmd->budget_token < 0);
+
/*
* If the device is not in running state we will reject some or all
* commands.
@@ -1670,7 +1697,8 @@ out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
- scsi_mq_put_budget(q);
+ scsi_mq_put_budget(q, cmd->budget_token);
+ cmd->budget_token = -1;
switch (ret) {
case BLK_STS_OK:
break;
@@ -1750,6 +1778,26 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
}
+
+static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
+{
+ struct Scsi_Host *shost = hctx->driver_data;
+
+ if (shost->hostt->mq_poll)
+ return shost->hostt->mq_poll(shost, hctx->queue_num);
+
+ return 0;
+}
+
+static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct Scsi_Host *shost = data;
+
+ hctx->driver_data = shost;
+ return 0;
+}
+
static int scsi_map_queues(struct blk_mq_tag_set *set)
{
struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
@@ -1804,7 +1852,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
.get_budget = scsi_mq_get_budget,
.put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
- .complete = scsi_softirq_done,
+ .complete = scsi_complete,
.timeout = scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
.show_rq = scsi_show_rq,
@@ -1815,14 +1863,16 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
+ .init_hctx = scsi_init_hctx,
+ .poll = scsi_mq_poll,
+ .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+ .get_rq_budget_token = scsi_mq_get_rq_budget_token,
};
static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
- struct request_queue *q = hctx->queue;
- struct scsi_device *sdev = q->queuedata;
- struct Scsi_Host *shost = sdev->host;
+ struct Scsi_Host *shost = hctx->driver_data;
shost->hostt->commit_rqs(shost, hctx->queue_num);
}
@@ -1832,7 +1882,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
.put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
.commit_rqs = scsi_commit_rqs,
- .complete = scsi_softirq_done,
+ .complete = scsi_complete,
.timeout = scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
.show_rq = scsi_show_rq,
@@ -1843,6 +1893,10 @@ static const struct blk_mq_ops scsi_mq_ops = {
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
+ .init_hctx = scsi_init_hctx,
+ .poll = scsi_mq_poll,
+ .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+ .get_rq_budget_token = scsi_mq_get_rq_budget_token,
};
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -1875,6 +1929,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
else
tag_set->ops = &scsi_mq_ops_no_commit;
tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+ tag_set->nr_maps = shost->nr_maps ? : 1;
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = NUMA_NO_NODE;
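scsi_lib.c carries the core of the sbitmap conversion named in the merge message: scsi_dev_queue_ready() now hands out a budget token from sdev->budget_map instead of bumping an atomic counter, the token rides in cmd->budget_token through the blk-mq budget callbacks, and scsi_device_busy() becomes the bitmap weight. A single-threaded userspace model of the token lifecycle (assumed simplifications: no atomics, at most 64 tokens):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t budget_map;		/* bit set => token in use */
static const int depth = 8;		/* ~ sdev->queue_depth */

static int budget_get(void)		/* ~ sbitmap_get(&sdev->budget_map) */
{
	for (int t = 0; t < depth; t++) {
		if (!(budget_map & (1ULL << t))) {
			budget_map |= 1ULL << t;
			return t;	/* travels in cmd->budget_token */
		}
	}
	return -1;			/* no budget: queue not ready */
}

static void budget_put(int token)	/* ~ sbitmap_put() on completion */
{
	budget_map &= ~(1ULL << token);
}

static int device_busy(void)		/* ~ scsi_device_busy(): bitmap weight */
{
	return __builtin_popcountll(budget_map);
}

int main(void)
{
	int t1 = budget_get(), t2 = budget_get();

	printf("busy=%d\n", device_busy());	/* busy=2 */
	budget_put(t1);
	budget_put(t2);
	printf("busy=%d\n", device_busy());	/* busy=0 */
	return 0;
}
```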
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 180636d54982..75d6f23e4fff 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/async.h>
#include <scsi/scsi_device.h>
+#include <linux/sbitmap.h>
struct request_queue;
struct request;
@@ -73,7 +74,7 @@ extern void scsi_exit_devinfo(void);
extern void scmd_eh_abort_handler(struct work_struct *work);
extern enum blk_eh_timer_return scsi_times_out(struct request *req);
extern int scsi_error_handler(void *host);
-extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
+extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
extern void scsi_eh_scmd_add(struct scsi_cmnd *);
void scsi_eh_ready_devs(struct Scsi_Host *shost,
@@ -96,8 +97,6 @@ extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
-struct request_queue;
-struct request;
/* scsi_proc.c */
#ifdef CONFIG_SCSI_PROC_FS
@@ -182,6 +181,8 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
+extern int scsi_device_max_queue_depth(struct scsi_device *sdev);
+
/*
* internal scsi timeout functions: for use by mid-layer and transport
* classes.
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9b73aa506382..12f54571b83e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -215,6 +215,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
u64 lun, void *hostdata)
{
+ unsigned int depth;
struct scsi_device *sdev;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -276,8 +277,25 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
sdev->request_queue->queuedata = sdev;
- scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
- sdev->host->cmd_per_lun : 1);
+ depth = sdev->host->cmd_per_lun ?: 1;
+
+ /*
+ * Use .can_queue as budget map's depth because we have to
+ * support adjusting queue depth from sysfs. Meantime use
+ * default device queue depth to figure out sbitmap shift
+ * since we use this queue depth most of times.
+ */
+ if (sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ sbitmap_calculate_shift(depth),
+ GFP_KERNEL, sdev->request_queue->node,
+ false, true)) {
+ put_device(&starget->dev);
+ kfree(sdev);
+ goto out;
+ }
+
+ scsi_change_queue_depth(sdev, depth);
scsi_sysfs_device_initialize(sdev);
@@ -979,6 +997,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
scsi_attach_vpd(sdev);
sdev->max_queue_depth = sdev->queue_depth;
+ WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
sdev->sdev_bflags = *bflags;
/*
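scsi_alloc_sdev() sizes the budget map at scsi_device_max_queue_depth() but derives the sbitmap shift from the default per-LUN depth, since that is the depth most devices actually run at. A hedged re-derivation of the shift heuristic, modeled from memory on lib/sbitmap's sbitmap_calculate_shift() (treat the exact thresholds as an assumption): small depths get fewer bits per word so tokens spread across cachelines.

```c
#include <stdio.h>

static int calc_shift(unsigned int depth)	/* ~ sbitmap_calculate_shift() */
{
	int shift = 6;		/* log2(BITS_PER_LONG) on a 64-bit kernel */

	if (depth >= 4)
		while ((4U << shift) > depth)
			shift--;
	return shift;
}

int main(void)
{
	/* smaller default depths -> smaller shift -> fewer bits per word */
	printf("%d %d %d\n", calc_shift(8), calc_shift(64), calc_shift(1024));
	/* prints: 1 4 6 */
	return 0;
}
```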
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b71ea1a69c8b..32489d25158f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -475,6 +475,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
+ sbitmap_free(&sdev->budget_map);
+
mutex_lock(&sdev->inquiry_mutex);
vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
lockdep_is_held(&sdev->inquiry_mutex));
@@ -668,7 +670,7 @@ sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
- return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+ return snprintf(buf, 20, "%d\n", scsi_device_busy(sdev));
}
static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
@@ -1456,7 +1458,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
/*
* Paired with the kref_get() in scsi_sysfs_initialize(). We have
- * remoed sysfs visibility from the device, so make the target
+ * removed sysfs visibility from the device, so make the target
* invisible if this was the last device underneath it.
*/
scsi_target_reap(scsi_target(sdev));
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ed0b1bb99f08..cb3c37d1e009 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1519,7 +1519,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
/**
- * sd_ioctl - process an ioctl
+ * sd_ioctl_common - process an ioctl
* @bdev: target block device
* @mode: FMODE_* mask
* @cmd: ioctl command number
@@ -1623,6 +1623,7 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
struct scsi_disk *sdkp = scsi_disk_get(disk);
struct scsi_device *sdp;
int retval;
+ bool disk_changed;
if (!sdkp)
return 0;
@@ -1680,10 +1681,10 @@ out:
* Medium present state has changed in either direction.
* Device has indicated UNIT_ATTENTION.
*/
- retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ disk_changed = sdp->changed;
sdp->changed = 0;
scsi_disk_put(sdkp);
- return retval;
+ return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 994f1b8e3504..e45d8d94574c 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -134,7 +134,7 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
/**
- * Allocate a buffer for report zones reply.
+ * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
* @sdkp: The target disk
* @nr_zones: Maximum number of zones to report
* @buflen: Size of the buffer allocated
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 70f38715641e..def7ec3bbaf9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2498,7 +2498,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
- (int) atomic_read(&scsidp->device_busy),
+ (int) scsi_device_busy(scsidp),
(int) scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 22302612e032..e519df68d603 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -213,21 +213,19 @@ static struct eisa_driver sim710_eisa_driver = {
static int __init sim710_init(void)
{
- int err = -ENODEV;
-
#ifdef MODULE
if (sim710)
param_setup(sim710);
#endif
#ifdef CONFIG_EISA
- err = eisa_driver_register(&sim710_eisa_driver);
+ /*
+ * FIXME: We'd really like to return -ENODEV if no devices have actually
+ * been found. However eisa_driver_register() only reports problems
+ * with kobject_register() so simply return success for now.
+ */
+ eisa_driver_register(&sim710_eisa_driver);
#endif
- /* FIXME: what we'd really like to return here is -ENODEV if
- * no devices have actually been found. Instead, the err
- * above actually only reports problems with kobject_register,
- * so for the moment return success */
-
return 0;
}
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 3e54590e6e92..d7dac5572274 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -79,7 +79,8 @@ struct pqi_ctrl_registers {
__le32 sis_ctrl_to_host_doorbell_clear; /* A0h */
u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))];
__le32 sis_driver_scratch; /* B0h */
- u8 reserved5[0xbc - (0xb0 + sizeof(__le32))];
+ __le32 sis_product_identifier; /* B4h */
+ u8 reserved5[0xbc - (0xb4 + sizeof(__le32))];
__le32 sis_firmware_status; /* BCh */
u8 reserved6[0x1000 - (0xbc + sizeof(__le32))];
__le32 sis_mailbox[8]; /* 1000h */
@@ -128,10 +129,13 @@ struct pqi_iu_header {
__le16 iu_length; /* in bytes - does not include the length */
/* of this header */
__le16 response_queue_id; /* specifies the OQ where the */
- /* response IU is to be delivered */
- u8 work_area[2]; /* reserved for driver use */
+ /* response IU is to be delivered */
+ u16 driver_flags; /* reserved for driver use */
};
+/* manifest constants for pqi_iu_header.driver_flags */
+#define PQI_DRIVER_NONBLOCKABLE_REQUEST 0x1
+
/*
* According to the PQI spec, the IU header is only the first 4 bytes of our
* pqi_iu_header structure.
@@ -256,6 +260,7 @@ struct pqi_device_capability {
};
#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS 4
+#define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS 3
struct pqi_raid_path_request {
struct pqi_iu_header header;
@@ -279,8 +284,7 @@ struct pqi_raid_path_request {
u8 cdb[16];
u8 reserved6[12];
__le32 timeout;
- struct pqi_sg_descriptor
- sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
struct pqi_aio_path_request {
@@ -307,8 +311,73 @@ struct pqi_aio_path_request {
u8 cdb_length;
u8 lun_number[8];
u8 reserved4[4];
- struct pqi_sg_descriptor
- sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+#define PQI_RAID1_NVME_XFER_LIMIT (32 * 1024) /* 32 KiB */
+
+struct pqi_aio_r1_path_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 volume_id; /* ID of the RAID volume */
+ __le32 it_nexus_1; /* IT nexus of the 1st drive in the RAID volume */
+ __le32 it_nexus_2; /* IT nexus of the 2nd drive in the RAID volume */
+ __le32 it_nexus_3; /* IT nexus of the 3rd drive in the RAID volume */
+ __le32 data_length; /* total bytes to read/write */
+ u8 data_direction : 2;
+ u8 partial : 1;
+ u8 memory_type : 1;
+ u8 fence : 1;
+ u8 encryption_enable : 1;
+ u8 reserved : 2;
+ u8 task_attribute : 3;
+ u8 command_priority : 4;
+ u8 reserved2 : 1;
+ __le16 data_encryption_key_index;
+ u8 cdb[16];
+ __le16 error_index;
+ u8 num_sg_descriptors;
+ u8 cdb_length;
+ u8 num_drives; /* number of drives in the RAID volume (2 or 3) */
+ u8 reserved3[3];
+ __le32 encrypt_tweak_lower;
+ __le32 encrypt_tweak_upper;
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+#define PQI_DEFAULT_MAX_WRITE_RAID_5_6 (8 * 1024U)
+#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA (~0U)
+#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME (32 * 1024U)
+
+struct pqi_aio_r56_path_request {
+ struct pqi_iu_header header;
+ __le16 request_id;
+ __le16 volume_id; /* ID of the RAID volume */
+ __le32 data_it_nexus; /* IT nexus for the data drive */
+ __le32 p_parity_it_nexus; /* IT nexus for the P parity drive */
+ __le32 q_parity_it_nexus; /* IT nexus for the Q parity drive */
+ __le32 data_length; /* total bytes to read/write */
+ u8 data_direction : 2;
+ u8 partial : 1;
+ u8 mem_type : 1; /* 0 = PCIe, 1 = DDR */
+ u8 fence : 1;
+ u8 encryption_enable : 1;
+ u8 reserved : 2;
+ u8 task_attribute : 3;
+ u8 command_priority : 4;
+ u8 reserved1 : 1;
+ __le16 data_encryption_key_index;
+ u8 cdb[16];
+ __le16 error_index;
+ u8 num_sg_descriptors;
+ u8 cdb_length;
+ u8 xor_multiplier;
+ u8 reserved2[3];
+ __le32 encrypt_tweak_lower;
+ __le32 encrypt_tweak_upper;
+ __le64 row; /* row = logical LBA/blocks per row */
+ u8 reserved3[8];
+ struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS];
};
struct pqi_io_response {
@@ -353,7 +422,7 @@ struct pqi_event_config {
#define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0
#define PQI_EVENT_OFA_QUIESCE 0x1
-#define PQI_EVENT_OFA_CANCELLED 0x2
+#define PQI_EVENT_OFA_CANCELED 0x2
struct pqi_event_response {
struct pqi_iu_header header;
@@ -442,10 +511,6 @@ struct pqi_vendor_general_response {
#define PQI_OFA_SIGNATURE "OFA_QRM"
#define PQI_OFA_MAX_SG_DESCRIPTORS 64
-#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \
- (offsetof(struct pqi_ofa_memory, sg_descriptor) + \
- (PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor)))
-
struct pqi_ofa_memory {
__le64 signature; /* "OFA_QRM" */
__le16 version; /* version of this struct (1 = 1st version) */
@@ -453,7 +518,7 @@ struct pqi_ofa_memory {
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
u8 reserved1[2];
- struct pqi_sg_descriptor sg_descriptor[1];
+ struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
};
struct pqi_aio_error_info {
@@ -483,6 +548,9 @@ struct pqi_raid_error_info {
#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
#define PQI_REQUEST_IU_RAID_PATH_IO 0x14
#define PQI_REQUEST_IU_AIO_PATH_IO 0x15
+#define PQI_REQUEST_IU_AIO_PATH_RAID5_IO 0x18
+#define PQI_REQUEST_IU_AIO_PATH_RAID6_IO 0x19
+#define PQI_REQUEST_IU_AIO_PATH_RAID1_IO 0x1A
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
@@ -585,6 +653,7 @@ struct pqi_raid_error_info {
/* these values are defined by the PQI spec */
#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE 255
#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535
+
#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT 64
#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT 16
#define PQI_ADMIN_INDEX_ALIGNMENT 64
@@ -654,7 +723,7 @@ struct pqi_admin_queues_aligned {
struct pqi_admin_queues {
void *iq_element_array;
void *oq_element_array;
- pqi_index_t *iq_ci;
+ pqi_index_t __iomem *iq_ci;
pqi_index_t __iomem *oq_pi;
dma_addr_t iq_element_array_bus_addr;
dma_addr_t oq_element_array_bus_addr;
@@ -679,8 +748,8 @@ struct pqi_queue_group {
dma_addr_t oq_element_array_bus_addr;
__le32 __iomem *iq_pi[2];
pqi_index_t iq_pi_copy[2];
- pqi_index_t __iomem *iq_ci[2];
- pqi_index_t __iomem *oq_pi;
+ pqi_index_t __iomem *iq_ci[2];
+ pqi_index_t __iomem *oq_pi;
dma_addr_t iq_ci_bus_addr[2];
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
@@ -693,7 +762,7 @@ struct pqi_event_queue {
u16 oq_id;
u16 int_msg_num;
void *oq_element_array;
- pqi_index_t __iomem *oq_pi;
+ pqi_index_t __iomem *oq_pi;
dma_addr_t oq_element_array_bus_addr;
dma_addr_t oq_pi_bus_addr;
__le32 __iomem *oq_ci;
@@ -759,13 +828,29 @@ struct pqi_config_table_firmware_features {
u8 features_supported[];
/* u8 features_requested_by_host[]; */
/* u8 features_enabled[]; */
-};
-
-#define PQI_FIRMWARE_FEATURE_OFA 0
-#define PQI_FIRMWARE_FEATURE_SMP 1
-#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
-#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
-#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
+/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */
+/* __le16 firmware_max_known_feature; */
+/* __le16 host_max_known_feature; */
+};
+
+#define PQI_FIRMWARE_FEATURE_OFA 0
+#define PQI_FIRMWARE_FEATURE_SMP 1
+#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE 2
+#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS 3
+#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS 4
+#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS 5
+#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS 6
+#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS 7
+#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS 8
+#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS 9
+#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS 10
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN 12
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
+#define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME 15
+#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 16
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -906,8 +991,65 @@ struct raid_map {
#pragma pack()
+struct pqi_scsi_dev_raid_map_data {
+ bool is_write;
+ u8 raid_level;
+ u32 map_index;
+ u64 first_block;
+ u64 last_block;
+ u32 data_length;
+ u32 block_cnt;
+ u32 blocks_per_row;
+ u64 first_row;
+ u64 last_row;
+ u32 first_row_offset;
+ u32 last_row_offset;
+ u32 first_column;
+ u32 last_column;
+ u64 r5or6_first_row;
+ u64 r5or6_last_row;
+ u32 r5or6_first_row_offset;
+ u32 r5or6_last_row_offset;
+ u32 r5or6_first_column;
+ u32 r5or6_last_column;
+ u16 data_disks_per_row;
+ u32 total_disks_per_row;
+ u16 layout_map_count;
+ u32 stripesize;
+ u16 strip_size;
+ u32 first_group;
+ u32 last_group;
+ u32 map_row;
+ u32 aio_handle;
+ u64 disk_block;
+ u32 disk_block_cnt;
+ u8 cdb[16];
+ u8 cdb_length;
+
+ /* RAID 1 specific */
+#define NUM_RAID1_MAP_ENTRIES 3
+ u32 num_it_nexus_entries;
+ u32 it_nexus[NUM_RAID1_MAP_ENTRIES];
+
+ /* RAID 5 / RAID 6 specific */
+ u32 p_parity_it_nexus; /* aio_handle */
+ u32 q_parity_it_nexus; /* aio_handle */
+ u8 xor_mult;
+ u64 row;
+ u64 stripe_lba;
+ u32 p_index;
+ u32 q_index;
+};
+
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+#define NUM_STREAMS_PER_LUN 8
+
+struct pqi_stream_data {
+ u64 next_lba;
+ u32 last_accessed;
+};
+
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY commmand */
u8 device_type; /* as reported by */
@@ -929,7 +1071,6 @@ struct pqi_scsi_dev {
u8 volume_offline : 1;
u8 rescan : 1;
bool aio_enabled; /* only valid for physical disks */
- bool in_reset;
bool in_remove;
bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
@@ -948,11 +1089,12 @@ struct pqi_scsi_dev {
u8 phy_connected_dev_type;
u8 box[8];
u16 phys_connector[8];
+ u8 phy_id;
bool raid_bypass_configured; /* RAID bypass configured */
bool raid_bypass_enabled; /* RAID bypass enabled */
- int offload_to_mirror; /* Send next RAID bypass request */
- /* to mirror drive. */
+ u32 next_bypass_group;
struct raid_map *raid_map; /* RAID bypass map */
+ u32 max_transfer_encrypted;
struct pqi_sas_port *sas_port;
struct scsi_device *sdev;
@@ -962,8 +1104,10 @@ struct pqi_scsi_dev {
struct list_head add_list_entry;
struct list_head delete_list_entry;
+ struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding;
atomic_t raid_bypass_cnt;
+ u8 page_83_identifier[16];
};
/* VPD inquiry pages */
@@ -1069,10 +1213,8 @@ struct pqi_io_request {
struct pqi_event {
bool pending;
u8 event_type;
- __le16 event_id;
- __le32 additional_event_id;
- __le32 ofa_bytes_requested;
- __le16 ofa_cancel_reason;
+ u16 event_id;
+ u32 additional_event_id;
};
#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1
@@ -1082,13 +1224,20 @@ struct pqi_event {
(PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
+#define PQI_CTRL_PRODUCT_ID_GEN1 0
+#define PQI_CTRL_PRODUCT_ID_GEN2 7
+#define PQI_CTRL_PRODUCT_REVISION_A 0
+#define PQI_CTRL_PRODUCT_REVISION_B 1
+
struct pqi_ctrl_info {
unsigned int ctrl_id;
struct pci_dev *pci_dev;
- char firmware_version[11];
+ char firmware_version[32];
char serial_number[17];
char model[17];
char vendor[9];
+ u8 product_id;
+ u8 product_revision;
void __iomem *iomem_base;
struct pqi_ctrl_registers __iomem *registers;
struct pqi_device_registers __iomem *pqi_registers;
@@ -1118,6 +1267,7 @@ struct pqi_ctrl_info {
u16 max_inbound_iu_length_per_firmware;
u16 max_inbound_iu_length;
unsigned int max_sg_per_iu;
+ unsigned int max_sg_per_r56_iu;
void *admin_queue_memory_base;
u32 admin_queue_memory_length;
dma_addr_t admin_queue_memory_base_dma_handle;
@@ -1136,19 +1286,29 @@ struct pqi_ctrl_info {
struct mutex scan_mutex;
struct mutex lun_reset_mutex;
- struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
- bool block_device_reset;
- bool in_ofa;
- bool in_shutdown;
+ bool scan_blocked;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 pqi_reset_quiesce_supported : 1;
u8 soft_reset_handshake_supported : 1;
- u8 raid_iu_timeout_supported: 1;
- u8 tmf_iu_timeout_supported: 1;
+ u8 raid_iu_timeout_supported : 1;
+ u8 tmf_iu_timeout_supported : 1;
+ u8 unique_wwid_in_report_phys_lun_supported : 1;
+ u8 enable_r1_writes : 1;
+ u8 enable_r5_writes : 1;
+ u8 enable_r6_writes : 1;
+ u8 lv_drive_type_mix_valid : 1;
+ u8 enable_stream_detection : 1;
+
+ u8 ciss_report_log_flags;
+ u32 max_transfer_encrypted_sas_sata;
+ u32 max_transfer_encrypted_nvme;
+ u32 max_write_raid_5_6;
+ u32 max_write_raid_1_10_2drive;
+ u32 max_write_raid_1_10_3drive;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1178,14 +1338,14 @@ struct pqi_ctrl_info {
atomic_t num_blocked_threads;
wait_queue_head_t block_requests_wait;
- struct list_head raid_bypass_retry_list;
- spinlock_t raid_bypass_retry_list_lock;
- struct work_struct raid_bypass_retry_work;
-
+ struct mutex ofa_mutex;
struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
dma_addr_t pqi_ofa_mem_dma_handle;
void **pqi_ofa_chunk_virt_addr;
- atomic_t sync_cmds_outstanding;
+ struct work_struct ofa_memory_alloc_work;
+ struct work_struct ofa_quiesce_work;
+ u32 ofa_bytes_requested;
+ u16 ofa_cancel_reason;
};
enum pqi_ctrl_mode {
@@ -1209,6 +1369,7 @@ enum pqi_ctrl_mode {
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
+#define BMIC_SENSE_FEATURE 0x61
#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
#define BMIC_CSMI_PASSTHRU 0x68
@@ -1228,6 +1389,19 @@ enum pqi_ctrl_mode {
(((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
CISS_GET_LEVEL_2_TARGET((lunid)))
+#define LV_GET_DRIVE_TYPE_MIX(lunid) ((lunid)[6])
+
+#define LV_DRIVE_TYPE_MIX_UNKNOWN 0
+#define LV_DRIVE_TYPE_MIX_NO_RESTRICTION 1
+#define LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY 2
+#define LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY 3
+#define LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY 4
+#define LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY 5
+#define LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY 6
+#define LV_DRIVE_TYPE_MIX_SAS_ONLY 7
+#define LV_DRIVE_TYPE_MIX_SATA_ONLY 8
+#define LV_DRIVE_TYPE_MIX_NVME_ONLY 9
+
#define NO_TIMEOUT ((unsigned long) -1)
#pragma pack(1)
@@ -1235,7 +1409,7 @@ enum pqi_ctrl_mode {
struct bmic_identify_controller {
u8 configured_logical_drive_count;
__le32 configuration_signature;
- u8 firmware_version[4];
+ u8 firmware_version_short[4];
u8 reserved[145];
__le16 extended_logical_unit_count;
u8 reserved1[34];
@@ -1243,11 +1417,17 @@ struct bmic_identify_controller {
u8 reserved2[8];
u8 vendor_id[8];
u8 product_id[16];
- u8 reserved3[68];
+ u8 reserved3[62];
+ __le32 extra_controller_flags;
+ u8 reserved4[2];
u8 controller_mode;
- u8 reserved4[32];
+ u8 spare_part_number[32];
+ u8 firmware_version_long[32];
};
+/* constants for extra_controller_flags field of bmic_identify_controller */
+#define BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED 0x20000000
+
struct bmic_sense_subsystem_info {
u8 reserved[44];
u8 ctrl_serial_number[16];
@@ -1341,6 +1521,34 @@ struct bmic_identify_physical_device {
u8 padding_to_multiple_of_512[9];
};
+#define BMIC_SENSE_FEATURE_IO_PAGE 0x8
+#define BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE 0x2
+
+struct bmic_sense_feature_buffer_header {
+ u8 page_code;
+ u8 subpage_code;
+ __le16 buffer_length;
+};
+
+struct bmic_sense_feature_page_header {
+ u8 page_code;
+ u8 subpage_code;
+ __le16 page_length;
+};
+
+struct bmic_sense_feature_io_page_aio_subpage {
+ struct bmic_sense_feature_page_header header;
+ u8 firmware_read_support;
+ u8 driver_read_support;
+ u8 firmware_write_support;
+ u8 driver_write_support;
+ __le16 max_transfer_encrypted_sas_sata;
+ __le16 max_transfer_encrypted_nvme;
+ __le16 max_write_raid_5_6;
+ __le16 max_write_raid_1_10_2drive;
+ __le16 max_write_raid_1_10_3drive;
+};
+
struct bmic_smp_request {
u8 frame_type;
u8 function;
@@ -1408,16 +1616,6 @@ struct bmic_diag_options {
#pragma pack()
-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
- atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
- atomic_dec(&ctrl_info->num_busy_threads);
-}
-
static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
void *hostdata = shost_priv(shost);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a1dacb6e993e..5db16509b6e1 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.16-012"
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 16
-#define DRIVER_REVISION 12
+#define DRIVER_VERSION "2.1.8-045"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 1
+#define DRIVER_RELEASE 8
+#define DRIVER_REVISION 45
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -45,6 +45,9 @@
#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
+#define PQI_POST_RESET_DELAY_SECS 5
+#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
+
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
DRIVER_VERSION);
@@ -53,7 +56,6 @@ MODULE_LICENSE("GPL");
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
@@ -61,20 +63,27 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+ struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd);
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
- u32 bytes_requested);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_secs);
+ struct pqi_scsi_dev *device, unsigned long timeout_msecs);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -147,14 +156,12 @@ MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
pqi_expose_ld_first, int, 0644);
-MODULE_PARM_DESC(expose_ld_first,
- "Expose logical drives before physical drives.");
+MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
static int pqi_hide_vsep;
module_param_named(hide_vsep,
pqi_hide_vsep, int, 0644);
-MODULE_PARM_DESC(hide_vsep,
- "Hide the virtual SEP for direct attached drives.");
+MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
static char *raid_levels[] = {
"RAID-0",
@@ -162,8 +169,8 @@ static char *raid_levels[] = {
"RAID-1(1+0)",
"RAID-5",
"RAID-5+1",
- "RAID-ADG",
- "RAID-1(ADM)",
+ "RAID-6",
+ "RAID-1(Triple)",
};
static char *pqi_raid_level_to_string(u8 raid_level)
@@ -180,8 +187,8 @@ static char *pqi_raid_level_to_string(u8 raid_level)
#define SA_RAID_5 3 /* also used for RAID 50 */
#define SA_RAID_51 4
#define SA_RAID_6 5 /* also used for RAID 60 */
-#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
-#define SA_RAID_MAX SA_RAID_ADM
+#define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */
+#define SA_RAID_MAX SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN 0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
@@ -227,8 +234,7 @@ static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
-static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
- struct pqi_ctrl_info *ctrl_info)
+static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
return sis_read_driver_scratch(ctrl_info);
}
@@ -239,14 +245,66 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
sis_write_driver_scratch(ctrl_info, mode);
}
+static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->scan_blocked = true;
+ mutex_lock(&ctrl_info->scan_mutex);
+}
+
+static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->scan_blocked = false;
+ mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->scan_blocked;
+}
+
static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->block_device_reset = true;
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+}
+
+static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
+static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ struct Scsi_Host *shost;
+ unsigned int num_loops;
+ int msecs_sleep;
+
+ shost = ctrl_info->scsi_host;
+
+ scsi_block_requests(shost);
+
+ num_loops = 0;
+ msecs_sleep = 20;
+ while (scsi_host_busy(shost)) {
+ num_loops++;
+ if (num_loops == 10)
+ msecs_sleep = 500;
+ msleep(msecs_sleep);
+ }
}
-static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
- return ctrl_info->block_device_reset;
+ scsi_unblock_requests(ctrl_info->scsi_host);
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_dec(&ctrl_info->num_busy_threads);
}
static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
@@ -257,51 +315,53 @@ static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = true;
- scsi_block_requests(ctrl_info->scsi_host);
}
static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = false;
wake_up_all(&ctrl_info->block_requests_wait);
- pqi_retry_raid_bypass_requests(ctrl_info);
- scsi_unblock_requests(ctrl_info->scsi_host);
}
-static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
- unsigned long timeout_msecs)
+static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
- unsigned long remaining_msecs;
-
if (!pqi_ctrl_blocked(ctrl_info))
- return timeout_msecs;
+ return;
atomic_inc(&ctrl_info->num_blocked_threads);
-
- if (timeout_msecs == NO_TIMEOUT) {
- wait_event(ctrl_info->block_requests_wait,
- !pqi_ctrl_blocked(ctrl_info));
- remaining_msecs = timeout_msecs;
- } else {
- unsigned long remaining_jiffies;
-
- remaining_jiffies =
- wait_event_timeout(ctrl_info->block_requests_wait,
- !pqi_ctrl_blocked(ctrl_info),
- msecs_to_jiffies(timeout_msecs));
- remaining_msecs = jiffies_to_msecs(remaining_jiffies);
- }
-
+ wait_event(ctrl_info->block_requests_wait,
+ !pqi_ctrl_blocked(ctrl_info));
atomic_dec(&ctrl_info->num_blocked_threads);
-
- return remaining_msecs;
}
+#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10
+
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ bool displayed_warning;
+
+ displayed_warning = false;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+
while (atomic_read(&ctrl_info->num_busy_threads) >
- atomic_read(&ctrl_info->num_blocked_threads))
+ atomic_read(&ctrl_info->num_blocked_threads)) {
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "waiting %u seconds for driver activity to quiesce\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+ displayed_warning = true;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ }
usleep_range(1000, 2000);
+ }
+
+ if (displayed_warning)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "driver activity quiesced after waiting for %u seconds\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
@@ -309,34 +369,25 @@ static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
return device->device_offline;
}
-static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
-{
- device->in_reset = true;
-}
-
-static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
-{
- device->in_reset = false;
-}
-
-static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
- return device->in_reset;
+ mutex_lock(&ctrl_info->ofa_mutex);
}
-static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->in_ofa = true;
+ mutex_unlock(&ctrl_info->ofa_mutex);
}
-static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->in_ofa = false;
+ mutex_lock(&ctrl_info->ofa_mutex);
+ mutex_unlock(&ctrl_info->ofa_mutex);
}
-static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
- return ctrl_info->in_ofa;
+ return mutex_is_locked(&ctrl_info->ofa_mutex);
}
static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
@@ -349,23 +400,27 @@ static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
return device->in_remove;
}
-static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
- ctrl_info->in_shutdown = true;
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
+ if (event_type == pqi_supported_event_types[index])
+ return index;
+
+ return -1;
}
-static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_is_supported_event(unsigned int event_type)
{
- return ctrl_info->in_shutdown;
+ return pqi_event_type_to_event_index(event_type) != -1;
}
-static inline void pqi_schedule_rescan_worker_with_delay(
- struct pqi_ctrl_info *ctrl_info, unsigned long delay)
+static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
+ unsigned long delay)
{
if (pqi_ctrl_offline(ctrl_info))
return;
- if (pqi_ctrl_in_ofa(ctrl_info))
- return;
schedule_delayed_work(&ctrl_info->rescan_work, delay);
}
@@ -377,8 +432,7 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
-static inline void pqi_schedule_rescan_worker_delayed(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}
@@ -388,11 +442,6 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
-static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
-{
- cancel_work_sync(&ctrl_info->event_work);
-}
-
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
if (!ctrl_info->heartbeat_counter)
@@ -403,22 +452,15 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
- if (!ctrl_info->soft_reset_status)
- return 0;
-
return readb(ctrl_info->soft_reset_status);
}
-static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
- u8 clear)
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
u8 status;
- if (!ctrl_info->soft_reset_status)
- return;
-
status = pqi_read_soft_reset_status(ctrl_info);
- status &= ~clear;
+ status &= ~PQI_SOFT_RESET_ABORT;
writeb(status, ctrl_info->soft_reset_status);
}
@@ -497,7 +539,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
if (cmd == CISS_REPORT_PHYS)
cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
else
- cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+ cdb[1] = ctrl_info->ciss_report_log_flags;
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -507,6 +549,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case SA_FLUSH_CACHE:
+ request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
cdb[6] = BMIC_FLUSH_CACHE;
@@ -518,6 +561,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
case BMIC_IDENTIFY_CONTROLLER:
case BMIC_IDENTIFY_PHYSICAL_DEVICE:
case BMIC_SENSE_SUBSYSTEM_INFORMATION:
+ case BMIC_SENSE_FEATURE:
request->data_direction = SOP_READ_FLAG;
cdb[0] = BMIC_READ;
cdb[6] = cmd;
@@ -600,20 +644,18 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+ struct pqi_raid_error_info *error_info)
{
int rc;
struct pqi_raid_path_request request;
enum dma_data_direction dir;
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- cmd, scsi3addr, buffer,
- buffer_length, vpd_page, &dir);
+ rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
+ buffer, buffer_length, vpd_page, &dir);
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- error_info, timeout_msecs);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
@@ -626,7 +668,7 @@ static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
u8 cmd, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+ buffer, buffer_length, 0, NULL);
}
static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
@@ -634,7 +676,7 @@ static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_error_info *error_info)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+ buffer, buffer_length, 0, error_info);
}
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
@@ -656,7 +698,7 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
- buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+ buffer, buffer_length, vpd_page, NULL);
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
@@ -678,27 +720,116 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
request.cdb[2] = (u8)bmic_device_index;
request.cdb[9] = (u8)(bmic_device_index >> 8);
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
+static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
+{
+ u32 bytes;
+
+ bytes = get_unaligned_le16(limit);
+ if (bytes == 0)
+ bytes = ~0;
+ else
+ bytes *= 1024;
+
+ return bytes;
+}
+
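pqi_aio_limit_to_bytes() decodes the convention used by the AIO subpage limit fields: each is a little-endian 16-bit count of KiB, and 0 means "no limit". A standalone sketch of that decoding with hypothetical inputs (the little-endian fetch via get_unaligned_le16() is omitted; the helper here takes the already-decoded value):

#include <stdint.h>
#include <stdio.h>

/* 0 => unlimited (all-ones), otherwise the field counts KiB. */
static uint32_t aio_limit_to_bytes(uint16_t limit_kib)
{
	return limit_kib == 0 ? ~0u : (uint32_t)limit_kib * 1024;
}

int main(void)
{
	printf("%u\n", aio_limit_to_bytes(0));	/* 4294967295: unlimited */
	printf("%u\n", aio_limit_to_bytes(64));	/* 65536: a 64 KiB cap */
	return 0;
}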
+#pragma pack(1)
+
+struct bmic_sense_feature_buffer {
+ struct bmic_sense_feature_buffer_header header;
+ struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
+};
+
+#pragma pack()
+
+#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
+ offsetofend(struct bmic_sense_feature_buffer, \
+ aio_subpage.max_write_raid_1_10_3drive)
+
+#define MINIMUM_AIO_SUBPAGE_LENGTH \
+ (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
+ max_write_raid_1_10_3drive) - \
+ sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
+
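The two MINIMUM_* macros deliberately check only that the response is long enough to contain the last field the driver reads, so firmware that appends new fields to the subpage stays compatible with older drivers. A minimal sketch of the offsetofend() technique against a simplified, hypothetical struct (the kernel's offsetofend() lives in <linux/stddef.h>; it is redefined here for a standalone build):

#include <stddef.h>
#include <stdint.h>

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

struct subpage {
	uint8_t page_code;
	uint8_t subpage_code;
	uint16_t page_length;
	uint16_t max_write_raid_5_6;	/* last field the driver needs */
	/* firmware may append more fields here */
};

/* Accept any buffer at least this long. */
#define MIN_SUBPAGE_LEN offsetofend(struct subpage, max_write_raid_5_6)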
+static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ enum dma_data_direction dir;
+ struct pqi_raid_path_request request;
+ struct bmic_sense_feature_buffer *buffer;
+
+ buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
+ buffer, sizeof(*buffer), 0, &dir);
+ if (rc)
+ goto error;
+
+ request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
+ request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
+
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
+
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
+ if (rc)
+ goto error;
+
+ if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
+ buffer->header.subpage_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+ get_unaligned_le16(&buffer->header.buffer_length) <
+ MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
+ buffer->aio_subpage.header.page_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE ||
+ buffer->aio_subpage.header.subpage_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+ get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
+ MINIMUM_AIO_SUBPAGE_LENGTH) {
+ goto error;
+ }
+
+ ctrl_info->max_transfer_encrypted_sas_sata =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
+
+ ctrl_info->max_transfer_encrypted_nvme =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_transfer_encrypted_nvme);
+
+ ctrl_info->max_write_raid_5_6 =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_5_6);
+
+ ctrl_info->max_write_raid_1_10_2drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_2drive);
+
+ ctrl_info->max_write_raid_1_10_3drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_3drive);
+
+error:
+ kfree(buffer);
+
+ return rc;
+}
+
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
enum bmic_flush_cache_shutdown_event shutdown_event)
{
int rc;
struct bmic_flush_cache *flush_cache;
- /*
- * Don't bother trying to flush the cache if the controller is
- * locked up.
- */
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
-
flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
if (!flush_cache)
return -ENOMEM;
@@ -877,9 +1008,6 @@ static void pqi_update_time_worker(struct work_struct *work)
ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
update_time_work);
- if (pqi_ctrl_offline(ctrl_info))
- return;
-
rc = pqi_write_current_time_to_host_wellness(ctrl_info);
if (rc)
dev_warn(&ctrl_info->pci_dev->dev,
@@ -889,27 +1017,23 @@ static void pqi_update_time_worker(struct work_struct *work)
PQI_UPDATE_TIME_WORK_INTERVAL);
}
-static inline void pqi_schedule_update_time_worker(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
schedule_delayed_work(&ctrl_info->update_time_work, 0);
}
-static inline void pqi_cancel_update_time_worker(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
-static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- void *buffer, size_t buffer_length)
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
+ size_t buffer_length)
{
- return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
- buffer_length);
+ return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}
-static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- void **buffer)
+static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
int rc;
size_t lun_list_length;
@@ -924,8 +1048,7 @@ static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
goto out;
}
- rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
- sizeof(*report_lun_header));
+ rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
if (rc)
goto out;
@@ -949,8 +1072,8 @@ again:
if (rc)
goto out;
- new_lun_list_length = get_unaligned_be32(
- &((struct report_lun_header *)lun_data)->list_length);
+ new_lun_list_length =
+ get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
if (new_lun_list_length > lun_list_length) {
lun_list_length = new_lun_list_length;
@@ -971,15 +1094,12 @@ out:
return rc;
}
-static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
- void **buffer)
+static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
- return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
- buffer);
+ return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
}
-static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
- void **buffer)
+static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
@@ -1136,9 +1256,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
err_msg = "invalid RAID-1 map";
goto bad_raid_map;
}
- } else if (device->raid_level == SA_RAID_ADM) {
+ } else if (device->raid_level == SA_RAID_TRIPLE) {
if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
- err_msg = "invalid RAID-1(ADM) map";
+ err_msg = "invalid RAID-1(Triple) map";
goto bad_raid_map;
}
} else if ((device->raid_level == SA_RAID_5 ||
@@ -1177,9 +1297,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
return -ENOMEM;
rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
- device->scsi3addr, raid_map, sizeof(*raid_map),
- 0, NULL, NO_TIMEOUT);
-
+ device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
if (rc)
goto error;
@@ -1194,15 +1312,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
return -ENOMEM;
rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
- device->scsi3addr, raid_map, raid_map_size,
- 0, NULL, NO_TIMEOUT);
+ device->scsi3addr, raid_map, raid_map_size, 0, NULL);
if (rc)
goto error;
if (get_unaligned_le32(&raid_map->structure_size)
!= raid_map_size) {
dev_warn(&ctrl_info->pci_dev->dev,
- "Requested %d bytes, received %d bytes",
+ "requested %u bytes, received %u bytes\n",
raid_map_size,
get_unaligned_le32(&raid_map->structure_size));
goto error;
@@ -1223,6 +1340,39 @@ error:
return rc;
}
+static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ if (!ctrl_info->lv_drive_type_mix_valid) {
+ device->max_transfer_encrypted = ~0;
+ return;
+ }
+
+ switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
+ case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_ONLY:
+ device->max_transfer_encrypted =
+ ctrl_info->max_transfer_encrypted_sas_sata;
+ break;
+ case LV_DRIVE_TYPE_MIX_NVME_ONLY:
+ device->max_transfer_encrypted =
+ ctrl_info->max_transfer_encrypted_nvme;
+ break;
+ case LV_DRIVE_TYPE_MIX_UNKNOWN:
+ case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
+ default:
+ device->max_transfer_encrypted =
+ min(ctrl_info->max_transfer_encrypted_sas_sata,
+ ctrl_info->max_transfer_encrypted_nvme);
+ break;
+ }
+}
+
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
@@ -1248,8 +1398,12 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
if (device->raid_bypass_configured &&
(bypass_status & RAID_BYPASS_ENABLED) &&
- pqi_get_raid_map(ctrl_info, device) == 0)
+ pqi_get_raid_map(ctrl_info, device) == 0) {
device->raid_bypass_enabled = true;
+ if (get_unaligned_le16(&device->raid_map->flags) &
+ RAID_MAP_ENCRYPTION_ENABLED)
+ pqi_set_max_transfer_encrypted(ctrl_info, device);
+ }
out:
kfree(buffer);
@@ -1297,6 +1451,8 @@ no_buffer:
device->volume_offline = volume_offline;
}
+#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
+
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *id_phys)
@@ -1333,6 +1489,16 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
+ memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
+ sizeof(device->page_83_identifier));
+
+ if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
+ id_phys->phy_count)
+ device->phy_id =
+ id_phys->phy_to_phy_map[device->active_path_index];
+ else
+ device->phy_id = 0xFF;
+
return 0;
}
@@ -1520,16 +1686,16 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
return rc;
}
-#define PQI_PENDING_IO_TIMEOUT_SECS 20
+#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
-static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
pqi_device_remove_start(device);
- rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
if (rc)
dev_err(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
@@ -1557,8 +1723,7 @@ static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
return NULL;
}
-static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
- struct pqi_scsi_dev *dev2)
+static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
if (dev1->is_physical_device != dev2->is_physical_device)
return false;
@@ -1566,8 +1731,7 @@ static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
if (dev1->is_physical_device)
return dev1->wwid == dev2->wwid;
- return memcmp(dev1->volume_id, dev2->volume_id,
- sizeof(dev1->volume_id)) == 0;
+ return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
@@ -1612,7 +1776,7 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
ssize_t count;
char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
- count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
+ count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
if (device->target_lun_valid)
@@ -1670,7 +1834,6 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
struct pqi_scsi_dev *new_device)
{
- existing_device->devtype = new_device->devtype;
existing_device->device_type = new_device->device_type;
existing_device->bus = new_device->bus;
if (new_device->target_lun_valid) {
@@ -1702,17 +1865,17 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
existing_device->aio_handle = new_device->aio_handle;
existing_device->volume_status = new_device->volume_status;
existing_device->active_path_index = new_device->active_path_index;
+ existing_device->phy_id = new_device->phy_id;
existing_device->path_map = new_device->path_map;
existing_device->bay = new_device->bay;
existing_device->box_index = new_device->box_index;
existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
- existing_device->phy_connected_dev_type =
- new_device->phy_connected_dev_type;
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
memcpy(existing_device->box, new_device->box,
sizeof(existing_device->box));
memcpy(existing_device->phys_connector, new_device->phys_connector,
sizeof(existing_device->phys_connector));
- existing_device->offload_to_mirror = 0;
+ existing_device->next_bypass_group = 0;
kfree(existing_device->raid_map);
existing_device->raid_map = new_device->raid_map;
existing_device->raid_bypass_configured =
@@ -1843,8 +2006,18 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- if (pqi_ctrl_in_ofa(ctrl_info))
- pqi_ctrl_ofa_done(ctrl_info);
+ /*
+ * If OFA is in progress and there are devices that need to be deleted,
+ * allow any pending reset operations to continue and unblock any SCSI
+ * requests before removal.
+ */
+ if (pqi_ofa_in_progress(ctrl_info)) {
+ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
+ if (pqi_is_device_added(device))
+ pqi_device_remove_start(device);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ }
/* Remove all devices that have gone away. */
list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
@@ -1866,15 +2039,10 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
* Notify the SCSI ML if the queue depth of any existing device has
* changed.
*/
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (device->sdev) {
- if (device->queue_depth !=
- device->advertised_queue_depth) {
- device->advertised_queue_depth = device->queue_depth;
- scsi_change_queue_depth(device->sdev,
- device->advertised_queue_depth);
- }
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
if (device->rescan) {
scsi_rescan_device(&device->sdev->sdev_gendev);
device->rescan = false;
@@ -1910,7 +2078,7 @@ static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
*/
if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
!pqi_is_hba_lunid(device->scsi3addr))
- return false;
+ return false;
return true;
}
@@ -1943,8 +2111,17 @@ static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
- return !device->is_physical_device ||
- !pqi_skip_device(device->scsi3addr);
+ return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
+}
+
+static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+{
+ if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
+ pqi_is_device_with_sas_address(device))
+ device->wwid = phys_lun_ext_entry->wwid;
+ else
+ device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier));
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -2008,17 +2185,18 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
for (i = num_physicals - 1; i >= 0; i--) {
phys_lun_ext_entry =
&physdev_list->lun_entries[i];
- if (CISS_GET_DRIVE_NUMBER(
- phys_lun_ext_entry->lunid) ==
- PQI_VSEP_CISS_BTL) {
- pqi_mask_device(
- phys_lun_ext_entry->lunid);
+ if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) {
+ pqi_mask_device(phys_lun_ext_entry->lunid);
break;
}
}
}
}
+ if (num_logicals &&
+ (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
+ ctrl_info->lv_drive_type_mix_valid = true;
+
num_new_devices = num_physicals + num_logicals;
new_device_list = kmalloc_array(num_new_devices,
@@ -2098,8 +2276,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (device->is_physical_device)
dev_warn(&ctrl_info->pci_dev->dev,
"obtaining device info failed, skipping physical device %016llx\n",
- get_unaligned_be64(
- &phys_lun_ext_entry->wwid));
+ get_unaligned_be64(&phys_lun_ext_entry->wwid));
else
dev_warn(&ctrl_info->pci_dev->dev,
"obtaining device info failed, skipping logical device %08x%08x\n",
@@ -2112,13 +2289,13 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
- device->wwid = phys_lun_ext_entry->wwid;
+ pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry);
if ((phys_lun_ext_entry->device_flags &
CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
phys_lun_ext_entry->aio_handle) {
- device->aio_enabled = true;
- device->aio_handle =
- phys_lun_ext_entry->aio_handle;
+ device->aio_enabled = true;
+ device->aio_handle =
+ phys_lun_ext_entry->aio_handle;
}
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
@@ -2152,21 +2329,27 @@ out:
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
- int rc = 0;
+ int rc;
+ int mutex_acquired;
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
- if (!mutex_trylock(&ctrl_info->scan_mutex)) {
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+
+ if (!mutex_acquired) {
+ if (pqi_ctrl_scan_blocked(ctrl_info))
+ return -EBUSY;
pqi_schedule_rescan_worker_delayed(ctrl_info);
- rc = -EINPROGRESS;
- } else {
- rc = pqi_update_scsi_devices(ctrl_info);
- if (rc)
- pqi_schedule_rescan_worker_delayed(ctrl_info);
- mutex_unlock(&ctrl_info->scan_mutex);
+ return -EINPROGRESS;
}
+ rc = pqi_update_scsi_devices(ctrl_info);
+ if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
return rc;
}
@@ -2175,8 +2358,6 @@ static void pqi_scan_start(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info;
ctrl_info = shost_to_hba(shost);
- if (pqi_ctrl_in_ofa(ctrl_info))
- return;
pqi_scan_scsi_devices(ctrl_info);
}
@@ -2193,27 +2374,8 @@ static int pqi_scan_finished(struct Scsi_Host *shost,
return !mutex_is_locked(&ctrl_info->scan_mutex);
}
-static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->scan_mutex);
- mutex_unlock(&ctrl_info->scan_mutex);
-}
-
-static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->lun_reset_mutex);
- mutex_unlock(&ctrl_info->lun_reset_mutex);
-}
-
-static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->ofa_mutex);
- mutex_unlock(&ctrl_info->ofa_mutex);
-}
-
-static inline void pqi_set_encryption_info(
- struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
- u64 first_block)
+static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
+ struct raid_map *raid_map, u64 first_block)
{
u32 volume_blk_size;
@@ -2236,332 +2398,419 @@ static inline void pqi_set_encryption_info(
* Attempt to perform RAID bypass mapping for a logical volume I/O.
*/
+static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ bool is_supported = true;
+
+ switch (rmd->raid_level) {
+ case SA_RAID_0:
+ break;
+ case SA_RAID_1:
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
+ is_supported = false;
+ break;
+ case SA_RAID_TRIPLE:
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
+ is_supported = false;
+ break;
+ case SA_RAID_5:
+ if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_5_6))
+ is_supported = false;
+ break;
+ case SA_RAID_6:
+ if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_5_6))
+ is_supported = false;
+ break;
+ default:
+ is_supported = false;
+ break;
+ }
+
+ return is_supported;
+}
+
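pqi_aio_raid_level_supported() gates write bypass per RAID level on two conditions: the corresponding enable_r*_writes knob and the size cap retrieved via BMIC_SENSE_FEATURE. A condensed sketch of the RAID 5/6 case (a standalone helper; the names mirror the driver fields but the function itself is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* A RAID 5/6 write may bypass the RAID engine only when r5/r6
 * writes are enabled and the transfer fits under the cap the
 * controller reported in the BMIC_SENSE_FEATURE AIO subpage. */
static bool r56_write_bypass_ok(bool enable_writes,
				uint32_t max_write_raid_5_6,
				uint32_t data_length)
{
	return enable_writes && data_length <= max_write_raid_5_6;
}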
#define PQI_RAID_BYPASS_INELIGIBLE 1
-static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
- struct pqi_queue_group *queue_group)
+static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
+ struct pqi_scsi_dev_raid_map_data *rmd)
{
- struct raid_map *raid_map;
- bool is_write = false;
- u32 map_index;
- u64 first_block;
- u64 last_block;
- u32 block_cnt;
- u32 blocks_per_row;
- u64 first_row;
- u64 last_row;
- u32 first_row_offset;
- u32 last_row_offset;
- u32 first_column;
- u32 last_column;
- u64 r0_first_row;
- u64 r0_last_row;
- u32 r5or6_blocks_per_row;
- u64 r5or6_first_row;
- u64 r5or6_last_row;
- u32 r5or6_first_row_offset;
- u32 r5or6_last_row_offset;
- u32 r5or6_first_column;
- u32 r5or6_last_column;
- u16 data_disks_per_row;
- u32 total_disks_per_row;
- u16 layout_map_count;
- u32 stripesize;
- u16 strip_size;
- u32 first_group;
- u32 last_group;
- u32 current_group;
- u32 map_row;
- u32 aio_handle;
- u64 disk_block;
- u32 disk_block_cnt;
- u8 cdb[16];
- u8 cdb_length;
- int offload_to_mirror;
- struct pqi_encryption_info *encryption_info_ptr;
- struct pqi_encryption_info encryption_info;
-#if BITS_PER_LONG == 32
- u64 tmpdiv;
-#endif
-
/* Check for valid opcode, get LBA and block count. */
switch (scmd->cmnd[0]) {
case WRITE_6:
- is_write = true;
+ rmd->is_write = true;
fallthrough;
case READ_6:
- first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+ rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
- block_cnt = (u32)scmd->cmnd[4];
- if (block_cnt == 0)
- block_cnt = 256;
+ rmd->block_cnt = (u32)scmd->cmnd[4];
+ if (rmd->block_cnt == 0)
+ rmd->block_cnt = 256;
break;
case WRITE_10:
- is_write = true;
+ rmd->is_write = true;
fallthrough;
case READ_10:
- first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
- block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
break;
case WRITE_12:
- is_write = true;
+ rmd->is_write = true;
fallthrough;
case READ_12:
- first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
- block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
break;
case WRITE_16:
- is_write = true;
+ rmd->is_write = true;
fallthrough;
case READ_16:
- first_block = get_unaligned_be64(&scmd->cmnd[2]);
- block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+ rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
+ rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
break;
default:
/* Process via normal I/O path. */
return PQI_RAID_BYPASS_INELIGIBLE;
}
- /* Check for write to non-RAID-0. */
- if (is_write && device->raid_level != SA_RAID_0)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
- if (unlikely(block_cnt == 0))
- return PQI_RAID_BYPASS_INELIGIBLE;
+ return 0;
+}
- last_block = first_block + block_cnt - 1;
- raid_map = device->raid_map;
+static int pqi_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
+{
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+
+ rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
/* Check for invalid block or wraparound. */
- if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
- last_block < first_block)
+ if (rmd->last_block >=
+ get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+ rmd->last_block < rmd->first_block)
return PQI_RAID_BYPASS_INELIGIBLE;
- data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
- strip_size = get_unaligned_le16(&raid_map->strip_size);
- layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+ rmd->data_disks_per_row =
+ get_unaligned_le16(&raid_map->data_disks_per_row);
+ rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
+ rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
/* Calculate stripe information for the request. */
- blocks_per_row = data_disks_per_row * strip_size;
+ rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
+ if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+ return PQI_RAID_BYPASS_INELIGIBLE;
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- do_div(tmpdiv, blocks_per_row);
- first_row = tmpdiv;
- tmpdiv = last_block;
- do_div(tmpdiv, blocks_per_row);
- last_row = tmpdiv;
- first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
- last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
- tmpdiv = first_row_offset;
- do_div(tmpdiv, strip_size);
- first_column = tmpdiv;
- tmpdiv = last_row_offset;
- do_div(tmpdiv, strip_size);
- last_column = tmpdiv;
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->first_row = tmpdiv;
+ tmpdiv = rmd->last_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->last_row = tmpdiv;
+ rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
+ rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
+ tmpdiv = rmd->first_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->first_column = tmpdiv;
+ tmpdiv = rmd->last_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->last_column = tmpdiv;
#else
- first_row = first_block / blocks_per_row;
- last_row = last_block / blocks_per_row;
- first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
- last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
- first_column = first_row_offset / strip_size;
- last_column = last_row_offset / strip_size;
+ rmd->first_row = rmd->first_block / rmd->blocks_per_row;
+ rmd->last_row = rmd->last_block / rmd->blocks_per_row;
+ rmd->first_row_offset = (u32)(rmd->first_block -
+ (rmd->first_row * rmd->blocks_per_row));
+ rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
+ rmd->blocks_per_row));
+ rmd->first_column = rmd->first_row_offset / rmd->strip_size;
+ rmd->last_column = rmd->last_row_offset / rmd->strip_size;
#endif
/* If this isn't a single row/column then give to the controller. */
- if (first_row != last_row || first_column != last_column)
+ if (rmd->first_row != rmd->last_row ||
+ rmd->first_column != rmd->last_column)
return PQI_RAID_BYPASS_INELIGIBLE;
/* Proceeding with driver mapping. */
- total_disks_per_row = data_disks_per_row +
+ rmd->total_disks_per_row = rmd->data_disks_per_row +
get_unaligned_le16(&raid_map->metadata_disks_per_row);
- map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+ rmd->map_row = ((u32)(rmd->first_row >>
+ raid_map->parity_rotation_shift)) %
get_unaligned_le16(&raid_map->row_cnt);
- map_index = (map_row * total_disks_per_row) + first_column;
+ rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
+ rmd->first_column;
- /* RAID 1 */
- if (device->raid_level == SA_RAID_1) {
- if (device->offload_to_mirror)
- map_index += data_disks_per_row;
- device->offload_to_mirror = !device->offload_to_mirror;
- } else if (device->raid_level == SA_RAID_ADM) {
- /* RAID ADM */
- /*
- * Handles N-way mirrors (R1-ADM) and R10 with # of drives
- * divisible by 3.
- */
- offload_to_mirror = device->offload_to_mirror;
- if (offload_to_mirror == 0) {
- /* use physical disk in the first mirrored group. */
- map_index %= data_disks_per_row;
- } else {
- do {
- /*
- * Determine mirror group that map_index
- * indicates.
- */
- current_group = map_index / data_disks_per_row;
-
- if (offload_to_mirror != current_group) {
- if (current_group <
- layout_map_count - 1) {
- /*
- * Select raid index from
- * next group.
- */
- map_index += data_disks_per_row;
- current_group++;
- } else {
- /*
- * Select raid index from first
- * group.
- */
- map_index %= data_disks_per_row;
- current_group = 0;
- }
- }
- } while (offload_to_mirror != current_group);
- }
+ return 0;
+}
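
A worked example of the common map values, assuming strip_size = 128 blocks and data_disks_per_row = 4 (so blocks_per_row = 512): a request starting at block 1000 maps to row 1000 / 512 = 1, row offset 1000 - 512 = 488, and column 488 / 128 = 3. The same arithmetic as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t first_block = 1000;
	uint32_t strip_size = 128, data_disks_per_row = 4;
	uint32_t blocks_per_row = data_disks_per_row * strip_size; /* 512 */

	uint64_t first_row = first_block / blocks_per_row;              /* 1 */
	uint32_t first_row_offset =
		(uint32_t)(first_block - first_row * blocks_per_row);   /* 488 */
	uint32_t first_column = first_row_offset / strip_size;          /* 3 */

	printf("row %llu offset %u column %u\n",
	       (unsigned long long)first_row, first_row_offset, first_column);
	return 0;
}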
- /* Set mirror group to use next time. */
- offload_to_mirror =
- (offload_to_mirror >= layout_map_count - 1) ?
- 0 : offload_to_mirror + 1;
- device->offload_to_mirror = offload_to_mirror;
- /*
- * Avoid direct use of device->offload_to_mirror within this
- * function since multiple threads might simultaneously
- * increment it beyond the range of device->layout_map_count -1.
- */
- } else if ((device->raid_level == SA_RAID_5 ||
- device->raid_level == SA_RAID_6) && layout_map_count > 1) {
- /* RAID 50/60 */
- /* Verify first and last block are in same RAID group */
- r5or6_blocks_per_row = strip_size * data_disks_per_row;
- stripesize = r5or6_blocks_per_row * layout_map_count;
+static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
+ struct raid_map *raid_map)
+{
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- first_group = do_div(tmpdiv, stripesize);
- tmpdiv = first_group;
- do_div(tmpdiv, r5or6_blocks_per_row);
- first_group = tmpdiv;
- tmpdiv = last_block;
- last_group = do_div(tmpdiv, stripesize);
- tmpdiv = last_group;
- do_div(tmpdiv, r5or6_blocks_per_row);
- last_group = tmpdiv;
+ u64 tmpdiv;
+#endif
+
+ if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* RAID 50/60 */
+ /* Verify first and last block are in same RAID group. */
+ rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
+#if BITS_PER_LONG == 32
+ tmpdiv = rmd->first_block;
+ rmd->first_group = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->first_group;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->first_group = tmpdiv;
+ tmpdiv = rmd->last_block;
+ rmd->last_group = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->last_group;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->last_group = tmpdiv;
#else
- first_group = (first_block % stripesize) / r5or6_blocks_per_row;
- last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+ rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
+ rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
#endif
- if (first_group != last_group)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ if (rmd->first_group != rmd->last_group)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Verify request is in a single row of RAID 5/6 */
+ /* Verify request is in a single row of RAID 5/6. */
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- do_div(tmpdiv, stripesize);
- first_row = r5or6_first_row = r0_first_row = tmpdiv;
- tmpdiv = last_block;
- do_div(tmpdiv, stripesize);
- r5or6_last_row = r0_last_row = tmpdiv;
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->stripesize);
+ rmd->first_row = tmpdiv;
+ rmd->r5or6_first_row = tmpdiv;
+ tmpdiv = rmd->last_block;
+ do_div(tmpdiv, rmd->stripesize);
+ rmd->r5or6_last_row = tmpdiv;
#else
- first_row = r5or6_first_row = r0_first_row =
- first_block / stripesize;
- r5or6_last_row = r0_last_row = last_block / stripesize;
+ rmd->first_row = rmd->r5or6_first_row =
+ rmd->first_block / rmd->stripesize;
+ rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
#endif
- if (r5or6_first_row != r5or6_last_row)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ if (rmd->r5or6_first_row != rmd->r5or6_last_row)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Verify request is in a single column */
+ /* Verify request is in a single column. */
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- first_row_offset = do_div(tmpdiv, stripesize);
- tmpdiv = first_row_offset;
- first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
- r5or6_first_row_offset = first_row_offset;
- tmpdiv = last_block;
- r5or6_last_row_offset = do_div(tmpdiv, stripesize);
- tmpdiv = r5or6_last_row_offset;
- r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
- tmpdiv = r5or6_first_row_offset;
- do_div(tmpdiv, strip_size);
- first_column = r5or6_first_column = tmpdiv;
- tmpdiv = r5or6_last_row_offset;
- do_div(tmpdiv, strip_size);
- r5or6_last_column = tmpdiv;
+ tmpdiv = rmd->first_block;
+ rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->first_row_offset;
+ rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->r5or6_first_row_offset = rmd->first_row_offset;
+ tmpdiv = rmd->last_block;
+ rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->r5or6_last_row_offset;
+ rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
+ tmpdiv = rmd->r5or6_first_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->first_column = rmd->r5or6_first_column = tmpdiv;
+ tmpdiv = rmd->r5or6_last_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->r5or6_last_column = tmpdiv;
#else
- first_row_offset = r5or6_first_row_offset =
- (u32)((first_block % stripesize) %
- r5or6_blocks_per_row);
+ rmd->first_row_offset = rmd->r5or6_first_row_offset =
+ (u32)((rmd->first_block % rmd->stripesize) %
+ rmd->blocks_per_row);
+
+ rmd->r5or6_last_row_offset =
+ (u32)((rmd->last_block % rmd->stripesize) %
+ rmd->blocks_per_row);
+
+ rmd->first_column =
+ rmd->r5or6_first_row_offset / rmd->strip_size;
+ rmd->r5or6_first_column = rmd->first_column;
+ rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
+#endif
+ if (rmd->r5or6_first_column != rmd->r5or6_last_column)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- r5or6_last_row_offset =
- (u32)((last_block % stripesize) %
- r5or6_blocks_per_row);
+ /* Request is eligible. */
+ rmd->map_row =
+ ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
+ get_unaligned_le16(&raid_map->row_cnt);
- first_column = r5or6_first_row_offset / strip_size;
- r5or6_first_column = first_column;
- r5or6_last_column = r5or6_last_row_offset / strip_size;
+ rmd->map_index = (rmd->first_group *
+ (get_unaligned_le16(&raid_map->row_cnt) *
+ rmd->total_disks_per_row)) +
+ (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
+
+ if (rmd->is_write) {
+ u32 index;
+
+ /*
+ * p_parity_it_nexus and q_parity_it_nexus are the IT nexus handles
+ * of the parity entries inside the device's raid_map.
+ *
+ * A device's RAID map is bounded by the number of RAID disks squared,
+ * and the RAID map size is checked during device initialization.
+ */
+ index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
+ index *= rmd->total_disks_per_row;
+ index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
+
+ rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
+ if (rmd->raid_level == SA_RAID_6) {
+ rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
+ rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
+ }
+#if BITS_PER_LONG == 32
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->row = tmpdiv;
+#else
+ rmd->row = rmd->first_block / rmd->blocks_per_row;
#endif
- if (r5or6_first_column != r5or6_last_column)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ }
+
+ return 0;
+}
+
+static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ /* Build the new CDB for the physical disk I/O. */
+ if (rmd->disk_block > 0xffffffff) {
+ rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
+ rmd->cdb[1] = 0;
+ put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
+ put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
+ rmd->cdb[14] = 0;
+ rmd->cdb[15] = 0;
+ rmd->cdb_length = 16;
+ } else {
+ rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
+ rmd->cdb[1] = 0;
+ put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
+ rmd->cdb[6] = 0;
+ put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
+ rmd->cdb[9] = 0;
+ rmd->cdb_length = 10;
+ }
+}
+
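pqi_set_aio_cdb() selects the 10-byte CDB while the starting physical block still fits in 32 bits and falls back to the 16-byte CDB above that, since READ/WRITE(10) carries only a 32-bit LBA and a 16-bit transfer length. The threshold test as a tiny standalone sketch (hypothetical helper):

#include <stdint.h>

/* READ/WRITE(10): 32-bit LBA, 16-bit length.
 * Anything above 0xffffffff needs READ/WRITE(16). */
static int cdb_length_for(uint64_t disk_block)
{
	return disk_block > 0xffffffffULL ? 16 : 10;
}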
+static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ u32 index;
+ u32 group;
+
+ group = rmd->map_index / rmd->data_disks_per_row;
+
+ index = rmd->map_index - (group * rmd->data_disks_per_row);
+ rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
+ index += rmd->data_disks_per_row;
+ rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
+ if (rmd->layout_map_count > 2) {
+ index += rmd->data_disks_per_row;
+ rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
+ }
+
+ rmd->num_it_nexus_entries = rmd->layout_map_count;
+}
+
+static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ int rc;
+ struct raid_map *raid_map;
+ u32 group;
+ u32 next_bypass_group;
+ struct pqi_encryption_info *encryption_info_ptr;
+ struct pqi_encryption_info encryption_info;
+ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
+
+ rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Request is eligible */
- map_row =
- ((u32)(first_row >> raid_map->parity_rotation_shift)) %
- get_unaligned_le16(&raid_map->row_cnt);
+ rmd.raid_level = device->raid_level;
- map_index = (first_group *
- (get_unaligned_le16(&raid_map->row_cnt) *
- total_disks_per_row)) +
- (map_row * total_disks_per_row) + first_column;
+ if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ if (unlikely(rmd.block_cnt == 0))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ raid_map = device->raid_map;
+
+ rc = pqi_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ if (device->raid_level == SA_RAID_1 ||
+ device->raid_level == SA_RAID_TRIPLE) {
+ if (rmd.is_write) {
+ pqi_calc_aio_r1_nexus(raid_map, &rmd);
+ } else {
+ group = device->next_bypass_group;
+ next_bypass_group = group + 1;
+ if (next_bypass_group >= rmd.layout_map_count)
+ next_bypass_group = 0;
+ device->next_bypass_group = next_bypass_group;
+ rmd.map_index += group * rmd.data_disks_per_row;
+ }
+ } else if ((device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_6) &&
+ (rmd.layout_map_count > 1 || rmd.is_write)) {
+ rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
}
- aio_handle = raid_map->disk_data[map_index].aio_handle;
- disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
- first_row * strip_size +
- (first_row_offset - first_column * strip_size);
- disk_block_cnt = block_cnt;
+ if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
+ rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+ rmd.first_row * rmd.strip_size +
+ (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
+ rmd.disk_block_cnt = rmd.block_cnt;
/* Handle differing logical/physical block sizes. */
if (raid_map->phys_blk_shift) {
- disk_block <<= raid_map->phys_blk_shift;
- disk_block_cnt <<= raid_map->phys_blk_shift;
+ rmd.disk_block <<= raid_map->phys_blk_shift;
+ rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
}
- if (unlikely(disk_block_cnt > 0xffff))
+ if (unlikely(rmd.disk_block_cnt > 0xffff))
return PQI_RAID_BYPASS_INELIGIBLE;
- /* Build the new CDB for the physical disk I/O. */
- if (disk_block > 0xffffffff) {
- cdb[0] = is_write ? WRITE_16 : READ_16;
- cdb[1] = 0;
- put_unaligned_be64(disk_block, &cdb[2]);
- put_unaligned_be32(disk_block_cnt, &cdb[10]);
- cdb[14] = 0;
- cdb[15] = 0;
- cdb_length = 16;
- } else {
- cdb[0] = is_write ? WRITE_10 : READ_10;
- cdb[1] = 0;
- put_unaligned_be32((u32)disk_block, &cdb[2]);
- cdb[6] = 0;
- put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
- cdb[9] = 0;
- cdb_length = 10;
- }
-
- if (get_unaligned_le16(&raid_map->flags) &
- RAID_MAP_ENCRYPTION_ENABLED) {
- pqi_set_encryption_info(&encryption_info, raid_map,
- first_block);
+ pqi_set_aio_cdb(&rmd);
+
+ if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
+ if (rmd.data_length > device->max_transfer_encrypted)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+ pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
encryption_info_ptr = &encryption_info;
} else {
encryption_info_ptr = NULL;
}
- return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
- cdb, cdb_length, queue_group, encryption_info_ptr, true);
+ if (rmd.is_write) {
+ switch (device->raid_level) {
+ case SA_RAID_1:
+ case SA_RAID_TRIPLE:
+ return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
+ encryption_info_ptr, device, &rmd);
+ case SA_RAID_5:
+ case SA_RAID_6:
+ return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
+ encryption_info_ptr, device, &rmd);
+ }
+ }
+
+ return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+ rmd.cdb, rmd.cdb_length, queue_group,
+ encryption_info_ptr, true);
}
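
For RAID 1/Triple reads, the rewritten bypass path spreads I/O across the mirror groups by rotating device->next_bypass_group, replacing the old offload_to_mirror toggle. A minimal sketch of that rotation, assuming layout_map_count mirror copies (2 for RAID 1, 3 for RAID 1 Triple); the helper is illustrative:

#include <stdint.h>

/* Pick the mirror group for this read and advance the cursor. */
static uint32_t next_read_group(uint32_t *next_bypass_group,
				uint32_t layout_map_count)
{
	uint32_t group = *next_bypass_group;

	*next_bypass_group = (group + 1 >= layout_map_count) ? 0 : group + 1;
	return group;
}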
#define PQI_STATUS_IDLE 0x0
@@ -2858,7 +3107,7 @@ static void pqi_process_io_error(unsigned int iu_type,
}
}
-static int pqi_interpret_task_management_response(
+static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
struct pqi_task_management_response *response)
{
int rc;
@@ -2876,6 +3125,10 @@ static int pqi_interpret_task_management_response(
break;
}
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
+
return rc;
}
@@ -2941,13 +3194,11 @@ static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue
case PQI_RESPONSE_IU_VENDOR_GENERAL:
io_request->status =
get_unaligned_le16(
- &((struct pqi_vendor_general_response *)
- response)->status);
+ &((struct pqi_vendor_general_response *)response)->status);
break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
- io_request->status =
- pqi_interpret_task_management_response(
- (void *)response);
+ io_request->status = pqi_interpret_task_management_response(ctrl_info,
+ (void *)response);
break;
case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
@@ -3055,8 +3306,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
&request.header.iu_length);
request.event_type = event->event_type;
- request.event_id = event->event_id;
- request.additional_event_id = event->additional_event_id;
+ put_unaligned_le16(event->event_id, &request.event_id);
+ put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
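
The event-ack change stores event_id and additional_event_id through put_unaligned_le16/32, so the acknowledgment IU is little-endian on any host and independent of field alignment. A user-space sketch of what such a helper does (a byte-wise store; the kernel helpers in <asm/unaligned.h> are the real API):

#include <stdint.h>

/* Store a 16-bit value little-endian regardless of host endianness
 * or pointer alignment, as put_unaligned_le16() does. */
static void store_le16(uint16_t val, void *p)
{
	uint8_t *b = p;

	b[0] = val & 0xff;
	b[1] = val >> 8;
}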
@@ -3067,8 +3318,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
struct pqi_ctrl_info *ctrl_info)
{
- unsigned long timeout;
u8 status;
+ unsigned long timeout;
timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
@@ -3080,120 +3331,170 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
if (status & PQI_SOFT_RESET_ABORT)
return RESET_ABORT;
+ if (!sis_is_firmware_running(ctrl_info))
+ return RESET_NORESPONSE;
+
if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
+ dev_warn(&ctrl_info->pci_dev->dev,
"timed out waiting for soft reset status\n");
return RESET_TIMEDOUT;
}
- if (!sis_is_firmware_running(ctrl_info))
- return RESET_NORESPONSE;
-
ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
}
}
-static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
- enum pqi_soft_reset_status reset_status)
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
int rc;
+ unsigned int delay_secs;
+ enum pqi_soft_reset_status reset_status;
+
+ if (ctrl_info->soft_reset_handshake_supported)
+ reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
+ else
+ reset_status = RESET_INITIATE_FIRMWARE;
+
+ delay_secs = PQI_POST_RESET_DELAY_SECS;
switch (reset_status) {
- case RESET_INITIATE_DRIVER:
case RESET_TIMEDOUT:
+ delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
+ fallthrough;
+ case RESET_INITIATE_DRIVER:
dev_info(&ctrl_info->pci_dev->dev,
- "resetting controller %u\n", ctrl_info->ctrl_id);
+ "Online Firmware Activation: resetting controller\n");
sis_soft_reset(ctrl_info);
fallthrough;
case RESET_INITIATE_FIRMWARE:
- rc = pqi_ofa_ctrl_restart(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+ rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation for controller %u: %s\n",
- ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+ "Online Firmware Activation: %s\n",
+ rc == 0 ? "SUCCESS" : "FAILED");
break;
case RESET_ABORT:
- pqi_ofa_ctrl_unquiesce(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation for controller %u: %s\n",
- ctrl_info->ctrl_id, "ABORTED");
+ "Online Firmware Activation ABORTED\n");
+ if (ctrl_info->soft_reset_handshake_supported)
+ pqi_clear_soft_reset_status(ctrl_info);
+ pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
break;
case RESET_NORESPONSE:
+ fallthrough;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "unexpected Online Firmware Activation reset status: 0x%x\n",
+ reset_status);
pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
pqi_take_ctrl_offline(ctrl_info);
break;
}
}
-static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
- struct pqi_event *event)
+static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
+
+ pqi_ctrl_ofa_start(ctrl_info);
+ pqi_ofa_setup_host_buffer(ctrl_info);
+ pqi_ofa_host_memory_update(ctrl_info);
+}
+
+static void pqi_ofa_quiesce_worker(struct work_struct *work)
{
- u16 event_id;
- enum pqi_soft_reset_status status;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_event *event;
- event_id = get_unaligned_le16(&event->event_id);
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
- mutex_lock(&ctrl_info->ofa_mutex);
+ event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
- if (event_id == PQI_EVENT_OFA_QUIESCE) {
- dev_info(&ctrl_info->pci_dev->dev,
- "Received Online Firmware Activation quiesce event for controller %u\n",
- ctrl_info->ctrl_id);
- pqi_ofa_ctrl_quiesce(ctrl_info);
- pqi_acknowledge_event(ctrl_info, event);
- if (ctrl_info->soft_reset_handshake_supported) {
- status = pqi_poll_for_soft_reset_status(ctrl_info);
- pqi_process_soft_reset(ctrl_info, status);
- } else {
- pqi_process_soft_reset(ctrl_info,
- RESET_INITIATE_FIRMWARE);
- }
+ pqi_ofa_ctrl_quiesce(ctrl_info);
+ pqi_acknowledge_event(ctrl_info, event);
+ pqi_process_soft_reset(ctrl_info);
+}
- } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
- pqi_acknowledge_event(ctrl_info, event);
- pqi_ofa_setup_host_buffer(ctrl_info,
- le32_to_cpu(event->ofa_bytes_requested));
- pqi_ofa_host_memory_update(ctrl_info);
- } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
- pqi_ofa_free_host_buffer(ctrl_info);
- pqi_acknowledge_event(ctrl_info, event);
+static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event)
+{
+ bool ack_event;
+
+ ack_event = true;
+
+ switch (event->event_id) {
+ case PQI_EVENT_OFA_MEMORY_ALLOCATION:
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation(%u) cancel reason : %u\n",
- ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ "received Online Firmware Activation memory allocation request\n");
+ schedule_work(&ctrl_info->ofa_memory_alloc_work);
+ break;
+ case PQI_EVENT_OFA_QUIESCE:
+ dev_info(&ctrl_info->pci_dev->dev,
+ "received Online Firmware Activation quiesce request\n");
+ schedule_work(&ctrl_info->ofa_quiesce_work);
+ ack_event = false;
+ break;
+ case PQI_EVENT_OFA_CANCELED:
+ dev_info(&ctrl_info->pci_dev->dev,
+ "received Online Firmware Activation cancel request (reason: %u)\n",
+ ctrl_info->ofa_cancel_reason);
+ pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_ctrl_ofa_done(ctrl_info);
+ break;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "received unknown Online Firmware Activation request: event ID: %u\n",
+ event->event_id);
+ break;
}
- mutex_unlock(&ctrl_info->ofa_mutex);
+ return ack_event;
}
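
The quiesce event is the one case where the ack is deferred: acknowledging it
signals the firmware that it may proceed with activation, so the ack must come
only after the driver has quiesced itself from process context. The assumed
ordering, sketched as a comment:

/*
 * Assumed ordering in pqi_ofa_quiesce_worker() (sketch):
 *
 *	pqi_ofa_ctrl_quiesce(ctrl_info);	  - stop issuing new I/O first
 *	pqi_acknowledge_event(ctrl_info, event);  - then tell firmware to proceed
 *	pqi_process_soft_reset(ctrl_info);	  - finally ride out the reset
 */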
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
+ bool rescan_needed;
struct pqi_ctrl_info *ctrl_info;
struct pqi_event *event;
+ bool ack_event;
ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
pqi_ctrl_busy(ctrl_info);
- pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
+ pqi_wait_if_ctrl_blocked(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
goto out;
- pqi_schedule_rescan_worker_delayed(ctrl_info);
-
+ rescan_needed = false;
event = ctrl_info->events;
for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (event->pending) {
event->pending = false;
if (event->event_type == PQI_EVENT_TYPE_OFA) {
- pqi_ctrl_unbusy(ctrl_info);
- pqi_ofa_process_event(ctrl_info, event);
- return;
+ ack_event = pqi_ofa_process_event(ctrl_info, event);
+ } else {
+ ack_event = true;
+ rescan_needed = true;
}
- pqi_acknowledge_event(ctrl_info, event);
+ if (ack_event)
+ pqi_acknowledge_event(ctrl_info, event);
}
event++;
}
+ if (rescan_needed)
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
+
out:
pqi_ctrl_unbusy(ctrl_info);
}
@@ -3204,8 +3505,7 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
int num_interrupts;
u32 heartbeat_count;
- struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
- heartbeat_timer);
+ struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
@@ -3251,37 +3551,18 @@ static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
del_timer_sync(&ctrl_info->heartbeat_timer);
}
-static inline int pqi_event_type_to_event_index(unsigned int event_type)
-{
- int index;
-
- for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
- if (event_type == pqi_supported_event_types[index])
- return index;
-
- return -1;
-}
-
-static inline bool pqi_is_supported_event(unsigned int event_type)
+static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event, struct pqi_event_response *response)
{
- return pqi_event_type_to_event_index(event_type) != -1;
-}
-
-static void pqi_ofa_capture_event_payload(struct pqi_event *event,
- struct pqi_event_response *response)
-{
- u16 event_id;
-
- event_id = get_unaligned_le16(&event->event_id);
-
- if (event->event_type == PQI_EVENT_TYPE_OFA) {
- if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
- event->ofa_bytes_requested =
- response->data.ofa_memory_allocation.bytes_requested;
- } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
- event->ofa_cancel_reason =
- response->data.ofa_cancelled.reason;
- }
+ switch (event->event_id) {
+ case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+ ctrl_info->ofa_bytes_requested =
+ get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
+ break;
+ case PQI_EVENT_OFA_CANCELED:
+ ctrl_info->ofa_cancel_reason =
+ get_unaligned_le16(&response->data.ofa_cancelled.reason);
+ break;
}
}
@@ -3315,17 +3596,17 @@ static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
num_events++;
response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
- event_index =
- pqi_event_type_to_event_index(response->event_type);
+ event_index = pqi_event_type_to_event_index(response->event_type);
if (event_index >= 0 && response->request_acknowledge) {
event = &ctrl_info->events[event_index];
event->pending = true;
event->event_type = response->event_type;
- event->event_id = response->event_id;
- event->additional_event_id = response->additional_event_id;
+ event->event_id = get_unaligned_le16(&response->event_id);
+ event->additional_event_id =
+ get_unaligned_le32(&response->additional_event_id);
if (event->event_type == PQI_EVENT_TYPE_OFA)
- pqi_ofa_capture_event_payload(event, response);
+ pqi_ofa_capture_event_payload(ctrl_info, event, response);
}
oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -3342,8 +3623,7 @@ static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
#define PQI_LEGACY_INTX_MASK 0x1
-static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
- bool enable_intx)
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
{
u32 intx_mask;
struct pqi_device_registers __iomem *pqi_registers;
@@ -3420,8 +3700,7 @@ static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
valid_irq = true;
break;
case IRQ_MODE_INTX:
- intx_status =
- readl(&ctrl_info->pqi_registers->legacy_intx_status);
+ intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
if (intx_status & PQI_LEGACY_INTX_PENDING)
valid_irq = true;
else
@@ -3742,7 +4021,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
&admin_queues_aligned->iq_element_array;
admin_queues->oq_element_array =
&admin_queues_aligned->oq_element_array;
- admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+ admin_queues->iq_ci =
+ (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
admin_queues->oq_pi =
(pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
@@ -3756,8 +4036,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->admin_queue_memory_base);
admin_queues->iq_ci_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
- ((void *)admin_queues->iq_ci -
- ctrl_info->admin_queue_memory_base);
+ ((void __iomem *)admin_queues->iq_ci -
+ (void __iomem *)ctrl_info->admin_queue_memory_base);
admin_queues->oq_pi_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
((void __iomem *)admin_queues->oq_pi -
@@ -3793,6 +4073,7 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
(admin_queues->int_msg_num << 16);
writel(reg, &pqi_registers->admin_iq_num_elements);
+
writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
&pqi_registers->function_and_status_code);
@@ -4020,59 +4301,40 @@ static int pqi_process_raid_io_error_synchronous(
return rc;
}
+static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
+{
+ return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
+}
+
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+ struct pqi_raid_error_info *error_info)
{
int rc = 0;
struct pqi_io_request *io_request;
- unsigned long start_jiffies;
- unsigned long msecs_blocked;
size_t iu_length;
DECLARE_COMPLETION_ONSTACK(wait);
- /*
- * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
- * are mutually exclusive.
- */
-
if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
if (down_interruptible(&ctrl_info->sync_request_sem))
return -ERESTARTSYS;
} else {
- if (timeout_msecs == NO_TIMEOUT) {
- down(&ctrl_info->sync_request_sem);
- } else {
- start_jiffies = jiffies;
- if (down_timeout(&ctrl_info->sync_request_sem,
- msecs_to_jiffies(timeout_msecs)))
- return -ETIMEDOUT;
- msecs_blocked =
- jiffies_to_msecs(jiffies - start_jiffies);
- if (msecs_blocked >= timeout_msecs) {
- rc = -ETIMEDOUT;
- goto out;
- }
- timeout_msecs -= msecs_blocked;
- }
+ down(&ctrl_info->sync_request_sem);
}
pqi_ctrl_busy(ctrl_info);
- timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
- if (timeout_msecs == 0) {
- pqi_ctrl_unbusy(ctrl_info);
- rc = -ETIMEDOUT;
- goto out;
- }
+ /*
+ * Wait for other admin queue updates, such as
+ * config table changes and OFA memory updates.
+ */
+ if (pqi_is_blockable_request(request))
+ pqi_wait_if_ctrl_blocked(ctrl_info);
if (pqi_ctrl_offline(ctrl_info)) {
- pqi_ctrl_unbusy(ctrl_info);
rc = -ENXIO;
goto out;
}
- atomic_inc(&ctrl_info->sync_cmds_outstanding);
-
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -4089,38 +4351,24 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
io_request->io_complete_callback = pqi_raid_synchronous_complete;
io_request->context = &wait;
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- pqi_ctrl_unbusy(ctrl_info);
-
- if (timeout_msecs == NO_TIMEOUT) {
- pqi_wait_for_completion_io(ctrl_info, &wait);
- } else {
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(timeout_msecs))) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "command timed out\n");
- rc = -ETIMEDOUT;
- }
- }
+ pqi_wait_for_completion_io(ctrl_info, &wait);
if (error_info) {
if (io_request->error_info)
- memcpy(error_info, io_request->error_info,
- sizeof(*error_info));
+ memcpy(error_info, io_request->error_info, sizeof(*error_info));
else
memset(error_info, 0, sizeof(*error_info));
} else if (rc == 0 && io_request->error_info) {
- rc = pqi_process_raid_io_error_synchronous(
- io_request->error_info);
+ rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
}
pqi_free_io_request(io_request);
- atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
+ pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
@@ -4157,8 +4405,7 @@ static int pqi_submit_admin_request_synchronous(
rc = pqi_poll_for_admin_response(ctrl_info, response);
if (rc == 0)
- rc = pqi_validate_admin_response(response,
- request->function_code);
+ rc = pqi_validate_admin_response(response, request->function_code);
return rc;
}
@@ -4192,8 +4439,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
- &response);
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
pqi_pci_unmap(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor, 1,
@@ -4528,8 +4774,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
@@ -4542,7 +4787,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
event_descriptor = &event_config->descriptors[i];
if (enable_events &&
pqi_is_supported_event(event_descriptor->event_type))
- put_unaligned_le16(ctrl_info->event_queue.oq_id,
+ put_unaligned_le16(ctrl_info->event_queue.oq_id,
&event_descriptor->oq_id);
else
put_unaligned_le16(0, &event_descriptor->oq_id);
@@ -4564,8 +4809,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
@@ -4582,11 +4826,6 @@ static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
return pqi_configure_events(ctrl_info, true);
}
-static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
-{
- return pqi_configure_events(ctrl_info, false);
-}
-
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4617,7 +4856,6 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
-
ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
ctrl_info->error_buffer_length,
&ctrl_info->error_buffer_dma_handle,
@@ -4637,9 +4875,8 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
struct device *dev;
struct pqi_io_request *io_request;
- ctrl_info->io_request_pool =
- kcalloc(ctrl_info->max_io_slots,
- sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+ ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
+ sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
if (!ctrl_info->io_request_pool) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -4652,8 +4889,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
io_request = ctrl_info->io_request_pool;
for (i = 0; i < ctrl_info->max_io_slots; i++) {
- io_request->iu =
- kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+ io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
if (!io_request->iu) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -4673,8 +4909,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
io_request->index = i;
io_request->sg_chain_buffer = sg_chain_buffer;
- io_request->sg_chain_buffer_dma_handle =
- sg_chain_buffer_dma_handle;
+ io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
io_request++;
}
@@ -4781,10 +5016,16 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
sizeof(struct pqi_sg_descriptor)) +
PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+
+ ctrl_info->max_sg_per_r56_iu =
+ ((ctrl_info->max_inbound_iu_length -
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+ sizeof(struct pqi_sg_descriptor)) +
+ PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
}
-static inline void pqi_set_sg_descriptor(
- struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
+ struct scatterlist *sg)
{
u64 address = (u64)sg_dma_address(sg);
unsigned int length = sg_dma_len(sg);
@@ -4794,16 +5035,52 @@ static inline void pqi_set_sg_descriptor(
put_unaligned_le32(0, &sg_descriptor->flags);
}
+static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
+ struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
+ int max_sg_per_iu, bool *chained)
+{
+ int i;
+ unsigned int num_sg_in_iu;
+
+ *chained = false;
+ i = 0;
+ num_sg_in_iu = 0;
+ max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
+
+ while (1) {
+ pqi_set_sg_descriptor(sg_descriptor, sg);
+ if (!*chained)
+ num_sg_in_iu++;
+ i++;
+ if (i == sg_count)
+ break;
+ sg_descriptor++;
+ if (i == max_sg_per_iu) {
+ put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
+ &sg_descriptor->address);
+ put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
+ &sg_descriptor->length);
+ put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
+ *chained = true;
+ num_sg_in_iu++;
+ sg_descriptor = io_request->sg_chain_buffer;
+ }
+ sg = sg_next(sg);
+ }
+
+ put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+
+ return num_sg_in_iu;
+}
+
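
The chain accounting above reserves one IU slot for the CISS_SG_CHAIN marker;
everything beyond the remaining inline slots spills into the per-request chain
buffer. A hypothetical helper (not driver code) that models the count returned
by pqi_build_sg_list():

/* Model of pqi_build_sg_list()'s accounting: how many descriptors end
 * up inside the IU itself for a given scatter list. */
static unsigned int sg_descriptors_in_iu(unsigned int sg_count,
	unsigned int max_sg_per_iu)
{
	unsigned int inline_slots = max_sg_per_iu - 1;	/* one slot reserved for the chain marker */

	if (sg_count <= inline_slots)
		return sg_count;	/* everything fits inline, no chaining */

	/* Inline entries plus the chain marker; the remaining
	 * (sg_count - inline_slots) descriptors live in the chain buffer. */
	return inline_slots + 1;
}

For example, with max_sg_per_iu = 5 and a 6-entry scatter list, four
descriptors are placed inline, the fifth IU slot becomes the chain marker, and
the last two descriptors land in the chain buffer.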
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
- int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
- unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@@ -4819,41 +5096,89 @@ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
- max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
- chained = false;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
+
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+
+ return 0;
+}
+
+static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
num_sg_in_iu = 0;
- i = 0;
- while (1) {
- pqi_set_sg_descriptor(sg_descriptor, sg);
- if (!chained)
- num_sg_in_iu++;
- i++;
- if (i == sg_count)
- break;
- sg_descriptor++;
- if (i == max_sg_per_iu) {
- put_unaligned_le64(
- (u64)io_request->sg_chain_buffer_dma_handle,
- &sg_descriptor->address);
- put_unaligned_le32((sg_count - num_sg_in_iu)
- * sizeof(*sg_descriptor),
- &sg_descriptor->length);
- put_unaligned_le32(CISS_SG_CHAIN,
- &sg_descriptor->flags);
- chained = true;
- num_sg_in_iu++;
- sg_descriptor = io_request->sg_chain_buffer;
- }
- sg = sg_next(sg);
- }
+ if (sg_count == 0)
+ goto out;
+
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
- put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
out:
put_unaligned_le16(iu_length, &request->header.iu_length);
+ request->num_sg_descriptors = num_sg_in_iu;
+
+ return 0;
+}
+
+static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
+ num_sg_in_iu = 0;
+
+ if (sg_count != 0) {
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_r56_iu, &chained);
+
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+ }
+
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+ request->num_sg_descriptors = num_sg_in_iu;
return 0;
}
@@ -4862,12 +5187,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
- int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
- unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@@ -4884,35 +5207,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
- max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
- chained = false;
- i = 0;
- while (1) {
- pqi_set_sg_descriptor(sg_descriptor, sg);
- if (!chained)
- num_sg_in_iu++;
- i++;
- if (i == sg_count)
- break;
- sg_descriptor++;
- if (i == max_sg_per_iu) {
- put_unaligned_le64(
- (u64)io_request->sg_chain_buffer_dma_handle,
- &sg_descriptor->address);
- put_unaligned_le32((sg_count - num_sg_in_iu)
- * sizeof(*sg_descriptor),
- &sg_descriptor->length);
- put_unaligned_le32(CISS_SG_CHAIN,
- &sg_descriptor->flags);
- chained = true;
- num_sg_in_iu++;
- sg_descriptor = io_request->sg_chain_buffer;
- }
- sg = sg_next(sg);
- }
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
- put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
@@ -4947,16 +5245,14 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
io_request->scmd = scmd;
request = io_request->iu;
- memset(request, 0,
- offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
- memcpy(request->lun_number, device->scsi3addr,
- sizeof(request->lun_number));
+ memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -4966,30 +5262,20 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
case 10:
case 12:
case 16:
- /* No bytes in the Additional CDB bytes field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_0;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
break;
case 20:
- /* 4 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_4;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
break;
case 24:
- /* 8 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_8;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
break;
case 28:
- /* 12 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_12;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
break;
case 32:
default:
- /* 16 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_16;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
break;
}
@@ -5036,12 +5322,6 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
device, scmd, queue_group);
}
-static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
-{
- if (!pqi_ctrl_blocked(ctrl_info))
- schedule_work(&ctrl_info->raid_bypass_retry_work);
-}
-
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
struct scsi_cmnd *scmd;
@@ -5058,7 +5338,7 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
return false;
device = scmd->device->hostdata;
- if (pqi_device_offline(device))
+ if (pqi_device_offline(device) || pqi_device_in_remove(device))
return false;
ctrl_info = shost_to_hba(scmd->device->host);
@@ -5068,132 +5348,6 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
return true;
}
-static inline void pqi_add_to_raid_bypass_retry_list(
- struct pqi_ctrl_info *ctrl_info,
- struct pqi_io_request *io_request, bool at_head)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- if (at_head)
- list_add(&io_request->request_list_entry,
- &ctrl_info->raid_bypass_retry_list);
- else
- list_add_tail(&io_request->request_list_entry,
- &ctrl_info->raid_bypass_retry_list);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
-static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
- void *context)
-{
- struct scsi_cmnd *scmd;
-
- scmd = io_request->scmd;
- pqi_free_io_request(io_request);
- pqi_scsi_done(scmd);
-}
-
-static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
-{
- struct scsi_cmnd *scmd;
- struct pqi_ctrl_info *ctrl_info;
-
- io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
- scmd = io_request->scmd;
- scmd->result = 0;
- ctrl_info = shost_to_hba(scmd->device->host);
-
- pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
- pqi_schedule_bypass_retry(ctrl_info);
-}
-
-static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
-{
- struct scsi_cmnd *scmd;
- struct pqi_scsi_dev *device;
- struct pqi_ctrl_info *ctrl_info;
- struct pqi_queue_group *queue_group;
-
- scmd = io_request->scmd;
- device = scmd->device->hostdata;
- if (pqi_device_in_reset(device)) {
- pqi_free_io_request(io_request);
- set_host_byte(scmd, DID_RESET);
- pqi_scsi_done(scmd);
- return 0;
- }
-
- ctrl_info = shost_to_hba(scmd->device->host);
- queue_group = io_request->queue_group;
-
- pqi_reinit_io_request(io_request);
-
- return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
- device, scmd, queue_group);
-}
-
-static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
- struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_io_request *io_request;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- io_request = list_first_entry_or_null(
- &ctrl_info->raid_bypass_retry_list,
- struct pqi_io_request, request_list_entry);
- if (io_request)
- list_del(&io_request->request_list_entry);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-
- return io_request;
-}
-
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
-{
- int rc;
- struct pqi_io_request *io_request;
-
- pqi_ctrl_busy(ctrl_info);
-
- while (1) {
- if (pqi_ctrl_blocked(ctrl_info))
- break;
- io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
- if (!io_request)
- break;
- rc = pqi_retry_raid_bypass(io_request);
- if (rc) {
- pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
- true);
- pqi_schedule_bypass_retry(ctrl_info);
- break;
- }
- }
-
- pqi_ctrl_unbusy(ctrl_info);
-}
-
-static void pqi_raid_bypass_retry_worker(struct work_struct *work)
-{
- struct pqi_ctrl_info *ctrl_info;
-
- ctrl_info = container_of(work, struct pqi_ctrl_info,
- raid_bypass_retry_work);
- pqi_retry_raid_bypass_requests(ctrl_info);
-}
-
-static void pqi_clear_all_queued_raid_bypass_retries(
- struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5201,12 +5355,11 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
scmd = io_request->scmd;
scsi_dma_unmap(scmd);
- if (io_request->status == -EAGAIN)
+ if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
set_host_byte(scmd, DID_IMM_RETRY);
- else if (pqi_raid_bypass_retry_needed(io_request)) {
- pqi_queue_raid_bypass_retry(io_request);
- return;
+ scmd->SCp.this_residual++;
}
+
pqi_free_io_request(io_request);
pqi_scsi_done(scmd);
}
@@ -5234,8 +5387,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
io_request->raid_bypass = raid_bypass;
request = io_request->iu;
- memset(request, 0,
- offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
put_unaligned_le32(aio_handle, &request->nexus_id);
@@ -5289,6 +5441,129 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ struct pqi_aio_r1_path_request *r1_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+ io_request->io_complete_callback = pqi_aio_io_complete;
+ io_request->scmd = scmd;
+ io_request->raid_bypass = true;
+
+ r1_request = io_request->iu;
+ memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
+
+ r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
+ put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
+ r1_request->num_drives = rmd->num_it_nexus_entries;
+ put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
+ put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
+ if (rmd->num_it_nexus_entries == 3)
+ put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
+
+ put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
+ r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le16(io_request->index, &r1_request->request_id);
+ r1_request->error_index = r1_request->request_id;
+ if (rmd->cdb_length > sizeof(r1_request->cdb))
+ rmd->cdb_length = sizeof(r1_request->cdb);
+ r1_request->cdb_length = rmd->cdb_length;
+ memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
+
+ /* The direction is always write; SOP_READ_FLAG means the device reads from host memory. */
+ r1_request->data_direction = SOP_READ_FLAG;
+
+ if (encryption_info) {
+ r1_request->encryption_enable = true;
+ put_unaligned_le16(encryption_info->data_encryption_key_index,
+ &r1_request->data_encryption_key_index);
+ put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+ &r1_request->encrypt_tweak_lower);
+ put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+ &r1_request->encrypt_tweak_upper);
+ }
+
+ rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+ return 0;
+}
+
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ struct pqi_aio_r56_path_request *r56_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+ io_request->io_complete_callback = pqi_aio_io_complete;
+ io_request->scmd = scmd;
+ io_request->raid_bypass = true;
+
+ r56_request = io_request->iu;
+ memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
+
+ if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
+ r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
+ else
+ r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
+
+ put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
+ put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
+ put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
+ if (rmd->raid_level == SA_RAID_6) {
+ put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
+ r56_request->xor_multiplier = rmd->xor_mult;
+ }
+ put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
+ r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le64(rmd->row, &r56_request->row);
+
+ put_unaligned_le16(io_request->index, &r56_request->request_id);
+ r56_request->error_index = r56_request->request_id;
+
+ if (rmd->cdb_length > sizeof(r56_request->cdb))
+ rmd->cdb_length = sizeof(r56_request->cdb);
+ r56_request->cdb_length = rmd->cdb_length;
+ memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
+
+ /* The direction is always write; SOP_READ_FLAG means the device reads from host memory. */
+ r56_request->data_direction = SOP_READ_FLAG;
+
+ if (encryption_info) {
+ r56_request->encryption_enable = true;
+ put_unaligned_le16(encryption_info->data_encryption_key_index,
+ &r56_request->data_encryption_key_index);
+ put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+ &r56_request->encrypt_tweak_lower);
+ put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+ &r56_request->encrypt_tweak_upper);
+ }
+
+ rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+ return 0;
+}
+
static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd)
{
@@ -5301,6 +5576,14 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
return hw_queue;
}
+static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
+{
+ if (blk_rq_is_passthrough(scmd->request))
+ return false;
+
+ return scmd->SCp.this_residual == 0;
+}
+
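
With the dedicated bypass-retry list removed (see the deleted hunk above), the
driver reuses scmd->SCp.this_residual as a bypass-retry marker:

/*
 * Assumed retry round trip (sketch of the flow, not new driver API):
 *
 *   1. First pass: scmd->SCp.this_residual == 0, so the command is
 *	bypass eligible and is sent down the AIO path.
 *   2. The AIO completion fails: pqi_aio_io_complete() sets
 *	DID_IMM_RETRY and increments scmd->SCp.this_residual.
 *   3. The SCSI midlayer resubmits the same scmd; this_residual is now
 *	non-zero, pqi_is_bypass_eligible_request() returns false, and
 *	the retry goes down the regular RAID path instead.
 */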
/*
* This function gets called just before we hand the completed SCSI request
* back to the SML.
@@ -5324,9 +5607,83 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
atomic_dec(&device->scsi_cmds_outstanding);
}
-static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd)
{
+ u32 oldest_jiffies;
+ u8 lru_index;
+ int i;
+ int rc;
+ struct pqi_scsi_dev *device;
+ struct pqi_stream_data *pqi_stream_data;
+ struct pqi_scsi_dev_raid_map_data rmd;
+
+ if (!ctrl_info->enable_stream_detection)
+ return false;
+
+ rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+ if (rc)
+ return false;
+
+ /* Check writes only. */
+ if (!rmd.is_write)
+ return false;
+
+ device = scmd->device->hostdata;
+
+ /* Check for RAID 5/6 streams. */
+ if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
+ return false;
+
+ /*
+ * If the controller does not support AIO RAID 5/6 writes, these
+ * requests must be sent down the non-AIO path.
+ */
+ if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
+ (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
+ return true;
+
+ lru_index = 0;
+ oldest_jiffies = INT_MAX;
+ for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
+ pqi_stream_data = &device->stream_data[i];
+ /*
+ * Check whether the request is adjacent to, or falls
+ * within, the previous request.
+ */
+ if ((pqi_stream_data->next_lba &&
+ rmd.first_block >= pqi_stream_data->next_lba) &&
+ rmd.first_block <= pqi_stream_data->next_lba +
+ rmd.block_cnt) {
+ pqi_stream_data->next_lba = rmd.first_block +
+ rmd.block_cnt;
+ pqi_stream_data->last_accessed = jiffies;
+ return true;
+ }
+
+ /* Unused entry: claim it immediately. */
+ if (pqi_stream_data->last_accessed == 0) {
+ lru_index = i;
+ break;
+ }
+
+ /* Find entry with oldest last accessed time. */
+ if (pqi_stream_data->last_accessed <= oldest_jiffies) {
+ oldest_jiffies = pqi_stream_data->last_accessed;
+ lru_index = i;
+ }
+ }
+
+ /* Set LRU entry. */
+ pqi_stream_data = &device->stream_data[lru_index];
+ pqi_stream_data->last_accessed = jiffies;
+ pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
+
+ return false;
+}
+
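
pqi_is_parity_write_stream() keeps a small per-LUN LRU table of in-flight write
streams. A self-contained model of the detection logic (illustrative only; the
table size below is an assumption standing in for NUM_STREAMS_PER_LUN):

#include <stdbool.h>
#include <stdint.h>

#define NUM_STREAMS 8	/* assumed stand-in for NUM_STREAMS_PER_LUN */

struct stream {
	uint64_t next_lba;	/* LBA where the stream should continue */
	uint64_t last_used;	/* timestamp for LRU replacement */
};

/* Returns true when a write continues (or overlaps the tail of) a
 * tracked stream; otherwise the least-recently-used slot is claimed
 * to start tracking the new stream. */
static bool write_is_sequential(struct stream *s, uint64_t lba,
	uint64_t block_cnt, uint64_t now)
{
	uint64_t oldest = UINT64_MAX;
	int i, lru = 0;

	for (i = 0; i < NUM_STREAMS; i++) {
		if (s[i].next_lba && lba >= s[i].next_lba &&
		    lba <= s[i].next_lba + block_cnt) {
			s[i].next_lba = lba + block_cnt;
			s[i].last_used = now;
			return true;	/* sequential stream detected */
		}
		if (s[i].last_used <= oldest) {
			oldest = s[i].last_used;
			lru = i;
		}
	}

	s[lru].next_lba = lba + block_cnt;	/* claim the LRU slot */
	s[lru].last_used = now;
	return false;
}

Detected RAID 5/6 write streams are deliberately kept off the AIO bypass path
so the firmware can coalesce them into full-stripe writes.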
+static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
int rc;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
@@ -5335,7 +5692,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
bool raid_bypassed;
device = scmd->device->hostdata;
- ctrl_info = shost_to_hba(shost);
if (!device) {
set_host_byte(scmd, DID_NO_CONNECT);
@@ -5345,15 +5701,15 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
atomic_inc(&device->scsi_cmds_outstanding);
+ ctrl_info = shost_to_hba(shost);
+
if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
}
- pqi_ctrl_busy(ctrl_info);
- if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
+ if (pqi_ctrl_blocked(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5370,9 +5726,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->raid_bypass_enabled &&
- !blk_rq_is_passthrough(scmd->request)) {
- rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
- scmd, queue_group);
+ pqi_is_bypass_eligible_request(scmd) &&
+ !pqi_is_parity_write_stream(ctrl_info, scmd)) {
+ rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
atomic_inc(&device->raid_bypass_cnt);
@@ -5388,7 +5744,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
}
out:
- pqi_ctrl_unbusy(ctrl_info);
if (rc)
atomic_dec(&device->scsi_cmds_outstanding);
@@ -5478,6 +5833,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
list_for_each_entry_safe(io_request, next,
&queue_group->request_list[path],
request_list_entry) {
+
scmd = io_request->scmd;
if (!scmd)
continue;
@@ -5488,6 +5844,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
list_del(&io_request->request_list_entry);
set_host_byte(scmd, DID_RESET);
+ pqi_free_io_request(io_request);
+ scsi_dma_unmap(scmd);
pqi_scsi_done(scmd);
}
@@ -5497,102 +5855,37 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
}
}
-static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned int i;
- unsigned int path;
- struct pqi_queue_group *queue_group;
- unsigned long flags;
- struct pqi_io_request *io_request;
- struct pqi_io_request *next;
- struct scsi_cmnd *scmd;
-
- for (i = 0; i < ctrl_info->num_queue_groups; i++) {
- queue_group = &ctrl_info->queue_groups[i];
-
- for (path = 0; path < 2; path++) {
- spin_lock_irqsave(&queue_group->submit_lock[path],
- flags);
-
- list_for_each_entry_safe(io_request, next,
- &queue_group->request_list[path],
- request_list_entry) {
-
- scmd = io_request->scmd;
- if (!scmd)
- continue;
-
- list_del(&io_request->request_list_entry);
- set_host_byte(scmd, DID_RESET);
- pqi_scsi_done(scmd);
- }
-
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
- }
- }
-}
+#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_secs)
+ struct pqi_scsi_dev *device, unsigned long timeout_msecs)
{
- unsigned long timeout;
+ int cmds_outstanding;
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ unsigned long msecs_waiting;
- timeout = (timeout_secs * PQI_HZ) + jiffies;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
- while (atomic_read(&device->scsi_cmds_outstanding)) {
+ while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
- if (timeout_secs != NO_TIMEOUT) {
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "timed out waiting for pending IO\n");
- return -ETIMEDOUT;
- }
- }
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- unsigned long timeout_secs)
-{
- bool io_pending;
- unsigned long flags;
- unsigned long timeout;
- struct pqi_scsi_dev *device;
-
- timeout = (timeout_secs * PQI_HZ) + jiffies;
- while (1) {
- io_pending = false;
-
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (atomic_read(&device->scsi_cmds_outstanding)) {
- io_pending = true;
- break;
- }
+ msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
+ if (msecs_waiting > timeout_msecs) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target,
+ device->lun, msecs_waiting / 1000, cmds_outstanding);
+ return -ETIMEDOUT;
}
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
-
- if (!io_pending)
- break;
-
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
-
- if (timeout_secs != NO_TIMEOUT) {
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "timed out waiting for pending IO\n");
- return -ETIMEDOUT;
- }
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target,
+ device->lun, msecs_waiting / 1000, cmds_outstanding);
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
}
usleep_range(1000, 2000);
}
@@ -5600,18 +5893,6 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
-static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
-{
- while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5620,13 +5901,15 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-#define PQI_LUN_RESET_TIMEOUT_SECS 30
#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct completion *wait)
{
int rc;
+ unsigned int wait_secs;
+
+ wait_secs = 0;
while (1) {
if (wait_for_completion_io_timeout(wait,
@@ -5640,13 +5923,21 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
rc = -ENXIO;
break;
}
+
+ wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
+
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun,
+ wait_secs);
}
return rc;
}
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
+
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
struct pqi_io_request *io_request;
@@ -5668,11 +5959,9 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
sizeof(request->lun_number));
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
- put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
- &request->timeout);
+ put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
@@ -5684,31 +5973,33 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
return rc;
}
-/* Performs a reset at the LUN level. */
+#define PQI_LUN_RESET_RETRIES 3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
+#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-#define PQI_LUN_RESET_RETRIES 3
-#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
-#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
-
-static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
- int rc;
+ int reset_rc;
+ int wait_rc;
unsigned int retries;
- unsigned long timeout_secs;
+ unsigned long timeout_msecs;
for (retries = 0;;) {
- rc = pqi_lun_reset(ctrl_info, device);
- if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, device);
+ if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
- timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
+ timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
+ PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
+ if (wait_rc && reset_rc == 0)
+ reset_rc = wait_rc;
- return rc == 0 ? SUCCESS : FAILED;
+ return reset_rc == 0 ? SUCCESS : FAILED;
}
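
The wrapper above separates the reset result from the pending-I/O drain result,
so a reset that succeeds but leaves commands stuck still reports FAILED. Its
retry loop follows a common bounded-retry shape; a hypothetical generic form in
the driver's idiom:

/* Hypothetical generic form of the bounded-retry loop in
 * pqi_lun_reset_with_retries(): up to max_retries extra attempts with
 * a fixed delay between them, returning the last result. */
static int retry_with_delay(int (*op)(void *arg), void *arg,
	unsigned int max_retries, unsigned int interval_msecs)
{
	unsigned int retries;
	int rc;

	for (retries = 0;;) {
		rc = op(arg);
		if (rc == 0 || ++retries > max_retries)
			break;
		msleep(interval_msecs);	/* back off before the next attempt */
	}

	return rc;
}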
static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
@@ -5716,23 +6007,15 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
{
int rc;
- mutex_lock(&ctrl_info->lun_reset_mutex);
-
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_io_queued_for_device(ctrl_info, device);
rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_device_reset_start(device);
- pqi_ctrl_unblock_requests(ctrl_info);
-
if (rc)
rc = FAILED;
else
- rc = _pqi_device_reset(ctrl_info, device);
-
- pqi_device_reset_done(device);
-
- mutex_unlock(&ctrl_info->lun_reset_mutex);
+ rc = pqi_lun_reset_with_retries(ctrl_info, device);
+ pqi_ctrl_unblock_requests(ctrl_info);
return rc;
}
@@ -5748,29 +6031,25 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
ctrl_info = shost_to_hba(shost);
device = scmd->device->hostdata;
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d\n",
shost->host_no, device->bus, device->target, device->lun);
pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info) ||
- pqi_device_reset_blocked(ctrl_info)) {
+ if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
- goto out;
- }
-
- pqi_wait_until_ofa_finished(ctrl_info);
-
- atomic_inc(&ctrl_info->sync_cmds_outstanding);
- rc = pqi_device_reset(ctrl_info, device);
- atomic_dec(&ctrl_info->sync_cmds_outstanding);
+ else
+ rc = pqi_device_reset(ctrl_info, device);
-out:
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
shost->host_no, device->bus, device->target, device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+
return rc;
}
@@ -5808,10 +6087,13 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
scsi_change_queue_depth(sdev,
device->advertised_queue_depth);
}
- if (pqi_is_logical_device(device))
+ if (pqi_is_logical_device(device)) {
pqi_disable_write_same(sdev);
- else
+ } else {
sdev->allow_restart = 1;
+ if (device->device_type == SA_DEVICE_TYPE_NVME)
+ pqi_disable_write_same(sdev);
+ }
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5985,6 +6267,8 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
+ if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
+ return -EBUSY;
if (!arg)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
@@ -6069,7 +6353,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+ PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
if (iocommand.buf_size > 0)
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
@@ -6121,9 +6405,6 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
- return -EBUSY;
-
switch (cmd) {
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
@@ -6156,14 +6437,13 @@ static ssize_t pqi_firmware_version_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
}
static ssize_t pqi_driver_version_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%s\n",
- DRIVER_VERSION BUILD_TIMESTAMP);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
}
static ssize_t pqi_serial_number_show(struct device *dev,
@@ -6175,7 +6455,7 @@ static ssize_t pqi_serial_number_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
}
static ssize_t pqi_model_show(struct device *dev,
@@ -6187,7 +6467,7 @@ static ssize_t pqi_model_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
}
static ssize_t pqi_vendor_show(struct device *dev,
@@ -6199,7 +6479,7 @@ static ssize_t pqi_vendor_show(struct device *dev,
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -6252,14 +6532,103 @@ static ssize_t pqi_lockup_action_store(struct device *dev,
return -EINVAL;
}
+static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n",
+ ctrl_info->enable_stream_detection);
+}
+
+static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_stream_detection = 0;
+
+ if (kstrtou8(buffer, 0, &set_stream_detection))
+ return -EINVAL;
+
+ if (set_stream_detection > 0)
+ set_stream_detection = 1;
+
+ ctrl_info->enable_stream_detection = set_stream_detection;
+
+ return count;
+}
+
+static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
+}
+
+static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_r5_writes = 0;
+
+ if (kstrtou8(buffer, 0, &set_r5_writes))
+ return -EINVAL;
+
+ if (set_r5_writes > 0)
+ set_r5_writes = 1;
+
+ ctrl_info->enable_r5_writes = set_r5_writes;
+
+ return count;
+}
+
+static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
+}
+
+static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_r6_writes = 0;
+
+ if (kstrtou8(buffer, 0, &set_r6_writes))
+ return -EINVAL;
+
+ if (set_r6_writes > 0)
+ set_r6_writes = 1;
+
+ ctrl_info->enable_r6_writes = set_r6_writes;
+
+ return count;
+}
+
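
The three store handlers above are identical apart from the flag they set; the
shared shape could be factored as below (hypothetical helper, not part of this
patch):

/* Hypothetical common helper for the boolean sysfs stores above:
 * parse a u8, clamp any non-zero value to 1, and store it. */
static ssize_t pqi_store_bool_flag(const char *buffer, size_t count, u8 *flag)
{
	u8 value;

	if (kstrtou8(buffer, 0, &value))
		return -EINVAL;

	*flag = value ? 1 : 0;

	return count;
}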
static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
-static DEVICE_ATTR(lockup_action, 0644,
- pqi_lockup_action_show, pqi_lockup_action_store);
+static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
+ pqi_lockup_action_store);
+static DEVICE_ATTR(enable_stream_detection, 0644,
+ pqi_host_enable_stream_detection_show,
+ pqi_host_enable_stream_detection_store);
+static DEVICE_ATTR(enable_r5_writes, 0644,
+ pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
+static DEVICE_ATTR(enable_r6_writes, 0644,
+ pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
static struct device_attribute *pqi_shost_attrs[] = {
&dev_attr_driver_version,
@@ -6269,6 +6638,9 @@ static struct device_attribute *pqi_shost_attrs[] = {
&dev_attr_vendor,
&dev_attr_rescan,
&dev_attr_lockup_action,
+ &dev_attr_enable_stream_detection,
+ &dev_attr_enable_r5_writes,
+ &dev_attr_enable_r6_writes,
NULL
};
@@ -6301,8 +6673,9 @@ static ssize_t pqi_unique_id_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE,
- "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ return scnprintf(buffer, PAGE_SIZE,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
unique_id[0], unique_id[1], unique_id[2], unique_id[3],
unique_id[4], unique_id[5], unique_id[6], unique_id[7],
unique_id[8], unique_id[9], unique_id[10], unique_id[11],
@@ -6333,7 +6706,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+ return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
#define MAX_PATHS 8
@@ -6445,7 +6818,7 @@ static ssize_t pqi_sas_address_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+ return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
@@ -6503,7 +6876,7 @@ static ssize_t pqi_raid_level_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
@@ -6530,7 +6903,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+ return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
}
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
@@ -6577,9 +6950,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
if (!shost) {
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi_host_alloc failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
}
@@ -6598,21 +6969,18 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
shost->unique_id = shost->irq;
shost->nr_hw_queues = ctrl_info->num_queue_groups;
+ shost->host_tagset = 1;
shost->hostdata[0] = (unsigned long)ctrl_info;
rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi_add_host failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
goto free_host;
}
rc = pqi_add_sas_host(shost, ctrl_info);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "add SAS host failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
goto remove_host;
}
@@ -6682,8 +7050,7 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
rc = sis_pqi_reset_quiesce(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "PQI reset failed during quiesce with error %d\n",
- rc);
+ "PQI reset failed during quiesce with error %d\n", rc);
return rc;
}
}
@@ -6738,13 +7105,24 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- memcpy(ctrl_info->firmware_version, identify->firmware_version,
- sizeof(identify->firmware_version));
- ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
- snprintf(ctrl_info->firmware_version +
- strlen(ctrl_info->firmware_version),
- sizeof(ctrl_info->firmware_version),
- "-%u", get_unaligned_le16(&identify->firmware_build_number));
+ if (get_unaligned_le32(&identify->extra_controller_flags) &
+ BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
+ memcpy(ctrl_info->firmware_version,
+ identify->firmware_version_long,
+ sizeof(identify->firmware_version_long));
+ } else {
+ memcpy(ctrl_info->firmware_version,
+ identify->firmware_version_short,
+ sizeof(identify->firmware_version_short));
+ ctrl_info->firmware_version
+ [sizeof(identify->firmware_version_short)] = '\0';
+ snprintf(ctrl_info->firmware_version +
+ strlen(ctrl_info->firmware_version),
+ sizeof(ctrl_info->firmware_version) -
+ sizeof(identify->firmware_version_short),
+ "-%u",
+ get_unaligned_le16(&identify->firmware_build_number));
+ }
memcpy(ctrl_info->model, identify->product_id,
sizeof(identify->product_id));
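The fallback branch appends "-<build number>" to the NUL-terminated short version string. A worked illustration with made-up values: a firmware_version_short of "2.10" and a firmware_build_number of 345 yield "2.10-345", while a controller that sets BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED supplies the complete string in firmware_version_long and skips the suffix.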
@@ -6831,8 +7209,7 @@ static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(last_section,
&request.data.config_table_update.last_section);
- return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
@@ -6841,6 +7218,7 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
{
void *features_requested;
void __iomem *features_requested_iomem_addr;
+ void __iomem *host_max_known_feature_iomem_addr;
features_requested = firmware_features->features_supported +
le16_to_cpu(firmware_features->num_elements);
@@ -6851,6 +7229,16 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
memcpy_toio(features_requested_iomem_addr, features_requested,
le16_to_cpu(firmware_features->num_elements));
+ if (pqi_is_firmware_feature_supported(firmware_features,
+ PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
+ host_max_known_feature_iomem_addr =
+ features_requested_iomem_addr +
+ (le16_to_cpu(firmware_features->num_elements) * 2) +
+ sizeof(__le16);
+ writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
+ host_max_known_feature_iomem_addr);
+ }
+
return pqi_config_table_update(ctrl_info,
PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
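The pointer arithmetic above is easier to follow against the section layout it assumes. A sketch based on struct pqi_config_table_firmware_features in smartpqi.h (treat it as a reading aid, not a verbatim quote):

	u8     features_supported[num_elements];
	u8     features_requested_by_host[num_elements];  /* features_requested points here */
	u8     features_enabled[num_elements];
	__le16 firmware_max_known_feature;
	__le16 host_max_known_feature;                    /* target of the writew() */

Adding num_elements * 2 skips past the requested and enabled byte arrays, and the extra sizeof(__le16) steps over firmware_max_known_feature, landing the write exactly on host_max_known_feature.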
@@ -6888,16 +7276,28 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
struct pqi_firmware_feature *firmware_feature)
{
switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
+ ctrl_info->enable_r1_writes = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
+ ctrl_info->enable_r5_writes = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
+ ctrl_info->enable_r6_writes = firmware_feature->enabled;
+ break;
case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
ctrl_info->soft_reset_handshake_supported =
- firmware_feature->enabled;
+ firmware_feature->enabled &&
+ pqi_read_soft_reset_status(ctrl_info);
break;
case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
- ctrl_info->raid_iu_timeout_supported =
- firmware_feature->enabled;
+ ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
break;
case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
- ctrl_info->tmf_iu_timeout_supported =
+ ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
+ ctrl_info->unique_wwid_in_report_phys_lun_supported =
firmware_feature->enabled;
break;
}
@@ -6926,6 +7326,51 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_status = pqi_firmware_feature_status,
},
{
+ .feature_name = "Maximum Known Feature",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 5 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 6 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 5 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 6 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
.feature_status = pqi_ctrl_update_feature_flags,
@@ -6940,6 +7385,16 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "Unique WWID in Report Physical LUN",
+ .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -6998,7 +7453,7 @@ static void pqi_process_firmware_features(
if (pqi_is_firmware_feature_enabled(firmware_features,
firmware_features_iomem_addr,
pqi_firmware_features[i].feature_bit)) {
- pqi_firmware_features[i].enabled = true;
+ pqi_firmware_features[i].enabled = true;
}
pqi_firmware_feature_update(ctrl_info,
&pqi_firmware_features[i]);
@@ -7024,14 +7479,34 @@ static void pqi_process_firmware_features_section(
mutex_unlock(&pqi_firmware_features_mutex);
}
+/*
+ * Reset all controller settings that can be initialized during the processing
+ * of the PQI Configuration Table.
+ */
+
+static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->heartbeat_counter = NULL;
+ ctrl_info->soft_reset_status = NULL;
+ ctrl_info->soft_reset_handshake_supported = false;
+ ctrl_info->enable_r1_writes = false;
+ ctrl_info->enable_r5_writes = false;
+ ctrl_info->enable_r6_writes = false;
+ ctrl_info->raid_iu_timeout_supported = false;
+ ctrl_info->tmf_iu_timeout_supported = false;
+ ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
+}
+
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
u32 table_length;
u32 section_offset;
+ bool firmware_feature_section_present;
void __iomem *table_iomem_addr;
struct pqi_config_table *config_table;
struct pqi_config_table_section_header *section;
struct pqi_config_table_section_info section_info;
+ struct pqi_config_table_section_info feature_section_info;
table_length = ctrl_info->config_table_length;
if (table_length == 0)
@@ -7048,25 +7523,24 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
* Copy the config table contents from I/O memory space into the
* temporary buffer.
*/
- table_iomem_addr = ctrl_info->iomem_base +
- ctrl_info->config_table_offset;
+ table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
memcpy_fromio(config_table, table_iomem_addr, table_length);
+ firmware_feature_section_present = false;
section_info.ctrl_info = ctrl_info;
- section_offset =
- get_unaligned_le32(&config_table->first_section_offset);
+ section_offset = get_unaligned_le32(&config_table->first_section_offset);
while (section_offset) {
section = (void *)config_table + section_offset;
section_info.section = section;
section_info.section_offset = section_offset;
- section_info.section_iomem_addr =
- table_iomem_addr + section_offset;
+ section_info.section_iomem_addr = table_iomem_addr + section_offset;
switch (get_unaligned_le16(&section->section_id)) {
case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
- pqi_process_firmware_features_section(&section_info);
+ firmware_feature_section_present = true;
+ feature_section_info = section_info;
break;
case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
if (pqi_disable_heartbeat)
@@ -7076,8 +7550,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
ctrl_info->heartbeat_counter =
table_iomem_addr +
section_offset +
- offsetof(
- struct pqi_config_table_heartbeat,
+ offsetof(struct pqi_config_table_heartbeat,
heartbeat_counter);
break;
case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
@@ -7085,14 +7558,21 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
table_iomem_addr +
section_offset +
offsetof(struct pqi_config_table_soft_reset,
- soft_reset_status);
+ soft_reset_status);
break;
}
- section_offset =
- get_unaligned_le16(&section->next_section_offset);
+ section_offset = get_unaligned_le16(&section->next_section_offset);
}
+ /*
+ * We process the firmware feature section after all other sections
+ * have been processed so that the feature bit callbacks can take
+ * into account the settings configured by other sections.
+ */
+ if (firmware_feature_section_present)
+ pqi_process_firmware_features_section(&feature_section_info);
+
kfree(config_table);
return 0;
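The deferral has a concrete payoff visible in this same patch: the New Soft Reset Handshake callback in pqi_ctrl_update_feature_flags() now calls pqi_read_soft_reset_status(), which reads through ctrl_info->soft_reset_status, and that pointer is only populated when the PQI_CONFIG_TABLE_SECTION_SOFT_RESET case runs. Processing the feature section first would find the pointer still unset and mis-report the handshake as unsupported.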
@@ -7140,15 +7620,14 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
-
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
+ u32 product_id;
if (reset_devices) {
sis_soft_reset(ctrl_info);
- msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
} else {
rc = pqi_force_sis_mode(ctrl_info);
if (rc)
@@ -7181,15 +7660,19 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
}
+ product_id = sis_get_product_id(ctrl_info);
+ ctrl_info->product_id = (u8)product_id;
+ ctrl_info->product_revision = (u8)(product_id >> 8);
+
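A quick decode of the packing, with a made-up register value: if sis_get_product_id() returns 0x00000201, the low byte 0x01 becomes ctrl_info->product_id and the next byte 0x02 becomes ctrl_info->product_revision; the upper 16 bits are unused here.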
if (reset_devices) {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
- ctrl_info->max_outstanding_requests =
+ ctrl_info->max_outstanding_requests =
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
} else {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS)
- ctrl_info->max_outstanding_requests =
+ ctrl_info->max_outstanding_requests =
PQI_MAX_OUTSTANDING_REQUESTS;
}
@@ -7294,6 +7777,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_start_heartbeat_timer(ctrl_info);
+ if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+ rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+ if (rc) { /* Supported features not returned correctly. */
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining advanced RAID bypass configuration\n");
+ return rc;
+ }
+ ctrl_info->ciss_report_log_flags |=
+ CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+ }
+
rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -7443,12 +7937,25 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
ctrl_info->controller_online = true;
pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_ctrl_reset_config(ctrl_info);
+
rc = pqi_process_config_table(ctrl_info);
if (rc)
return rc;
pqi_start_heartbeat_timer(ctrl_info);
+ if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+ rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining advanced RAID bypass configuration\n");
+ return rc;
+ }
+ ctrl_info->ciss_report_log_flags |=
+ CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+ }
+
rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -7477,15 +7984,15 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- pqi_schedule_update_time_worker(ctrl_info);
+ if (pqi_ofa_in_progress(ctrl_info))
+ pqi_ctrl_unblock_scan(ctrl_info);
pqi_scan_scsi_devices(ctrl_info);
return 0;
}
-static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
- u16 timeout)
+static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
{
int rc;
@@ -7591,7 +8098,6 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
atomic_set(&ctrl_info->num_interrupts, 0);
- atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7599,19 +8105,26 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
+ INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
+ INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
+
sema_init(&ctrl_info->sync_request_sem,
PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
init_waitqueue_head(&ctrl_info->block_requests_wait);
- INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
- spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
- INIT_WORK(&ctrl_info->raid_bypass_retry_work,
- pqi_raid_bypass_retry_worker);
-
ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
ctrl_info->irq_mode = IRQ_MODE_NONE;
ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
+ ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+ ctrl_info->max_transfer_encrypted_sas_sata =
+ PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
+ ctrl_info->max_transfer_encrypted_nvme =
+ PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
+ ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
+ ctrl_info->max_write_raid_1_10_2drive = ~0;
+ ctrl_info->max_write_raid_1_10_3drive = ~0;
+
return ctrl_info;
}
@@ -7663,81 +8176,57 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
- pqi_wait_until_scan_finished(ctrl_info);
- pqi_ctrl_ofa_start(ctrl_info);
+ pqi_ctrl_block_scan(ctrl_info);
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
- pqi_fail_io_queued_for_all_devices(ctrl_info);
- pqi_wait_until_inbound_queues_empty(ctrl_info);
pqi_stop_heartbeat_timer(ctrl_info);
- ctrl_info->pqi_mode_enabled = false;
- pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
}
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
- pqi_ofa_free_host_buffer(ctrl_info);
- ctrl_info->pqi_mode_enabled = true;
- pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
- ctrl_info->controller_online = true;
- pqi_ctrl_unblock_requests(ctrl_info);
pqi_start_heartbeat_timer(ctrl_info);
- pqi_schedule_update_time_worker(ctrl_info);
- pqi_clear_soft_reset_status(ctrl_info,
- PQI_SOFT_RESET_ABORT);
- pqi_scan_scsi_devices(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
}
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
- u32 total_size, u32 chunk_size)
+static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{
- u32 sg_count;
- u32 size;
int i;
- struct pqi_sg_descriptor *mem_descriptor = NULL;
+ u32 sg_count;
struct device *dev;
struct pqi_ofa_memory *ofap;
-
- dev = &ctrl_info->pci_dev->dev;
-
- sg_count = (total_size + chunk_size - 1);
- sg_count /= chunk_size;
+ struct pqi_sg_descriptor *mem_descriptor;
+ dma_addr_t dma_handle;
ofap = ctrl_info->pqi_ofa_mem_virt_addr;
- if (sg_count*chunk_size < total_size)
+ sg_count = DIV_ROUND_UP(total_size, chunk_size);
+ if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
goto out;
- ctrl_info->pqi_ofa_chunk_virt_addr =
- kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
+ ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
if (!ctrl_info->pqi_ofa_chunk_virt_addr)
goto out;
- for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
- dma_addr_t dma_handle;
+ dev = &ctrl_info->pci_dev->dev;
+ for (i = 0; i < sg_count; i++) {
ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_alloc_coherent(dev, chunk_size, &dma_handle,
- GFP_KERNEL);
-
+ dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
- break;
-
+ goto out_free_chunks;
mem_descriptor = &ofap->sg_descriptor[i];
- put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
- put_unaligned_le32 (chunk_size, &mem_descriptor->length);
+ put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+ put_unaligned_le32(chunk_size, &mem_descriptor->length);
}
- if (!size || size < total_size)
- goto out_free_chunks;
-
put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
- put_unaligned_le32(size, &ofap->bytes_allocated);
+ put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
return 0;
@@ -7745,82 +8234,87 @@ out_free_chunks:
while (--i >= 0) {
mem_descriptor = &ofap->sg_descriptor[i];
dma_free_coherent(dev, chunk_size,
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
- get_unaligned_le64(&mem_descriptor->address));
+ ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ get_unaligned_le64(&mem_descriptor->address));
}
kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
out:
- put_unaligned_le32 (0, &ofap->bytes_allocated);
return -ENOMEM;
}
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
u32 total_size;
+ u32 chunk_size;
u32 min_chunk_size;
- u32 chunk_sz;
- total_size = le32_to_cpu(
- ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
- min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
+ if (ctrl_info->ofa_bytes_requested == 0)
+ return 0;
- for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
- if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
+ total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
+ min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+ min_chunk_size = PAGE_ALIGN(min_chunk_size);
+
+ for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
+ if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
return 0;
+ chunk_size /= 2;
+ chunk_size = PAGE_ALIGN(chunk_size);
+ }
return -ENOMEM;
}
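To make the halving loop concrete, a worked example with made-up numbers on a 4 KiB-page system, assuming PQI_OFA_MAX_SG_DESCRIPTORS is 64 as defined in smartpqi.h: an ofa_bytes_requested of 1 MiB gives total_size = 1048576 and min_chunk_size = PAGE_ALIGN(DIV_ROUND_UP(1048576, 64)) = 16384. The loop first attempts one 1 MiB chunk, then two 512 KiB chunks, and so on down to sixty-four 16 KiB chunks before giving up with -ENOMEM.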
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
- u32 bytes_requested)
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
- struct pqi_ofa_memory *pqi_ofa_memory;
struct device *dev;
+ struct pqi_ofa_memory *ofap;
dev = &ctrl_info->pci_dev->dev;
- pqi_ofa_memory = dma_alloc_coherent(dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
- &ctrl_info->pqi_ofa_mem_dma_handle,
- GFP_KERNEL);
- if (!pqi_ofa_memory)
+ ofap = dma_alloc_coherent(dev, sizeof(*ofap),
+ &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
+ if (!ofap)
return;
- put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
- memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
- sizeof(pqi_ofa_memory->signature));
- pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
-
- ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+ ctrl_info->pqi_ofa_mem_virt_addr = ofap;
if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
- dev_err(dev, "Failed to allocate host buffer of size = %u",
- bytes_requested);
+ dev_err(dev,
+ "failed to allocate host buffer for Online Firmware Activation\n");
+ dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
+ ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ return;
}
- return;
+ put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
+ memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
- int i;
- struct pqi_sg_descriptor *mem_descriptor;
+ unsigned int i;
+ struct device *dev;
struct pqi_ofa_memory *ofap;
+ struct pqi_sg_descriptor *mem_descriptor;
+ unsigned int num_memory_descriptors;
ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
if (!ofap)
return;
- if (!ofap->bytes_allocated)
+ dev = &ctrl_info->pci_dev->dev;
+
+ if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
goto out;
mem_descriptor = ofap->sg_descriptor;
+ num_memory_descriptors =
+ get_unaligned_le16(&ofap->num_memory_descriptors);
- for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
- i++) {
- dma_free_coherent(&ctrl_info->pci_dev->dev,
+ for (i = 0; i < num_memory_descriptors; i++) {
+ dma_free_coherent(dev,
get_unaligned_le32(&mem_descriptor[i].length),
ctrl_info->pqi_ofa_chunk_virt_addr[i],
get_unaligned_le64(&mem_descriptor[i].address));
@@ -7828,47 +8322,45 @@ static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
out:
- dma_free_coherent(&ctrl_info->pci_dev->dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
- ctrl_info->pqi_ofa_mem_dma_handle);
+ dma_free_coherent(dev, sizeof(*ofap), ofap,
+ ctrl_info->pqi_ofa_mem_dma_handle);
ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
+ u32 buffer_length;
struct pqi_vendor_general_request request;
- size_t size;
struct pqi_ofa_memory *ofap;
memset(&request, 0, sizeof(request));
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
&request.header.iu_length);
put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
&request.function_code);
+ ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
if (ofap) {
- size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
+ buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
get_unaligned_le16(&ofap->num_memory_descriptors) *
sizeof(struct pqi_sg_descriptor);
put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
&request.data.ofa_memory_allocation.buffer_address);
- put_unaligned_le32(size,
+ put_unaligned_le32(buffer_length,
&request.data.ofa_memory_allocation.buffer_length);
-
}
- return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
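The advertised buffer_length covers exactly the fixed OFA header plus the descriptor slots actually in use. Assuming the conventional 16-byte struct pqi_sg_descriptor (a __le64 address, a __le32 length, and a __le32 flags field) and, say, 64 descriptors, that works out to offsetof(struct pqi_ofa_memory, sg_descriptor) + 64 * 16 bytes; nothing is claimed for unused slots.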
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
- msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ ssleep(delay_secs);
+
return pqi_ctrl_init_resume(ctrl_info);
}
@@ -7926,7 +8418,6 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
pqi_cancel_update_time_worker(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_all_outstanding_requests(ctrl_info);
- pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
}
@@ -8059,24 +8550,12 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
return;
}
- pqi_disable_events(ctrl_info);
pqi_wait_until_ofa_finished(ctrl_info);
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_cancel_event_worker(ctrl_info);
-
- pqi_ctrl_shutdown_start(ctrl_info);
- pqi_ctrl_wait_until_quiesced(ctrl_info);
-
- rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
- if (rc) {
- dev_err(&pci_dev->dev,
- "wait for pending I/O failed\n");
- return;
- }
+ pqi_scsi_block_requests(ctrl_info);
pqi_ctrl_block_device_reset(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
/*
* Write all data in the controller's battery-backed cache to
@@ -8087,15 +8566,6 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
dev_err(&pci_dev->dev,
"unable to flush controller cache\n");
- pqi_ctrl_block_requests(ctrl_info);
-
- rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
- if (rc) {
- dev_err(&pci_dev->dev,
- "wait for pending sync cmds failed\n");
- return;
- }
-
pqi_crash_if_pending_command(ctrl_info);
pqi_reset(ctrl_info);
}
@@ -8130,19 +8600,18 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
ctrl_info = pci_get_drvdata(pci_dev);
- pqi_disable_events(ctrl_info);
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_wait_until_scan_finished(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
pqi_wait_until_ofa_finished(ctrl_info);
- pqi_flush_cache(ctrl_info, SUSPEND);
+
+ pqi_ctrl_block_scan(ctrl_info);
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+ pqi_flush_cache(ctrl_info, SUSPEND);
pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_crash_if_pending_command(ctrl_info);
+
if (state.event == PM_EVENT_FREEZE)
return 0;
@@ -8175,14 +8644,21 @@ static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
pci_dev->irq, rc);
return rc;
}
- pqi_start_heartbeat_timer(ctrl_info);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
return 0;
}
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
+
return pqi_ctrl_init_resume(ctrl_info);
}
@@ -8218,6 +8694,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x8460)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x1104)
},
{
@@ -8290,6 +8770,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0051)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0052)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0053)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -8450,6 +8946,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1400)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1402)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1410)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1411)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1412)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1420)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1430)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1440)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1441)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1450)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1452)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1460)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1461)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1470)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1471)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1472)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1490)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1491)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
@@ -8514,6 +9126,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_HP, 0x1100)
},
{
@@ -8522,6 +9138,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0294)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x02db)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x02dc)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x032e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1d8d, 0x0800)
},
{
@@ -8602,6 +9234,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_driver_scratch) != 0xb0);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_product_identifier) != 0xb4);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_firmware_status) != 0xbc);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_mailbox) != 0x1000);
@@ -8615,7 +9249,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_iu_header,
response_queue_id) != 0x4);
BUILD_BUG_ON(offsetof(struct pqi_iu_header,
- work_area) != 0x6);
+ driver_flags) != 0x6);
BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
@@ -8713,7 +9347,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
header.iu_length) != 2);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
@@ -8769,7 +9403,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
header.iu_length) != 2);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
@@ -8793,7 +9427,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
header.response_queue_id) != 4);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
@@ -8822,7 +9456,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
header.response_queue_id) != 4);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
@@ -8997,13 +9631,23 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
configuration_signature) != 1);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
- firmware_version) != 5);
+ firmware_version_short) != 5);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
extended_logical_unit_count) != 154);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
firmware_build_number) != 190);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ vendor_id) != 200);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ product_id) != 208);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ extra_controller_flags) != 286);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
controller_mode) != 292);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ spare_part_number) != 293);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ firmware_version_long) != 325);
BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
phys_bay_in_box) != 115);
@@ -9021,6 +9665,45 @@ static void __attribute__((unused)) verify_structures(void)
current_queue_depth_limit) != 1796);
BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ page_code) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ subpage_code) != 1);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ buffer_length) != 2);
+
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ page_code) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ subpage_code) != 1);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ page_length) != 2);
+
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
+ != 18);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ header) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ firmware_read_support) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ driver_read_support) != 5);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ firmware_write_support) != 6);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ driver_write_support) != 7);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_transfer_encrypted_sas_sata) != 8);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_transfer_encrypted_nvme) != 10);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_5_6) != 12);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_2drive) != 14);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_3drive) != 16);
+
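These BUILD_BUG_ON() additions extend the driver's compile-time layout checks to the new BMIC sense-feature structures, so a packing mistake breaks the build instead of corrupting on-the-wire data. A minimal sketch of the idiom, using a made-up struct rather than anything from the patch:

struct example_hdr {
	u8 page_code;		/* expected at offset 0 */
	u8 subpage_code;	/* expected at offset 1 */
	__le16 buffer_length;	/* expected at offset 2 */
};

static void example_verify(void)
{
	/* Compiles to nothing when the layout matches; fails the build otherwise. */
	BUILD_BUG_ON(sizeof(struct example_hdr) != 4);
	BUILD_BUG_ON(offsetof(struct example_hdr, buffer_length) != 2);
}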
BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index c9b00b3368d7..dd628cc87f78 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -65,8 +65,8 @@ static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy)
memset(identify, 0, sizeof(*identify));
identify->sas_address = pqi_sas_port->sas_address;
identify->device_type = SAS_END_DEVICE;
- identify->initiator_port_protocols = SAS_PROTOCOL_STP;
- identify->target_port_protocols = SAS_PROTOCOL_STP;
+ identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
+ identify->target_port_protocols = SAS_PROTOCOL_ALL;
phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -93,13 +93,24 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
identify = &rphy->identify;
identify->sas_address = pqi_sas_port->sas_address;
- if (pqi_sas_port->device &&
- pqi_sas_port->device->is_expander_smp_device) {
- identify->initiator_port_protocols = SAS_PROTOCOL_SMP;
- identify->target_port_protocols = SAS_PROTOCOL_SMP;
- } else {
- identify->initiator_port_protocols = SAS_PROTOCOL_STP;
- identify->target_port_protocols = SAS_PROTOCOL_STP;
+ identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
+ identify->target_port_protocols = SAS_PROTOCOL_STP;
+
+ if (pqi_sas_port->device) {
+ identify->phy_identifier = pqi_sas_port->device->phy_id;
+ switch (pqi_sas_port->device->device_type) {
+ case SA_DEVICE_TYPE_SAS:
+ case SA_DEVICE_TYPE_SES:
+ case SA_DEVICE_TYPE_NVME:
+ identify->target_port_protocols = SAS_PROTOCOL_SSP;
+ break;
+ case SA_DEVICE_TYPE_EXPANDER_SMP:
+ identify->target_port_protocols = SAS_PROTOCOL_SMP;
+ break;
+ case SA_DEVICE_TYPE_SATA:
+ default:
+ break;
+ }
}
return sas_rphy_add(rphy);
@@ -107,8 +118,7 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port)
{
- if (pqi_sas_port->device &&
- pqi_sas_port->device->is_expander_smp_device)
+ if (pqi_sas_port->device && pqi_sas_port->device->is_expander_smp_device)
return sas_expander_alloc(pqi_sas_port->port,
SAS_FANOUT_EXPANDER_DEVICE);
@@ -161,7 +171,7 @@ static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
list_for_each_entry_safe(pqi_sas_phy, next,
&pqi_sas_port->phy_list_head, phy_list_entry)
- pqi_free_sas_phy(pqi_sas_phy);
+ pqi_free_sas_phy(pqi_sas_phy);
sas_port_delete(pqi_sas_port->port);
list_del(&pqi_sas_port->port_list_entry);
@@ -191,7 +201,7 @@ static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
list_for_each_entry_safe(pqi_sas_port, next,
&pqi_sas_node->port_list_head, port_list_entry)
- pqi_free_sas_port(pqi_sas_port);
+ pqi_free_sas_port(pqi_sas_port);
kfree(pqi_sas_node);
}
@@ -498,7 +508,7 @@ static unsigned int pqi_build_sas_smp_handler_reply(
job->reply_len = le16_to_cpu(error_info->sense_data_length);
memcpy(job->reply, error_info->data,
- le16_to_cpu(error_info->sense_data_length));
+ le16_to_cpu(error_info->sense_data_length));
return job->reply_payload.payload_len -
get_unaligned_le32(&error_info->data_in_transferred);
@@ -547,6 +557,7 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto out;
reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info);
+
out:
bsg_job_done(job, rc, reslen);
}
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 26ea6b9d4199..c954620628e0 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -71,7 +71,7 @@ struct sis_base_struct {
/* error response data */
__le32 error_buffer_element_length; /* length of each PQI error */
/* response buffer element */
- /* in bytes */
+ /* in bytes */
__le32 error_buffer_num_elements; /* total number of PQI error */
/* response buffers available */
};
@@ -146,7 +146,12 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info)
{
return readl(&ctrl_info->registers->sis_firmware_status) &
- SIS_CTRL_KERNEL_UP;
+ SIS_CTRL_KERNEL_UP;
+}
+
+u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info)
+{
+ return readl(&ctrl_info->registers->sis_product_identifier);
}
/* used for passing command parameters/results when issuing SIS commands */
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 878d34ca6532..12cd2ab1aead 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -27,5 +27,6 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
+u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 9e2e196bc202..97c6f81b1d2a 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
struct NCR_700_Host_Parameters *hostdata;
struct Scsi_Host *host;
struct resource *res;
+ int rc;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
@@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
goto out_kfree;
host->this_id = 7;
host->base = base;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = rc = platform_get_irq(dev, 0);
+ if (rc < 0)
+ goto out_put_host;
if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
printk(KERN_ERR "snirm710: request_irq failed!\n");
goto out_put_host;
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 4471c4c8aafa..3aeee856d5c7 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -334,25 +334,7 @@ snic_stats_show(struct seq_file *sfp, void *data)
return 0;
}
-/*
- * snic_stats_open - Open the stats file for specific host
- *
- * Description:
- * This routine opens a debugfs file stats of specific host
- */
-static int
-snic_stats_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, snic_stats_show, inode->i_private);
-}
-
-static const struct file_operations snic_stats_fops = {
- .owner = THIS_MODULE,
- .open = snic_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(snic_stats);
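DEFINE_SHOW_ATTRIBUTE() generates the single_open() boilerplate that the removed code spelled out by hand. Sketched from memory of include/linux/seq_file.h, the expansion is roughly:

static int snic_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, snic_stats_show, inode->i_private);
}

static const struct file_operations snic_stats_fops = {
	.owner = THIS_MODULE,
	.open = snic_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

so the conversion is behavior-preserving while trimming the hand-rolled fops.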
static const struct file_operations snic_reset_stats_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 6bc5453cea8a..e6718a74e5da 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -366,10 +366,14 @@ static u32 max_outstanding_req_per_channel;
static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
static int storvsc_vcpus_per_sub_channel = 4;
+static unsigned int storvsc_max_hw_queues;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
+module_param(storvsc_max_hw_queues, uint, 0644);
+MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues");
+
module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
@@ -1688,9 +1692,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
int i;
struct scatterlist *sgl;
- unsigned int sg_count = 0;
+ unsigned int sg_count;
struct vmscsi_request *vm_srb;
- struct scatterlist *cur_sgl;
struct vmbus_packet_mpb_array *payload;
u32 payload_sz;
u32 length;
@@ -1769,8 +1772,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
payload_sz = sizeof(cmd_request->mpb);
if (sg_count) {
- unsigned int hvpgoff = 0;
- unsigned long offset_in_hvpg = sgl->offset & ~HV_HYP_PAGE_MASK;
+ unsigned int hvpgoff, hvpfns_to_add;
+ unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
u64 hvpfn;
@@ -1783,51 +1786,34 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
- /*
- * sgl is a list of PAGEs, and payload->range.pfn_array
- * expects the page number in the unit of HV_HYP_PAGE_SIZE (the
- * page size that Hyper-V uses, so here we need to divide PAGEs
- * into HV_HYP_PAGE in case that PAGE_SIZE > HV_HYP_PAGE_SIZE.
- * Besides, payload->range.offset should be the offset in one
- * HV_HYP_PAGE.
- */
payload->range.len = length;
payload->range.offset = offset_in_hvpg;
- hvpgoff = sgl->offset >> HV_HYP_PAGE_SHIFT;
- cur_sgl = sgl;
- for (i = 0; i < hvpg_count; i++) {
+
+ for (i = 0; sgl != NULL; sgl = sg_next(sgl)) {
/*
- * 'i' is the index of hv pages in the payload and
- * 'hvpgoff' is the offset (in hv pages) of the first
- * hv page in the the first page. The relationship
- * between the sum of 'i' and 'hvpgoff' and the offset
- * (in hv pages) in a payload page ('hvpgoff_in_page')
- * is as follow:
- *
- * |------------------ PAGE -------------------|
- * | NR_HV_HYP_PAGES_IN_PAGE hvpgs in total |
- * |hvpg|hvpg| ... |hvpg|... |hvpg|
- * ^ ^ ^ ^
- * +-hvpgoff-+ +-hvpgoff_in_page-+
- * ^ |
- * +--------------------- i ---------------------------+
+ * Init values for the current sgl entry. hvpgoff
+ * and hvpfns_to_add are in units of Hyper-V size
+ * pages. Handling the PAGE_SIZE != HV_HYP_PAGE_SIZE
+ * case also handles values of sgl->offset that are
+ * larger than PAGE_SIZE. Such offsets are handled
+ * even on sgl entries other than the first, provided
+ * they are a multiple of PAGE_SIZE.
*/
- unsigned int hvpgoff_in_page =
- (i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;
+ hvpgoff = HVPFN_DOWN(sgl->offset);
+ hvpfn = page_to_hvpfn(sg_page(sgl)) + hvpgoff;
+ hvpfns_to_add = HVPFN_UP(sgl->offset + sgl->length) -
+ hvpgoff;
/*
- * Two cases that we need to fetch a page:
- * 1) i == 0, the first step or
- * 2) hvpgoff_in_page == 0, when we reach the boundary
- * of a page.
+ * Fill the next portion of the PFN array with
+ * sequential Hyper-V PFNs for the contiguous physical
+ * memory described by the sgl entry. The end of the
+ * last sgl should be reached at the same time that
+ * the PFN array is filled.
*/
- if (hvpgoff_in_page == 0 || i == 0) {
- hvpfn = page_to_hvpfn(sg_page(cur_sgl));
- cur_sgl = sg_next(cur_sgl);
- }
-
- payload->range.pfn_array[i] = hvpfn + hvpgoff_in_page;
+ while (hvpfns_to_add--)
+ payload->range.pfn_array[i++] = hvpfn++;
}
}
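A worked pass through the new loop, with made-up values on a 64 KiB-page kernel over 4 KiB Hyper-V pages: an sgl entry with offset 0x11000 and length 0x3000 gives hvpgoff = HVPFN_DOWN(0x11000) = 17, hvpfn = page_to_hvpfn(sg_page(sgl)) + 17, and hvpfns_to_add = HVPFN_UP(0x11000 + 0x3000) - 17 = 20 - 17 = 3, so three consecutive 4 KiB PFNs land in pfn_array. Removing dma_boundary in the hunk below is what allows a single sgl entry to cover more than one kernel page in the first place.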
@@ -1861,8 +1847,6 @@ static struct scsi_host_template scsi_driver = {
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 2048,
.this_id = -1,
- /* Make sure we dont get a sg segment crosses a page boundary */
- .dma_boundary = PAGE_SIZE-1,
/* Ensure there are no gaps in presented sgls */
.virt_boundary_mask = PAGE_SIZE-1,
.no_write_same = 1,
@@ -1907,6 +1891,7 @@ static int storvsc_probe(struct hv_device *device,
{
int ret;
int num_cpus = num_online_cpus();
+ int num_present_cpus = num_present_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
@@ -2015,8 +2000,17 @@ static int storvsc_probe(struct hv_device *device,
* For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
*/
- if (!dev_is_ide)
- host->nr_hw_queues = num_present_cpus();
+ if (!dev_is_ide) {
+ if (storvsc_max_hw_queues > num_present_cpus) {
+ storvsc_max_hw_queues = 0;
+ storvsc_log(device, STORVSC_LOGGING_WARN,
+ "Resetting invalid storvsc_max_hw_queues value to default.\n");
+ }
+ if (storvsc_max_hw_queues)
+ host->nr_hw_queues = storvsc_max_hw_queues;
+ else
+ host->nr_hw_queues = num_present_cpus;
+ }
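The clamp above quietly falls back to num_present_cpus() whenever the requested count exceeds it. Assuming the driver is loaded under its usual hv_storvsc module name, the knob would be set with something like hv_storvsc.storvsc_max_hw_queues=8 on the kernel command line; the 0644 permission also exposes it under /sys/module/, though a new value only takes effect at probe time.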
/*
* Set the error handler work queue.
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 7de82f2c9757..d3489ac7ab28 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
if (!esp->command_block)
goto fail_unmap_regs_dma;
- host->irq = platform_get_irq(dev, 0);
+ host->irq = err = platform_get_irq(dev, 0);
+ if (err < 0)
+ goto fail_unmap_command_block;
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
"SUN3X ESP", esp);
if (err < 0)
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 149391faa19c..13d92043e13b 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -100,6 +100,7 @@ static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
}
/**
+ * cdns_ufs_set_hclkdiv()
* Sets HCLKDIV register value based on the core_clk
* @hba: host controller instance
*
@@ -141,6 +142,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
}
/**
+ * cdns_ufs_hce_enable_notify()
* Called before and after HCE enable bit is set.
* @hba: host controller instance
* @status: notify stage (pre, post change)
@@ -157,6 +159,7 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
}
/**
+ * cdns_ufs_hibern8_notify()
* Called around hibern8 enter/exit.
* @hba: host controller instance
* @cmd: UIC Command
@@ -173,6 +176,7 @@ static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
}
/**
+ * cdns_ufs_link_startup_notify()
* Called before and after Link startup is carried out.
* @hba: host controller instance
* @status: notify stage (pre, post change)
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index dee98dc72d29..ced9ef4d7c78 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -44,13 +44,103 @@ static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats);
+static int ee_usr_mask_get(void *data, u64 *val)
+{
+ struct ufs_hba *hba = data;
+
+ *val = hba->ee_usr_mask;
+ return 0;
+}
+
+static int ufs_debugfs_get_user_access(struct ufs_hba *hba)
+__acquires(&hba->host_sem)
+{
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+ pm_runtime_get_sync(hba->dev);
+ return 0;
+}
+
+static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
+__releases(&hba->host_sem)
+{
+ pm_runtime_put_sync(hba->dev);
+ up(&hba->host_sem);
+}
+
+static int ee_usr_mask_set(void *data, u64 val)
+{
+ struct ufs_hba *hba = data;
+ int err;
+
+ if (val & ~(u64)MASK_EE_STATUS)
+ return -EINVAL;
+ err = ufs_debugfs_get_user_access(hba);
+ if (err)
+ return err;
+ err = ufshcd_update_ee_usr_mask(hba, val, MASK_EE_STATUS);
+ ufs_debugfs_put_user_access(hba);
+ return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n");
+
+void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status)
+{
+ bool chgd = false;
+ u16 ee_ctrl_mask;
+ int err = 0;
+
+ if (!hba->debugfs_ee_rate_limit_ms || !status)
+ return;
+
+ mutex_lock(&hba->ee_ctrl_mutex);
+ ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status);
+ chgd = ee_ctrl_mask != hba->ee_ctrl_mask;
+ if (chgd) {
+ err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
+ if (err)
+ dev_err(hba->dev, "%s: failed to write ee control %d\n",
+ __func__, err);
+ }
+ mutex_unlock(&hba->ee_ctrl_mutex);
+
+ if (chgd && !err) {
+ unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms);
+
+ queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay);
+ }
+}
+
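A worked example of the masking above (values made up): with ee_drv_mask = 0x0004 (MASK_EE_URGENT_BKOPS), ee_usr_mask = 0x0003, and an exception event reporting status = 0x0001, the new control mask is 0x0004 | (0x0003 & ~0x0001) = 0x0006. The user-requested bit that just fired is switched off at the device, and the delayed work re-arms it after debugfs_ee_rate_limit_ms milliseconds, which is what rate-limits a chattering exception source.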
+static void ufs_debugfs_restart_ee(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work);
+
+ if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) ||
+ ufs_debugfs_get_user_access(hba))
+ return;
+ ufshcd_write_ee_control(hba);
+ ufs_debugfs_put_user_access(hba);
+}
+
void ufs_debugfs_hba_init(struct ufs_hba *hba)
{
+ /* Set default exception event rate limit period to 20ms */
+ hba->debugfs_ee_rate_limit_ms = 20;
+ INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
+ debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
+ hba, &ee_usr_mask_fops);
+ debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
+ &hba->debugfs_ee_rate_limit_ms);
}
void ufs_debugfs_hba_exit(struct ufs_hba *hba)
{
debugfs_remove_recursive(hba->debugfs_root);
+ cancel_delayed_work_sync(&hba->debugfs_ee_work);
}
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
index f35b39c4b4f5..3ca29d30460a 100644
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -12,11 +12,13 @@ void __init ufs_debugfs_init(void);
void __exit ufs_debugfs_exit(void);
void ufs_debugfs_hba_init(struct ufs_hba *hba);
void ufs_debugfs_hba_exit(struct ufs_hba *hba);
+void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
#else
static inline void ufs_debugfs_init(void) {}
static inline void ufs_debugfs_exit(void) {}
static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {}
static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {}
+static inline void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status) {}
#endif
#endif
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 267943a13a94..70647eacf195 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -652,7 +652,6 @@ out:
#define PWR_MODE_STR_LEN 64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_max,
struct ufs_pa_layer_attr *pwr_req)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1155,7 +1154,7 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
dev_req_params);
break;
case POST_CHANGE:
- ret = exynos_ufs_post_pwr_mode(hba, NULL, dev_req_params);
+ ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
break;
}
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index a9dc8d7c9f78..2a3dd21da6a6 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -819,9 +819,9 @@ static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (host->hw_ver.major == 0x1)
- return UFSHCI_VERSION_11;
+ return ufshci_version(1, 1);
else
- return UFSHCI_VERSION_20;
+ return ufshci_version(2, 0);
}
/**
@@ -1071,13 +1071,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (res) {
host->dev_ref_clk_ctrl_mmio =
devm_ioremap_resource(dev, res);
- if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
- dev_warn(dev,
- "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
- __func__,
- PTR_ERR(host->dev_ref_clk_ctrl_mmio));
+ if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
host->dev_ref_clk_ctrl_mmio = NULL;
- }
host->dev_ref_clk_en_mask = BIT(5);
}
}
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index acc54f530f2d..d7c3cff9662f 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -246,7 +246,7 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
}
pm_runtime_get_sync(hba->dev);
- res = ufshcd_wb_ctrl(hba, wb_enable);
+ res = ufshcd_wb_toggle(hba, wb_enable);
pm_runtime_put_sync(hba->dev);
out:
up(&hba->host_sem);
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index bf1897a72532..cb80b9670bfe 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -348,8 +348,14 @@ enum power_desc_param_offset {
/* Exception event mask values */
enum {
- MASK_EE_STATUS = 0xFFFF,
- MASK_EE_URGENT_BKOPS = (1 << 2),
+ MASK_EE_STATUS = 0xFFFF,
+ MASK_EE_DYNCAP_EVENT = BIT(0),
+ MASK_EE_SYSPOOL_EVENT = BIT(1),
+ MASK_EE_URGENT_BKOPS = BIT(2),
+ MASK_EE_TOO_HIGH_TEMP = BIT(3),
+ MASK_EE_TOO_LOW_TEMP = BIT(4),
+ MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
+ MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
};
/* Background operation status */
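
The expanded enum above spells each event bit with BIT(n). For reference (plain C; a 32-bit stand-in for the kernel macro), BIT(n) is simply 1 << n, so MASK_EE_URGENT_BKOPS is 0x4:

#include <stdio.h>

#define BIT(n)	(1u << (n))	/* 32-bit stand-in for the kernel's BIT() */

int main(void)
{
	printf("URGENT_BKOPS %#x, WRITEBOOSTER %#x\n", BIT(2), BIT(5));
	return 0;	/* prints 0x4, 0x20 */
}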
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index fadd566025b8..23ee828747e2 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -15,13 +15,89 @@
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
+#include <linux/uuid.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+
+struct ufs_host {
+ void (*late_init)(struct ufs_hba *hba);
+};
+
+enum {
+ INTEL_DSM_FNS = 0,
+ INTEL_DSM_RESET = 1,
+};
struct intel_host {
+ struct ufs_host ufs_host;
+ u32 dsm_fns;
u32 active_ltr;
u32 idle_ltr;
struct dentry *debugfs_root;
+ struct gpio_desc *reset_gpio;
};
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
+ 0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
+
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ union acpi_object *obj;
+ int err = 0;
+ size_t len;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ if (!obj)
+ return -EOPNOTSUPP;
+
+ if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ len = min_t(size_t, obj->buffer.length, 4);
+
+ *result = 0;
+ memcpy(result, obj->buffer.pointer, len);
+out:
+ ACPI_FREE(obj);
+
+ return err;
+}
+
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ return -EOPNOTSUPP;
+
+ return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
+{
+ int err;
+
+ err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
+}
+
+static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ /* Cannot enable ICE until after HC enable */
+ if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
+ u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
+
+ hce |= CRYPTO_GENERAL_ENABLE;
+ ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
+ }
+
+ return 0;
+}
+
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
@@ -144,6 +220,41 @@ static void intel_remove_debugfs(struct ufs_hba *hba)
debugfs_remove_recursive(host->debugfs_root);
}
+static int ufs_intel_device_reset(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ if (host->dsm_fns & INTEL_DSM_RESET) {
+ u32 result = 0;
+ int err;
+
+ err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
+ if (!err && !result)
+ err = -EIO;
+ if (err)
+ dev_err(hba->dev, "%s: DSM error %d result %u\n",
+ __func__, err, result);
+ return err;
+ }
+
+ if (!host->reset_gpio)
+ return -EOPNOTSUPP;
+
+ gpiod_set_value_cansleep(host->reset_gpio, 1);
+ usleep_range(10, 15);
+
+ gpiod_set_value_cansleep(host->reset_gpio, 0);
+ usleep_range(10, 15);
+
+ return 0;
+}
+
+static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
+{
+ /* GPIO in _DSD has active low setting */
+ return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+}
+
static int ufs_intel_common_init(struct ufs_hba *hba)
{
struct intel_host *host;
@@ -154,6 +265,23 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
if (!host)
return -ENOMEM;
ufshcd_set_variant(hba, host);
+ intel_dsm_init(host, hba->dev);
+ if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (hba->vops->device_reset)
+ hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+ } else {
+ if (hba->vops->device_reset)
+ host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
+ if (IS_ERR(host->reset_gpio)) {
+ dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
+ __func__, PTR_ERR(host->reset_gpio));
+ host->reset_gpio = NULL;
+ }
+ if (host->reset_gpio) {
+ gpiod_set_value_cansleep(host->reset_gpio, 0);
+ hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+ }
+ }
intel_ltr_expose(hba->dev);
intel_add_debugfs(hba);
return 0;
@@ -206,6 +334,31 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
+static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+{
+ /* LKF always needs a full reset, so set PM accordingly */
+ if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
+ hba->spm_lvl = UFS_PM_LVL_6;
+ hba->rpm_lvl = UFS_PM_LVL_6;
+ } else {
+ hba->spm_lvl = UFS_PM_LVL_5;
+ hba->rpm_lvl = UFS_PM_LVL_5;
+ }
+}
+
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
+{
+ struct ufs_host *ufs_host;
+ int err;
+
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ err = ufs_intel_common_init(hba);
+ ufs_host = ufshcd_get_variant(hba);
+ ufs_host->late_init = ufs_intel_lkf_late_init;
+ return err;
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@@ -222,6 +375,16 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.resume = ufs_intel_resume,
};
+static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_lkf_init,
+ .exit = ufs_intel_common_exit,
+ .hce_enable_notify = ufs_intel_hce_enable_notify,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* ufshcd_pci_suspend - suspend power management function
@@ -321,6 +484,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct ufs_host *ufs_host;
struct ufs_hba *hba;
void __iomem *mmio_base;
int err;
@@ -358,6 +522,10 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
+ ufs_host = ufshcd_get_variant(hba);
+ if (ufs_host && ufs_host->late_init)
+ ufs_host->late_init(hba);
+
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
@@ -383,6 +551,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
{ } /* terminate list */
};
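
intel_dsm() above gates every _DSM evaluation on the function bitmap returned by function 0. A standalone sketch (plain C; the bitmap value is a made-up example) of that gating check:

#include <stdio.h>
#include <stdint.h>

/* Function 0 (INTEL_DSM_FNS) returns a bitmap; function N may only be
 * evaluated when bit N of that bitmap is set. */
static int dsm_fn_supported(uint32_t dsm_fns, unsigned int fn)
{
	return fn <= 31 && (dsm_fns & (1u << fn));
}

int main(void)
{
	uint32_t fns = 0x3;	/* assume fns 0 (query) and 1 (reset) present */

	printf("reset supported: %d\n", dsm_fn_supported(fns, 1)); /* 1 */
	printf("fn 2 supported:  %d\n", dsm_fn_supported(fns, 2)); /* 0 */
	return 0;
}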
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 1a69949a4ea1..298e22ef907e 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -106,7 +106,6 @@ out:
static int ufshcd_populate_vreg(struct device *dev, const char *name,
struct ufs_vreg **out_vreg)
{
- int ret = 0;
char prop_name[MAX_PROP_SIZE];
struct ufs_vreg *vreg = NULL;
struct device_node *np = dev->of_node;
@@ -135,9 +134,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
vreg->max_uA = 0;
}
out:
- if (!ret)
- *out_vreg = vreg;
- return ret;
+ *out_vreg = vreg;
+ return 0;
}
/**
@@ -377,7 +375,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- err = -ENODEV;
+ err = irq;
goto out;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d3d05e997c13..0625da7a42ee 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -24,7 +24,6 @@
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
-#include <linux/blkdev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -245,8 +244,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -273,20 +272,12 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
static inline void ufshcd_wb_config(struct ufs_hba *hba)
{
- int ret;
-
if (!ufshcd_is_wb_allowed(hba))
return;
- ret = ufshcd_wb_ctrl(hba, true);
- if (ret)
- dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
- else
- dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
- ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
- if (ret)
- dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
- __func__, ret);
+ ufshcd_wb_toggle(hba, true);
+
+ ufshcd_wb_toggle_flush_during_h8(hba, true);
if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
ufshcd_wb_toggle_flush(hba, true);
}
@@ -336,11 +327,15 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
return;
if (str_t == UFS_TM_SEND)
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->req_header,
- &descp->input_param1, UFS_TSF_TM_INPUT);
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ &descp->upiu_req.req_header,
+ &descp->upiu_req.input_param1,
+ UFS_TSF_TM_INPUT);
else
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->rsp_header,
- &descp->output_param1, UFS_TSF_TM_OUTPUT);
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ &descp->upiu_rsp.rsp_header,
+ &descp->upiu_rsp.output_param1,
+ UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
@@ -667,23 +662,12 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- u32 intr_mask = 0;
+ if (hba->ufs_version == ufshci_version(1, 0))
+ return INTERRUPT_MASK_ALL_VER_10;
+ if (hba->ufs_version <= ufshci_version(2, 0))
+ return INTERRUPT_MASK_ALL_VER_11;
- switch (hba->ufs_version) {
- case UFSHCI_VERSION_10:
- intr_mask = INTERRUPT_MASK_ALL_VER_10;
- break;
- case UFSHCI_VERSION_11:
- case UFSHCI_VERSION_20:
- intr_mask = INTERRUPT_MASK_ALL_VER_11;
- break;
- case UFSHCI_VERSION_21:
- default:
- intr_mask = INTERRUPT_MASK_ALL_VER_21;
- break;
- }
-
- return intr_mask;
+ return INTERRUPT_MASK_ALL_VER_21;
}
/**
@@ -694,10 +678,22 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
*/
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
+ u32 ufshci_ver;
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
- return ufshcd_vops_get_ufs_hci_version(hba);
+ ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
+ else
+ ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
- return ufshcd_readl(hba, REG_UFS_VERSION);
+ /*
+ * UFSHCI v1.x uses a different version scheme. To allow the
+ * use of comparisons with the ufshci_version() function, we
+ * convert it to the same scheme as UFS 2.0+.
+ */
+ if (ufshci_ver & 0x00010000)
+ return ufshci_version(1, ufshci_ver & 0x00000100 ? 1 : 0);
+
+ return ufshci_ver;
}
/**
@@ -929,8 +925,7 @@ static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
- (hba->ufs_version == UFSHCI_VERSION_11))
+ if (hba->ufs_version <= ufshci_version(1, 1))
return UFS_UNIPRO_VER_1_41;
else
return UFS_UNIPRO_VER_1_6;
@@ -1266,7 +1261,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
/* Enable Write Booster if we have scaled up else disable it */
downgrade_write(&hba->clk_scaling_lock);
is_writelock = false;
- ufshcd_wb_ctrl(hba, scale_up);
+ ufshcd_wb_toggle(hba, scale_up);
out_unprepare:
ufshcd_clock_scaling_unprepare(hba, is_writelock);
@@ -2333,7 +2328,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == UFSHCI_VERSION_10) {
+ if (hba->ufs_version == ufshci_version(1, 0)) {
u32 rw;
rw = set & INTERRUPT_MASK_RW_VER_10;
set = rw | ((set ^ intrs) & intrs);
@@ -2353,7 +2348,7 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (hba->ufs_version == UFSHCI_VERSION_10) {
+ if (hba->ufs_version == ufshci_version(1, 0)) {
u32 rw;
rw = (set & INTERRUPT_MASK_RW_VER_10) &
~(intrs & INTERRUPT_MASK_RW_VER_10);
@@ -2516,8 +2511,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
u8 upiu_flags;
int ret = 0;
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
- (hba->ufs_version == UFSHCI_VERSION_11))
+ if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
@@ -2544,8 +2538,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
u8 upiu_flags;
int ret = 0;
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
- (hba->ufs_version == UFSHCI_VERSION_11))
+ if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_SCSI;
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
@@ -5161,6 +5154,46 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
}
}
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
+{
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
+ &ee_ctrl_mask);
+}
+
+int ufshcd_write_ee_control(struct ufs_hba *hba)
+{
+ int err;
+
+ mutex_lock(&hba->ee_ctrl_mutex);
+ err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
+ mutex_unlock(&hba->ee_ctrl_mutex);
+ if (err)
+ dev_err(hba->dev, "%s: failed to write ee control %d\n",
+ __func__, err);
+ return err;
+}
+
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
+ u16 set, u16 clr)
+{
+ u16 new_mask, ee_ctrl_mask;
+ int err = 0;
+
+ mutex_lock(&hba->ee_ctrl_mutex);
+ new_mask = (*mask & ~clr) | set;
+ ee_ctrl_mask = new_mask | *other_mask;
+ if (ee_ctrl_mask != hba->ee_ctrl_mask)
+ err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
+ /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
+ if (!err) {
+ hba->ee_ctrl_mask = ee_ctrl_mask;
+ *mask = new_mask;
+ }
+ mutex_unlock(&hba->ee_ctrl_mutex);
+ return err;
+}
+
/**
* ufshcd_disable_ee - disable exception event
* @hba: per-adapter instance
@@ -5171,22 +5204,9 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
*
* Returns zero on success, non-zero error value on failure.
*/
-static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
- int err = 0;
- u32 val;
-
- if (!(hba->ee_ctrl_mask & mask))
- goto out;
-
- val = hba->ee_ctrl_mask & ~mask;
- val &= MASK_EE_STATUS;
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
- if (!err)
- hba->ee_ctrl_mask &= ~mask;
-out:
- return err;
+ return ufshcd_update_ee_drv_mask(hba, 0, mask);
}
/**
@@ -5199,22 +5219,9 @@ out:
*
* Returns zero on success, non-zero error value on failure.
*/
-static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
- int err = 0;
- u32 val;
-
- if (hba->ee_ctrl_mask & mask)
- goto out;
-
- val = hba->ee_ctrl_mask | mask;
- val &= MASK_EE_STATUS;
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
- if (!err)
- hba->ee_ctrl_mask |= mask;
-out:
- return err;
+ return ufshcd_update_ee_drv_mask(hba, mask, 0);
}
/**
@@ -5431,85 +5438,74 @@ out:
__func__, err);
}
-int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
- int ret;
u8 index;
- enum query_opcode opcode;
+ enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
+ UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
+}
+
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
+{
+ int ret;
if (!ufshcd_is_wb_allowed(hba))
return 0;
if (!(enable ^ hba->dev_info.wb_enabled))
return 0;
- if (enable)
- opcode = UPIU_QUERY_OPCODE_SET_FLAG;
- else
- opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
- index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, opcode,
- QUERY_FLAG_IDN_WB_EN, index, NULL);
+ ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
if (ret) {
- dev_err(hba->dev, "%s write booster %s failed %d\n",
+ dev_err(hba->dev, "%s Write Booster %s failed %d\n",
__func__, enable ? "enable" : "disable", ret);
return ret;
}
hba->dev_info.wb_enabled = enable;
- dev_dbg(hba->dev, "%s write booster %s %d\n",
- __func__, enable ? "enable" : "disable", ret);
+ dev_info(hba->dev, "%s Write Booster %s\n",
+ __func__, enable ? "enabled" : "disabled");
return ret;
}
-static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
{
- int val;
- u8 index;
-
- if (set)
- val = UPIU_QUERY_OPCODE_SET_FLAG;
- else
- val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+ int ret;
- index = ufshcd_wb_get_query_index(hba);
- return ufshcd_query_flag_retry(hba, val,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
- index, NULL);
+ ret = __ufshcd_wb_toggle(hba, set,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
+ if (ret) {
+ dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
+ __func__, set ? "enable" : "disable", ret);
+ return;
+ }
+ dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
+ __func__, set ? "enabled" : "disabled");
}
-static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
int ret;
- u8 index;
- enum query_opcode opcode;
if (!ufshcd_is_wb_allowed(hba) ||
hba->dev_info.wb_buf_flush_enabled == enable)
- return 0;
-
- if (enable)
- opcode = UPIU_QUERY_OPCODE_SET_FLAG;
- else
- opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+ return;
- index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, opcode,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, index,
- NULL);
+ ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
if (ret) {
dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
enable ? "enable" : "disable", ret);
- goto out;
+ return;
}
hba->dev_info.wb_buf_flush_enabled = enable;
- dev_dbg(hba->dev, "WB-Buf Flush %s\n", enable ? "enabled" : "disabled");
-out:
- return ret;
-
+ dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
+ __func__, enable ? "enabled" : "disabled");
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5617,11 +5613,12 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
goto out;
}
- status &= hba->ee_ctrl_mask;
+ trace_ufshcd_exception_event(dev_name(hba->dev), status);
- if (status & MASK_EE_URGENT_BKOPS)
+ if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
+ ufs_debugfs_exception_event(hba, status);
out:
ufshcd_scsi_unblock_requests(hba);
/*
@@ -6402,7 +6399,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
blk_mq_start_request(req);
task_tag = req->tag;
- treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+ treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
@@ -6475,16 +6472,16 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
/* Configure task request UPIU */
- treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
+ treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
- treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+ treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
/*
* The host shall provide the same value for LUN field in the basic
* header and for Input Parameter.
*/
- treq.input_param1 = cpu_to_be32(lun_id);
- treq.input_param2 = cpu_to_be32(task_id);
+ treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
+ treq.upiu_req.input_param2 = cpu_to_be32(task_id);
err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
if (err == -ETIMEDOUT)
@@ -6495,7 +6492,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
__func__, ocs_value);
else if (tm_response)
- *tm_response = be32_to_cpu(treq.output_param1) &
+ *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
MASK_TM_SERVICE_RESP;
return err;
}
@@ -6560,15 +6557,10 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
- switch (hba->ufs_version) {
- case UFSHCI_VERSION_10:
- case UFSHCI_VERSION_11:
+ if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- break;
- default:
+ else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- break;
- }
/* update the task tag in the request upiu */
req_upiu->header.dword_0 |= cpu_to_be32(tag);
@@ -6675,7 +6667,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
- memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
+ memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
if (err == -ETIMEDOUT)
@@ -6688,7 +6680,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
break;
}
- memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
+ memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
break;
default:
@@ -7128,7 +7120,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
}
/**
- * ufshcd_calc_icc_level - calculate the max ICC level
+ * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
* In case regulators are not initialized we'll return 0
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
@@ -7149,19 +7141,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
goto out;
}
- if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
+ if (hba->vreg_info.vcc->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vcc->max_uA,
POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
- if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
+ if (hba->vreg_info.vccq->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq->max_uA,
icc_level,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
- if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
+ if (hba->vreg_info.vccq2->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq2->max_uA,
icc_level,
@@ -7922,6 +7914,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
ufshcd_set_active_icc_lvl(hba);
ufshcd_wb_config(hba);
+ if (hba->ee_usr_mask)
+ ufshcd_write_ee_control(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -8919,6 +8913,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ufshcd_urgent_bkops(hba);
+ if (hba->ee_usr_mask)
+ ufshcd_write_ee_control(hba);
+
hba->clk_gating.is_suspended = false;
if (ufshcd_is_clkscaling_supported(hba))
@@ -9280,13 +9277,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
- if ((hba->ufs_version != UFSHCI_VERSION_10) &&
- (hba->ufs_version != UFSHCI_VERSION_11) &&
- (hba->ufs_version != UFSHCI_VERSION_20) &&
- (hba->ufs_version != UFSHCI_VERSION_21))
- dev_err(hba->dev, "invalid UFS version 0x%x\n",
- hba->ufs_version);
-
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
@@ -9337,6 +9327,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize mutex for device management commands */
mutex_init(&hba->dev_cmd.lock);
+ /* Initialize mutex for exception event control */
+ mutex_init(&hba->ee_ctrl_mutex);
+
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 18e56c1c1b30..5eb66a8debc7 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -773,7 +773,10 @@ struct ufs_hba {
u32 ufshcd_state;
u32 eh_flags;
u32 intr_mask;
- u16 ee_ctrl_mask;
+ u16 ee_ctrl_mask; /* Exception event mask */
+ u16 ee_drv_mask; /* Exception event mask for driver */
+ u16 ee_usr_mask; /* Exception event mask for user (via debugfs) */
+ struct mutex ee_ctrl_mutex;
bool is_powered;
bool shutting_down;
struct semaphore host_sem;
@@ -840,6 +843,8 @@ struct ufs_hba {
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
+ struct delayed_work debugfs_ee_work;
+ u32 debugfs_ee_rate_limit_ms;
#endif
};
@@ -1099,7 +1104,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op);
-int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -1181,7 +1186,7 @@ static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
}
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
- bool status,
+ enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
@@ -1285,4 +1290,23 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix);
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
+int ufshcd_write_ee_control(struct ufs_hba *hba);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
+ u16 set, u16 clr);
+
+static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
+ u16 set, u16 clr)
+{
+ return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
+ &hba->ee_usr_mask, set, clr);
+}
+
+static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
+ u16 set, u16 clr)
+{
+ return ufshcd_update_ee_control(hba, &hba->ee_usr_mask,
+ &hba->ee_drv_mask, set, clr);
+}
+
#endif /* End of Header */
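
The two inline wrappers above feed ufshcd_update_ee_control(), which merges the caller's set/clear request into its own mask and ORs in the other mask before writing the device's single EE_CONTROL attribute. A compilable sketch (plain C, illustrative values) of that merge:

#include <stdio.h>
#include <stdint.h>

static uint16_t update_mask(uint16_t *mask, uint16_t other_mask,
			    uint16_t set, uint16_t clr)
{
	*mask = (*mask & ~clr) | set;	/* apply the caller's request */
	return *mask | other_mask;	/* the device sees the union */
}

int main(void)
{
	uint16_t drv = 0x0004, usr = 0x0020;

	/* driver arms bit 1 and drops bit 2; user bits ride along */
	printf("EE_CONTROL: %#x\n", update_mask(&drv, usr, 0x0002, 0x0004));
	return 0;	/* prints 0x22 */
}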
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6795e1f0e8f8..de95be5d11d4 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -74,13 +74,17 @@ enum {
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
-/* Controller UFSHCI version */
-enum {
- UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
- UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
- UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
- UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
-};
+/*
+ * Controller UFSHCI version
+ * - 2.x and newer use the following scheme:
+ * (major << 8) + (minor << 4)
+ * - 1.x has been converted to match this in
+ * ufshcd_get_ufs_version()
+ */
+static inline u32 ufshci_version(u32 major, u32 minor)
+{
+ return (major << 8) + (minor << 4);
+}
/*
* HCDDID - Host Controller Identification Descriptor
@@ -482,17 +486,21 @@ struct utp_task_req_desc {
struct request_desc_header header;
/* DW 4-11 - Task request UPIU structure */
- struct utp_upiu_header req_header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 __reserved1[2];
+ struct {
+ struct utp_upiu_header req_header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 __reserved1[2];
+ } upiu_req;
/* DW 12-19 - Task Management Response UPIU structure */
- struct utp_upiu_header rsp_header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 __reserved2[3];
+ struct {
+ struct utp_upiu_header rsp_header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 __reserved2[3];
+ } upiu_rsp;
};
#endif /* End of Header */
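
Because ufshci_version() packs the major number into bits 15:8 and the minor into bits 7:4, the encodings are ordered, which is what lets the driver replace the old enum equality tests with <= comparisons. A quick compilable check (plain C):

#include <stdio.h>

static unsigned int ufshci_version(unsigned int major, unsigned int minor)
{
	return (major << 8) + (minor << 4);
}

int main(void)
{
	printf("1.1 -> %#x, 2.0 -> %#x, 2.1 -> %#x\n",
	       ufshci_version(1, 1), ufshci_version(2, 0),
	       ufshci_version(2, 1));
	/* 0x110 <= 0x200 <= 0x210, so version range checks can use <= */
	printf("1.1 <= 2.0: %d\n",
	       ufshci_version(1, 1) <= ufshci_version(2, 0));
	return 0;
}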
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index e5c443bfbdf9..2c54c5d8412d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1154,10 +1154,10 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, be32_to_cpu(hdr->data_length),
- cmd->data_direction, sam_task_attr,
- cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
+ __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+ cmd->data_direction, sam_task_attr,
+ cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
@@ -1167,7 +1167,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
target_get_sess_cmd(&cmd->se_cmd, true);
cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
- cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
+ cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb,
+ GFP_KERNEL);
+
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_cmd(cmd,
@@ -2012,10 +2014,10 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
buf);
}
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
- scsilun_to_int(&hdr->lun));
+ __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
+ scsilun_to_int(&hdr->lun));
target_get_sess_cmd(&cmd->se_cmd, true);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0fa1d57b26fa..f4a24fa5058e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -161,14 +161,13 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
char *str, *str2, *ip_str, *port_str;
struct sockaddr_storage sockaddr = { };
int ret;
- char buf[MAX_PORTAL_LEN + 1];
+ char buf[MAX_PORTAL_LEN + 1] = { };
if (strlen(name) > MAX_PORTAL_LEN) {
pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
(int)strlen(name), MAX_PORTAL_LEN);
return ERR_PTR(-EOVERFLOW);
}
- memset(buf, 0, MAX_PORTAL_LEN + 1);
snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
str = strstr(buf, "[");
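
Several hunks in this series replace a memset() with an empty-brace initializer. A two-line demonstration (plain C; note that `= { }` with no element is a GNU extension, standardized only in C23 — `= { 0 }` is the portable spelling):

#include <stdio.h>

int main(void)
{
	char buf[8] = { };	/* every byte zero-filled at definition */

	printf("%d %d\n", buf[0], buf[7]);	/* prints 0 0 */
	return 0;
}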
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8b40f10976ff..151e2949bb75 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -28,7 +28,6 @@
#include "iscsi_target_auth.h"
#define MAX_LOGIN_PDUS 7
-#define TEXT_LEN 4096
void convert_null_to_semi(char *buf, int len)
{
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 35e75a3569c9..cce3a827059e 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -28,7 +28,6 @@
/* Instance Attributes Table */
#define ISCSI_INST_NUM_NODES 1
#define ISCSI_INST_DESCR "Storage Engine Target"
-#define ISCSI_INST_LAST_FAILURE_TYPE 0
#define ISCSI_DISCONTINUITY_TIME 0
#define ISCSI_NODE_INDEX 1
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 9468b017b4a7..6dd5810e2af1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -28,23 +28,6 @@
#include "iscsi_target_util.h"
#include "iscsi_target.h"
-#define PRINT_BUFF(buff, len) \
-{ \
- int zzz; \
- \
- pr_debug("%d:\n", __LINE__); \
- for (zzz = 0; zzz < len; zzz++) { \
- if (zzz % 16 == 0) { \
- if (zzz) \
- pr_debug("\n"); \
- pr_debug("%4i: ", zzz); \
- } \
- pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
- } \
- if ((len + 1) % 16) \
- pr_debug("\n"); \
-}
-
extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index badba437e5f9..2687fd7d45db 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -39,7 +39,6 @@
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
-static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
static int tcm_loop_hba_no_cnt;
@@ -67,8 +66,12 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
+ struct scsi_cmnd *sc = tl_cmd->sc;
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ else
+ sc->scsi_done(sc);
}
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
@@ -102,10 +105,8 @@ static struct device_driver tcm_loop_driverfs = {
*/
static struct device *tcm_loop_primary;
-static void tcm_loop_submission_work(struct work_struct *work)
+static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
{
- struct tcm_loop_cmd *tl_cmd =
- container_of(work, struct tcm_loop_cmd, work);
struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
struct scsi_cmnd *sc = tl_cmd->sc;
struct tcm_loop_nexus *tl_nexus;
@@ -113,7 +114,6 @@ static void tcm_loop_submission_work(struct work_struct *work)
struct tcm_loop_tpg *tl_tpg;
struct scatterlist *sgl_bidi = NULL;
u32 sgl_bidi_count = 0, transfer_length;
- int rc;
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
@@ -151,21 +151,20 @@ static void tcm_loop_submission_work(struct work_struct *work)
}
se_cmd->tag = tl_cmd->sc_cmd_tag;
- rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
- &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
- transfer_length, TCM_SIMPLE_TAG,
- sc->sc_data_direction, 0,
- scsi_sglist(sc), scsi_sg_count(sc),
- sgl_bidi, sgl_bidi_count,
- scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
- if (rc < 0) {
- set_host_byte(sc, DID_NO_CONNECT);
- goto out_done;
- }
+ target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
+ tl_cmd->sc->device->lun, transfer_length,
+ TCM_SIMPLE_TAG, sc->sc_data_direction, 0);
+
+ if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
+ scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
+ scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
+ GFP_ATOMIC))
+ return;
+
+ target_queue_submission(se_cmd);
return;
out_done:
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
sc->scsi_done(sc);
}
@@ -175,24 +174,18 @@ out_done:
*/
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
- struct tcm_loop_cmd *tl_cmd;
+ struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
__func__, sc->device->host->host_no, sc->device->id,
sc->device->channel, sc->device->lun, sc->cmnd[0],
scsi_bufflen(sc));
- tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
- if (!tl_cmd) {
- set_host_byte(sc, DID_ERROR);
- sc->scsi_done(sc);
- return 0;
- }
-
+ memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
tl_cmd->sc_cmd_tag = sc->request->tag;
- INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
- queue_work(tcm_loop_workqueue, &tl_cmd->work);
+
+ tcm_loop_target_queue_cmd(tl_cmd);
return 0;
}
@@ -320,6 +313,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
.dma_boundary = PAGE_SIZE - 1,
.module = THIS_MODULE,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct tcm_loop_cmd),
};
static int tcm_loop_driver_probe(struct device *dev)
@@ -580,7 +574,6 @@ static int tcm_loop_queue_data_or_status(const char *func,
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
(se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
scsi_set_resid(sc, se_cmd->residual_count);
- sc->scsi_done(sc);
return 0;
}
@@ -1164,17 +1157,13 @@ static int __init tcm_loop_fabric_init(void)
{
int ret = -ENOMEM;
- tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
- if (!tcm_loop_workqueue)
- goto out;
-
tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
sizeof(struct tcm_loop_cmd),
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
- goto out_destroy_workqueue;
+ goto out;
}
ret = tcm_loop_alloc_core_bus();
@@ -1191,8 +1180,6 @@ out_release_core_bus:
tcm_loop_release_core_bus();
out_destroy_cache:
kmem_cache_destroy(tcm_loop_cmd_cache);
-out_destroy_workqueue:
- destroy_workqueue(tcm_loop_workqueue);
out:
return ret;
}
@@ -1202,7 +1189,6 @@ static void __exit tcm_loop_fabric_exit(void)
target_unregister_template(&loop_ops);
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
- destroy_workqueue(tcm_loop_workqueue);
}
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
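
The queuecommand path above no longer allocates from tcm_loop_cmd_cache in the fast path; setting .cmd_size makes the SCSI midlayer reserve driver-private bytes behind every command, which scsi_cmd_priv() hands back. A stand-in model (plain C; the struct names are simplified stand-ins, not the kernel's) of that layout:

#include <stdio.h>
#include <stdlib.h>

struct cmd_hdr { int tag; };		/* stand-in for struct scsi_cmnd */
struct cmd_priv { int sc_cmd_tag; };	/* stand-in for struct tcm_loop_cmd */

static void *cmd_priv(struct cmd_hdr *sc)
{
	return sc + 1;	/* private area sits directly behind the command */
}

int main(void)
{
	struct cmd_hdr *sc = calloc(1, sizeof(*sc) + sizeof(struct cmd_priv));
	struct cmd_priv *tl = cmd_priv(sc);

	tl->sc_cmd_tag = 42;	/* no per-command cache allocation needed */
	printf("tag %d\n", tl->sc_cmd_tag);
	free(sc);
	return 0;
}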
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index d3110909a213..437663b3905c 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,7 +16,6 @@ struct tcm_loop_cmd {
struct scsi_cmnd *sc;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tl_se_cmd;
- struct work_struct work;
struct completion tmr_done;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 2a6165febd3b..ce84f93c183a 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1218,11 +1218,9 @@ static void sbp_handle_command(struct sbp_target_request *req)
/* only used for printk until we do TMRs */
req->se_cmd.tag = req->orb_pointer;
- if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
- req->sense_buf, unpacked_lun, data_length,
- TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
- goto err;
-
+ target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+ req->sense_buf, unpacked_lun, data_length,
+ TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
return;
err:
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f04352285155..4b2e49341ad6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1494,7 +1494,7 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
- unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+ unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
/*
* If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
@@ -1536,7 +1536,6 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
* Also, strip any newline added from the userspace
* echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
*/
- memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
@@ -1556,11 +1555,9 @@ static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
{
struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct t10_vpd *vpd;
- unsigned char buf[VPD_TMP_BUF_SIZE];
+ unsigned char buf[VPD_TMP_BUF_SIZE] = { };
ssize_t len = 0;
- memset(buf, 0, VPD_TMP_BUF_SIZE);
-
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
if (!vpd->protocol_identifier_set)
@@ -1663,9 +1660,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
- char i_buf[PR_REG_ISID_ID_LEN];
-
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg)
@@ -2286,7 +2281,7 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
- unsigned char buf[LU_GROUP_NAME_BUF];
+ unsigned char buf[LU_GROUP_NAME_BUF] = { };
int move = 0;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
@@ -2297,7 +2292,6 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
- memset(buf, 0, LU_GROUP_NAME_BUF);
memcpy(buf, page, count);
/*
* Any ALUA logical unit alias besides "NULL" means we will be
@@ -2615,9 +2609,7 @@ static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
- unsigned char buf[LU_GROUP_NAME_BUF];
-
- memset(buf, 0, LU_GROUP_NAME_BUF);
+ unsigned char buf[LU_GROUP_NAME_BUF] = { };
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
@@ -2753,8 +2745,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do implicit ALUA on non valid"
- " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+ pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n");
return -EINVAL;
}
if (!target_dev_configured(dev)) {
@@ -2805,9 +2796,7 @@ static ssize_t target_tg_pt_gp_alua_access_status_store(
int new_status, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do set ALUA access status on non"
- " valid tg_pt_gp ID: %hu\n",
- tg_pt_gp->tg_pt_gp_valid_id);
+ pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n");
return -EINVAL;
}
@@ -2860,9 +2849,7 @@ static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \
int ret; \
\
if (!t->tg_pt_gp_valid_id) { \
- pr_err("Unable to do set " #_name " ALUA state on non" \
- " valid tg_pt_gp ID: %hu\n", \
- t->tg_pt_gp_valid_id); \
+ pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \
return -EINVAL; \
} \
\
@@ -3020,9 +3007,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct se_lun *lun;
ssize_t len = 0, cur_len;
- unsigned char buf[TG_PT_GROUP_NAME_BUF];
-
- memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+ unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
@@ -3409,11 +3394,10 @@ static struct config_group *target_core_call_addhbatotarget(
{
char *se_plugin_str, *str, *str2;
struct se_hba *hba;
- char buf[TARGET_CORE_NAME_MAX_LEN];
+ char buf[TARGET_CORE_NAME_MAX_LEN] = { };
unsigned long plugin_dep_id = 0;
int ret;
- memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
pr_err("Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7787c527aad3..a8df9f0a82fa 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -735,8 +735,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
- INIT_LIST_HEAD(&dev->queues[i].state_list);
- spin_lock_init(&dev->queues[i].lock);
+ struct se_device_queue *q;
+
+ q = &dev->queues[i];
+ INIT_LIST_HEAD(&q->state_list);
+ spin_lock_init(&q->lock);
+
+ init_llist_head(&q->sq.cmd_list);
+ INIT_WORK(&q->sq.work, target_queued_submit_work);
}
dev->se_hba = hba;
@@ -1029,7 +1035,7 @@ int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
- char buf[] = "rd_pages=8,rd_nullio=1";
+ char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
int ret;
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index ee85602213f7..fc7edc04ee09 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -892,6 +892,7 @@ static void target_fabric_release_wwn(struct config_item *item)
struct target_fabric_configfs *tf = wwn->wwn_tf;
configfs_remove_default_groups(&wwn->fabric_stat_group);
+ configfs_remove_default_groups(&wwn->param_group);
tf->tf_ops->fabric_drop_wwn(wwn);
}
@@ -918,6 +919,57 @@ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
/* End of tfc_wwn_fabric_stats_cit */
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item,
+ char *page)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ return sprintf(page, "%d\n",
+ wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ?
+ SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity);
+}
+
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ int compl_val;
+
+ if (kstrtoint(page, 0, &compl_val))
+ return -EINVAL;
+
+ switch (compl_val) {
+ case SE_COMPL_AFFINITY_CPUID:
+ wwn->cmd_compl_affinity = compl_val;
+ break;
+ case SE_COMPL_AFFINITY_CURR_CPU:
+ wwn->cmd_compl_affinity = WORK_CPU_UNBOUND;
+ break;
+ default:
+ if (compl_val < 0 || compl_val >= nr_cpu_ids ||
+ !cpu_online(compl_val)) {
+ pr_err("Command completion value must be between %d and %d or an online CPU.\n",
+ SE_COMPL_AFFINITY_CPUID,
+ SE_COMPL_AFFINITY_CURR_CPU);
+ return -EINVAL;
+ }
+ wwn->cmd_compl_affinity = compl_val;
+ }
+
+ return count;
+}
+CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
+
+static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
+ &target_fabric_wwn_attr_cmd_completion_affinity,
+ NULL,
+};
+
+TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs);
+
/* Start of tfc_wwn_cit */
static struct config_group *target_fabric_make_wwn(
@@ -937,6 +989,7 @@ static struct config_group *target_fabric_make_wwn(
if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
+ wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID;
wwn->wwn_tf = tf;
config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
@@ -945,6 +998,10 @@ static struct config_group *target_fabric_make_wwn(
&tf->tf_wwn_fabric_stats_cit);
configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
+ config_group_init_type_name(&wwn->param_group, "param",
+ &tf->tf_wwn_param_cit);
+ configfs_add_default_group(&wwn->param_group, &wwn->wwn_group);
+
if (tf->tf_ops->add_wwn_groups)
tf->tf_ops->add_wwn_groups(wwn);
return &wwn->wwn_group;
@@ -974,6 +1031,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
target_fabric_setup_discovery_cit(tf);
target_fabric_setup_wwn_cit(tf);
target_fabric_setup_wwn_fabric_stats_cit(tf);
+ target_fabric_setup_wwn_param_cit(tf);
target_fabric_setup_tpg_cit(tf);
target_fabric_setup_tpg_base_cit(tf);
target_fabric_setup_tpg_port_cit(tf);
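
The store handler above accepts either one of the SE_COMPL_AFFINITY_* sentinels or an online CPU number. A compilable sketch (plain C; the sentinel values and the online-CPU stub are assumptions for illustration) of the same validation:

#include <stdio.h>

#define SE_COMPL_AFFINITY_CPUID		-1	/* assumed sentinel values */
#define SE_COMPL_AFFINITY_CURR_CPU	-2
#define NR_CPU_IDS			8

static int cpu_online_stub(int cpu) { return cpu >= 0 && cpu < 4; }

static int valid_affinity(int val)
{
	if (val == SE_COMPL_AFFINITY_CPUID ||
	    val == SE_COMPL_AFFINITY_CURR_CPU)
		return 1;
	return val >= 0 && val < NR_CPU_IDS && cpu_online_stub(val);
}

int main(void)
{
	printf("%d %d %d\n", valid_affinity(-2), valid_affinity(2),
	       valid_affinity(6));	/* prints 1 1 0 */
	return 0;
}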
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 5a66854def95..ef4a8e189fba 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -498,6 +498,7 @@ fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
prot_length = nolb * se_dev->prot_length;
+ memset(buf, 0xff, bufsize);
for (prot = 0; prot < prot_length;) {
sector_t len = min_t(sector_t, bufsize, prot_length - prot);
ssize_t ret = kernel_write(prot_fd, buf, len, &pos);
@@ -523,7 +524,6 @@ fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
- memset(buf, 0xff, PAGE_SIZE);
rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
@@ -882,7 +882,6 @@ static int fd_format_prot(struct se_device *dev)
(unsigned long long)(dev->transport->get_blocks(dev) + 1) *
dev->prot_length);
- memset(buf, 0xff, unit_size);
ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
buf, unit_size);
vfree(buf);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ee3d52061281..d6fdd1c61f90 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -61,9 +61,18 @@ static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *nam
return NULL;
}
+ ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
+ GFP_KERNEL);
+ if (!ib_dev->ibd_plug)
+ goto free_dev;
+
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return &ib_dev->dev;
+
+free_dev:
+ kfree(ib_dev);
+ return NULL;
}
static int iblock_configure_device(struct se_device *dev)
@@ -171,6 +180,7 @@ static void iblock_dev_call_rcu(struct rcu_head *p)
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ kfree(ib_dev->ibd_plug);
kfree(ib_dev);
}
@@ -188,6 +198,33 @@ static void iblock_destroy_device(struct se_device *dev)
bioset_exit(&ib_dev->ibd_bio_set);
}
+static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
+ struct iblock_dev_plug *ib_dev_plug;
+
+ /*
+ * Each se_device has a per-CPU work this can be run from. We
+ * shouldn't have multiple threads on the same CPU calling this
+ * at the same time.
+ */
+ ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()];
+ if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
+ return NULL;
+
+ blk_start_plug(&ib_dev_plug->blk_plug);
+ return &ib_dev_plug->se_plug;
+}
+
+static void iblock_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
+ struct iblock_dev_plug, se_plug);
+
+ blk_finish_plug(&ib_dev_plug->blk_plug);
+ clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
+}
+
static unsigned long long iblock_emulate_read_cap_with_block_size(
struct se_device *dev,
struct block_device *bd,
@@ -304,9 +341,8 @@ static void iblock_bio_done(struct bio *bio)
iblock_complete_cmd(cmd);
}
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
- int op_flags)
+static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
+ unsigned int opf)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
@@ -326,7 +362,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = opf;
return bio;
}
@@ -335,7 +371,10 @@ static void iblock_submit_bios(struct bio_list *list)
{
struct blk_plug plug;
struct bio *bio;
-
+ /*
+ * The block layer handles nested plugs, so just plug/unplug to handle
+ * fabric drivers that don't support batching and multi-bio commands.
+ */
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
submit_bio(bio);
@@ -477,7 +516,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
goto fail;
cmd->priv = ibr;
- bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
if (!bio)
goto fail_free_ibr;
@@ -490,8 +529,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
- bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
- 0);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
if (!bio)
goto fail_put_bios;
@@ -685,9 +723,11 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
+ unsigned int opf;
unsigned bio_cnt;
- int i, rc, op, op_flags = 0;
+ int i, rc;
struct sg_mapping_iter prot_miter;
+ unsigned int miter_dir;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -696,15 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
- op = REQ_OP_WRITE;
+ opf = REQ_OP_WRITE;
+ miter_dir = SG_MITER_TO_SG;
if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
- op_flags = REQ_FUA;
+ opf |= REQ_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- op_flags = REQ_FUA;
+ opf |= REQ_FUA;
}
} else {
- op = REQ_OP_READ;
+ opf = REQ_OP_READ;
+ miter_dir = SG_MITER_FROM_SG;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -718,7 +760,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return 0;
}
- bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
+ bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
if (!bio)
goto fail_free_ibr;
@@ -730,8 +772,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
- op == REQ_OP_READ ? SG_MITER_FROM_SG :
- SG_MITER_TO_SG);
+ miter_dir);
for_each_sg(sgl, sg, sgl_nents, i) {
/*
@@ -752,8 +793,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bio_cnt = 0;
}
- bio = iblock_get_bio(cmd, block_lba, sg_num, op,
- op_flags);
+ bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
if (!bio)
goto fail_put_bios;
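
This hunk completes the switch from a separate (op, op_flags) pair to a single opf word. A tiny compilable illustration (plain C; the flag values are stand-ins, not the block layer's):

#include <stdio.h>

#define REQ_OP_READ	0u	/* stand-in values for illustration */
#define REQ_OP_WRITE	1u
#define REQ_FUA		(1u << 16)

int main(void)
{
	unsigned int opf = REQ_OP_WRITE;

	opf |= REQ_FUA;	/* force write-through when no volatile cache */
	printf("opf = %#x\n", opf);
	return 0;
}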
@@ -813,7 +853,8 @@ static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
- int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+ unsigned int logs_per_phys =
+ bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
return ilog2(logs_per_phys);
}
@@ -867,6 +908,8 @@ static const struct target_backend_ops iblock_ops = {
.configure_device = iblock_configure_device,
.destroy_device = iblock_destroy_device,
.free_device = iblock_free_device,
+ .plug_device = iblock_plug_device,
+ .unplug_device = iblock_unplug_device,
.parse_cdb = iblock_parse_cdb,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index cefc641145b3..8c55375d2f75 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/blkdev.h>
#include <target/target_core_base.h>
#define IBLOCK_VERSION "4.0"
@@ -17,6 +18,14 @@ struct iblock_req {
#define IBDF_HAS_UDEV_PATH 0x01
+#define IBD_PLUGF_PLUGGED 0x01
+
+struct iblock_dev_plug {
+ struct se_dev_plug se_plug;
+ struct blk_plug blk_plug;
+ unsigned long flags;
+};
+
struct iblock_dev {
struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
@@ -24,6 +33,7 @@ struct iblock_dev {
struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
bool ibd_readonly;
+ struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */
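The new header fields back the plug_device/unplug_device callbacks wired into iblock_ops above. As a hedged sketch only (the real callbacks live elsewhere in this patch, and this assumes one submission context per CPU), the plug side can use IBD_PLUGF_PLUGGED to make plugging idempotent:

static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;	/* already plugged on this CPU */

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}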
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e7b3c6e5d574..a343bcfa2180 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -34,6 +34,7 @@ struct target_fabric_configfs {
struct config_item_type tf_discovery_cit;
struct config_item_type tf_wwn_cit;
struct config_item_type tf_wwn_fabric_stats_cit;
+ struct config_item_type tf_wwn_param_cit;
struct config_item_type tf_tpg_cit;
struct config_item_type tf_tpg_base_cit;
struct config_item_type tf_tpg_lun_cit;
@@ -153,6 +154,7 @@ void target_qf_do_work(struct work_struct *work);
bool target_check_wce(struct se_device *dev);
bool target_check_fua(struct se_device *dev);
void __target_execute_cmd(struct se_cmd *, bool);
+void target_queued_submit_work(struct work_struct *work);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d4cc43afe05b..6fd5fec95539 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -896,9 +896,8 @@ static void core_scsi3_aptpl_reserve(
struct se_node_acl *node_acl,
struct t10_pr_registration *pr_reg)
{
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
spin_lock(&dev->dev_reservation_lock);
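This is the pattern applied throughout target_core_pr.c in this patch: an empty aggregate initializer zero-fills the buffer at definition, so the explicit memset() that used to follow is redundant:

	char i_buf[PR_REG_ISID_ID_LEN] = { };
	/* equivalent to: char i_buf[PR_REG_ISID_ID_LEN]; memset(i_buf, 0, sizeof(i_buf)); */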
@@ -928,12 +927,10 @@ static int __core_scsi3_check_aptpl_registration(
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
- unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+ unsigned char i_port[PR_APTPL_MAX_IPORT_LEN] = { };
+ unsigned char t_port[PR_APTPL_MAX_TPORT_LEN] = { };
u16 tpgt;
- memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
- memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
/*
* Copy Initiator Port information from struct se_node_acl
*/
@@ -1023,9 +1020,8 @@ static void __core_scsi3_dump_registration(
enum register_type register_type)
{
struct se_portal_group *se_tpg = nacl->se_tpg;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
- memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
@@ -1204,10 +1200,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
struct se_session *sess)
{
struct se_portal_group *tpg = nacl->se_tpg;
- unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ unsigned char buf[PR_REG_ISID_LEN] = { };
+ unsigned char *isid_ptr = NULL;
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
- memset(&buf[0], 0, PR_REG_ISID_LEN);
tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
PR_REG_ISID_LEN);
isid_ptr = &buf[0];
@@ -1285,11 +1281,10 @@ static void __core_scsi3_free_registration(
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
struct se_dev_entry *deve;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
lockdep_assert_held(&pr_tmpl->registration_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
if (!list_empty(&pr_reg->pr_reg_list))
@@ -1642,8 +1637,7 @@ core_scsi3_decode_spec_i_port(
}
dest_tpg = tmp_tpg;
- pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
- " %s Port RTPI: %hu\n",
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s Port RTPI: %u\n",
dest_tpg->se_tpg_tfo->fabric_name,
dest_node_acl->initiatorname, dest_rtpi);
@@ -1680,8 +1674,7 @@ core_scsi3_decode_spec_i_port(
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
dest_rtpi);
if (!dest_se_deve) {
- pr_err("Unable to locate %s dest_se_deve"
- " from destination RTPI: %hu\n",
+ pr_err("Unable to locate %s dest_se_deve from destination RTPI: %u\n",
dest_tpg->se_tpg_tfo->fabric_name,
dest_rtpi);
@@ -2059,7 +2052,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ unsigned char isid_buf[PR_REG_ISID_LEN] = { };
+ unsigned char *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
int pr_holder = 0, type;
@@ -2070,7 +2064,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
se_tpg = se_sess->se_tpg;
if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
- memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
PR_REG_ISID_LEN);
isid_ptr = &isid_buf[0];
@@ -2282,11 +2275,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
sense_reason_t ret;
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
-
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2457,12 +2448,11 @@ static void __core_scsi3_complete_pro_release(
int unreg)
{
const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
int pr_res_type = 0, pr_res_scope = 0;
lockdep_assert_held(&dev->dev_reservation_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
@@ -2768,11 +2758,10 @@ static void __core_scsi3_complete_pro_preempt(
{
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
lockdep_assert_held(&dev->dev_reservation_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Do an implicit RELEASE of the existing reservation.
@@ -3158,7 +3147,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
const unsigned char *initiator_str;
- char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { };
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
@@ -3170,7 +3159,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
tf_ops = se_tpg->se_tpg_tfo;
/*
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index d64c3ffdb52e..f2a11414366d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -34,8 +34,6 @@
#include "target_core_internal.h"
#include "target_core_pscsi.h"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
return container_of(dev, struct pscsi_dev_virt, dev);
@@ -620,8 +618,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
unsigned char *buf;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (!buf) {
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
+ }
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -1047,7 +1046,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
int result = scsi_req(req)->result;
u8 scsi_status = status_byte(result) << 1;
- if (scsi_status) {
+ if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
result);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index bf936bbeccfe..6648c1c90e19 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -530,12 +530,13 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
enum {
- Opt_rd_pages, Opt_rd_nullio, Opt_err
+ Opt_rd_pages, Opt_rd_nullio, Opt_rd_dummy, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
{Opt_rd_nullio, "rd_nullio=%d"},
+ {Opt_rd_dummy, "rd_dummy=%d"},
{Opt_err, NULL}
};
@@ -574,6 +575,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
rd_dev->rd_flags |= RDF_NULLIO;
break;
+ case Opt_rd_dummy:
+ match_int(args, &arg);
+ if (arg != 1)
+ break;
+
+ pr_debug("RAMDISK: Setting DUMMY flag: %d\n", arg);
+ rd_dev->rd_flags |= RDF_DUMMY;
+ break;
default:
break;
}
@@ -590,12 +599,22 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
- " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+ " SG_table_count: %u nullio: %d dummy: %d\n",
+ rd_dev->rd_page_count,
PAGE_SIZE, rd_dev->sg_table_count,
- !!(rd_dev->rd_flags & RDF_NULLIO));
+ !!(rd_dev->rd_flags & RDF_NULLIO),
+ !!(rd_dev->rd_flags & RDF_DUMMY));
return bl;
}
+static u32 rd_get_device_type(struct se_device *dev)
+{
+ if (RD_DEV(dev)->rd_flags & RDF_DUMMY)
+ return 0x3f; /* Unknown device type, not connected */
+ else
+ return sbc_get_device_type(dev);
+}
+
static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
@@ -647,7 +666,7 @@ static const struct target_backend_ops rd_mcp_ops = {
.parse_cdb = rd_parse_cdb,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_device_type = sbc_get_device_type,
+ .get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
.init_prot = rd_init_prot,
.free_prot = rd_free_prot,
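With RDF_DUMMY set, rd_get_device_type() reports peripheral device type 0x3f (unknown device, not connected) in standard INQUIRY data rather than the usual sbc_get_device_type() result; this pairs with the spc_emulate_inquiry() change below, which drops the tpg_virt_lun0 special case and relies on the backend's get_device_type() instead. As a hedged usage note (device name hypothetical, path per the standard target configfs layout), the flag is set through the backstore control file before the device is enabled, e.g. echo rd_dummy=1 > /sys/kernel/config/target/core/rd_mcp_0/dummy/control.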
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 8b88f9b14c3f..9ffda5c4b584 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -28,6 +28,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01
#define RDF_NULLIO 0x02
+#define RDF_DUMMY 0x04
struct rd_dev {
struct se_device dev;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index f7c527a826fd..7b07e557dc8d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -448,7 +448,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
sense_reason_t ret;
unsigned int offset;
size_t rc;
- int i;
+ int sg_cnt;
buf = kzalloc(cmp_len, GFP_KERNEL);
if (!buf) {
@@ -467,7 +467,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
*/
offset = 0;
ret = TCM_NO_SENSE;
- for_each_sg(read_sgl, sg, read_nents, i) {
+ for_each_sg(read_sgl, sg, read_nents, sg_cnt) {
unsigned int len = min(sg->length, cmp_len);
unsigned char *addr = kmap_atomic(sg_page(sg));
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index ca5579ebc81d..70a661801cb9 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -701,7 +701,6 @@ static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
@@ -715,10 +714,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
- buf[0] = 0x3f; /* Not connected */
- else
- buf[0] = dev->transport->get_device_type(dev);
+ buf[0] = dev->transport->get_device_type(dev);
if (!(cdb[1] & 0x1)) {
if (cdb[2]) {
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 237309db4b33..62d15bcc3d93 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -31,9 +31,6 @@
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif
-#define NONE "None"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
#define SCSI_LU_INDEX 1
#define LU_COUNT 1
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 7347285471fa..e7fcbc09f9db 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -124,6 +124,8 @@ void core_tmr_abort_task(
int i;
for (i = 0; i < dev->queue_cnt; i++) {
+ flush_work(&dev->queues[i].sq.work);
+
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
state_list) {
@@ -302,6 +304,8 @@ static void core_tmr_drain_state_list(
* in the Control Mode Page.
*/
for (i = 0; i < dev->queue_cnt; i++) {
+ flush_work(&dev->queues[i].sq.work);
+
spin_lock_irqsave(&dev->queues[i].lock, flags);
list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
state_list) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5ecb9f18a53d..8fbfe75c5744 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -41,6 +41,7 @@
#include <trace/events/target.h>
static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
@@ -129,8 +130,15 @@ int init_se_kmem_caches(void)
if (!target_completion_wq)
goto out_free_lba_map_mem_cache;
+ target_submission_wq = alloc_workqueue("target_submission",
+ WQ_MEM_RECLAIM, 0);
+ if (!target_submission_wq)
+ goto out_free_completion_wq;
+
return 0;
+out_free_completion_wq:
+ destroy_workqueue(target_completion_wq);
out_free_lba_map_mem_cache:
kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
@@ -153,6 +161,7 @@ out:
void release_se_kmem_caches(void)
{
+ destroy_workqueue(target_submission_wq);
destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -848,7 +857,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
- int success;
+ struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+ int success, cpu;
unsigned long flags;
if (target_cmd_interrupted(cmd))
@@ -875,7 +885,13 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
- queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+
+ if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+ cpu = cmd->cpuid;
+ else
+ cpu = wwn->cmd_compl_affinity;
+
+ queue_work_on(cpu, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
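The completion CPU is now a per-WWN policy instead of always being the submitting CPU: SE_COMPL_AFFINITY_CPUID keeps the old queue-on-cmd->cpuid behaviour, while any other cmd_compl_affinity value names a fixed CPU chosen by the administrator. The tf_wwn_param_cit config item type added in target_core_internal.h below appears to be the configfs plumbing that exposes this knob.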
@@ -1304,7 +1320,7 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
* Compare the data buffer size from the CDB with the data buffer limit from the transport
* header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
*
- * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
*
* Return: TCM_NO_SENSE
*/
@@ -1371,7 +1387,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
*
* Preserves the value of @cmd->tag.
*/
-void transport_init_se_cmd(
+void __target_init_cmd(
struct se_cmd *cmd,
const struct target_core_fabric_ops *tfo,
struct se_session *se_sess,
@@ -1382,7 +1398,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
cmd->free_compl = NULL;
@@ -1391,6 +1406,7 @@ void transport_init_se_cmd(
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
cmd->data_length = data_length;
@@ -1404,7 +1420,7 @@ void transport_init_se_cmd(
cmd->state_active = false;
}
-EXPORT_SYMBOL(transport_init_se_cmd);
+EXPORT_SYMBOL(__target_init_cmd);
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -1428,11 +1444,10 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
}
sense_reason_t
-target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
{
sense_reason_t ret;
- cmd->t_task_cdb = &cmd->__t_task_cdb[0];
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1450,8 +1465,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
* setup the pointer from __t_task_cdb to t_task_cdb.
*/
if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
- cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
- GFP_KERNEL);
+ cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
if (!cmd->t_task_cdb) {
pr_err("Unable to allocate cmd->t_task_cdb"
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
@@ -1573,46 +1587,31 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
}
/**
- * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
- * se_cmd + use pre-allocated SGL memory.
- *
- * @se_cmd: command descriptor to submit
+ * target_init_cmd - initialize se_cmd
+ * @se_cmd: command descriptor to init
* @se_sess: associated se_sess for endpoint
- * @cdb: pointer to SCSI CDB
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
* @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
- * @sgl: struct scatterlist memory for unidirectional mapping
- * @sgl_count: scatterlist count for unidirectional mapping
- * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
- * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
- * @sgl_prot: struct scatterlist memory protection information
- * @sgl_prot_count: scatterlist count for protection information
*
* Task tags are supported if the caller has set @se_cmd->tag.
*
- * Returns non zero to signal active I/O shutdown failure. All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
+ * Returns:
+ * - less than zero to signal active I/O shutdown failure.
+ * - zero on success.
*
- * This may only be called from process context, and also currently
- * assumes internal allocation of fabric payload buffer by target-core.
+ * If the fabric driver calls target_stop_session, then it must check the
+ * return code and handle failures. This will never fail for other drivers,
+ * and the return code can be ignored.
*/
-int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
- u32 data_length, int task_attr, int data_dir, int flags,
- struct scatterlist *sgl, u32 sgl_count,
- struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
- struct scatterlist *sgl_prot, u32 sgl_prot_count)
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *sense, u64 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
{
struct se_portal_group *se_tpg;
- sense_reason_t rc;
- int ret;
-
- might_sleep();
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
@@ -1621,52 +1620,71 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
if (flags & TARGET_SCF_USE_CPUID)
se_cmd->se_cmd_flags |= SCF_USE_CPUID;
/*
+ * Signal bidirectional data payloads to target-core
+ */
+ if (flags & TARGET_SCF_BIDI_OP)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
+ if (flags & TARGET_SCF_UNKNOWN_SIZE)
+ se_cmd->unknown_data_length = 1;
+ /*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
* target_core_fabric_ops->queue_status() callback
*/
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- data_length, data_dir, task_attr, sense,
- unpacked_lun);
+ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+ data_dir, task_attr, sense, unpacked_lun);
- if (flags & TARGET_SCF_UNKNOWN_SIZE)
- se_cmd->unknown_data_length = 1;
/*
* Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
* necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
- if (ret)
- return ret;
- /*
- * Signal bidirectional data payloads to target-core
- */
- if (flags & TARGET_SCF_BIDI_OP)
- se_cmd->se_cmd_flags |= SCF_BIDI;
+ return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+}
+EXPORT_SYMBOL_GPL(target_init_cmd);
- rc = target_cmd_init_cdb(se_cmd, cdb);
- if (rc) {
- transport_send_check_condition_and_sense(se_cmd, rc, 0);
- target_put_sess_cmd(se_cmd);
- return 0;
- }
+/**
+ * target_submit_prep - prepare cmd for submission
+ * @se_cmd: command descriptor to prep
+ * @cdb: pointer to SCSI CDB
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ * @gfp: gfp allocation type
+ *
+ * Returns:
+ * - less than zero to signal failure.
+ * - zero on success.
+ *
+ * If a failure is returned, LIO will call the caller's queue_status to
+ * complete the cmd.
+ */
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+ struct scatterlist *sgl, u32 sgl_count,
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count,
+ gfp_t gfp)
+{
+ sense_reason_t rc;
+
+ rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
+ if (rc)
+ goto send_cc_direct;
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
rc = transport_lookup_cmd_lun(se_cmd);
- if (rc) {
- transport_send_check_condition_and_sense(se_cmd, rc, 0);
- target_put_sess_cmd(se_cmd);
- return 0;
- }
+ if (rc)
+ goto send_cc_direct;
rc = target_cmd_parse_cdb(se_cmd);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd, rc);
- return 0;
- }
+ if (rc != 0)
+ goto generic_fail;
/*
* Save pointers for SGLs containing protection information,
@@ -1686,6 +1704,41 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
if (sgl_count != 0) {
BUG_ON(!sgl);
+ rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+ sgl_bidi, sgl_bidi_count);
+ if (rc != 0)
+ goto generic_fail;
+ }
+
+ return 0;
+
+send_cc_direct:
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
+ target_put_sess_cmd(se_cmd);
+ return -EIO;
+
+generic_fail:
+ transport_generic_request_failure(se_cmd, rc);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(target_submit_prep);
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep must have been called on the cmd, and this must be
+ * called from process context.
+ */
+void target_submit(struct se_cmd *se_cmd)
+{
+ struct scatterlist *sgl = se_cmd->t_data_sg;
+ unsigned char *buf = NULL;
+
+ might_sleep();
+
+ if (se_cmd->t_data_nents != 0) {
+ BUG_ON(!sgl);
/*
* A work-around for tcm_loop as some userspace code via
 * scsi-generic does not memset its associated read buffers,
@@ -1696,8 +1749,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
*/
if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
se_cmd->data_direction == DMA_FROM_DEVICE) {
- unsigned char *buf = NULL;
-
if (sgl)
buf = kmap(sg_page(sgl)) + sgl->offset;
@@ -1707,12 +1758,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
}
}
- rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
- sgl_bidi, sgl_bidi_count);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd, rc);
- return 0;
- }
}
/*
@@ -1722,9 +1767,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
core_alua_check_nonop_delay(se_cmd);
transport_handle_cdb_direct(se_cmd);
- return 0;
}
-EXPORT_SYMBOL(target_submit_cmd_map_sgls);
+EXPORT_SYMBOL_GPL(target_submit);
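target_submit_cmd_map_sgls() is thus split into three stages, so fabric drivers can separate the shutdown-sensitive init step from CDB/SGL preparation and final submission. A hedged sketch of a caller (function and variable names hypothetical; the rewritten target_submit_cmd() below follows exactly this shape):

static void my_fabric_queue_cmd(struct se_cmd *se_cmd, struct se_session *sess,
				unsigned char *cdb, unsigned char *sense,
				u64 lun, u32 data_len, int attr, int dir,
				int flags)
{
	if (target_init_cmd(se_cmd, sess, sense, lun, data_len, attr, dir,
			    flags))
		return;		/* active I/O shutdown; caller must handle */

	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;		/* LIO already sent a CHECK_CONDITION */

	target_submit(se_cmd);
}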
/**
* target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
@@ -1741,25 +1785,109 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
*
* Task tags are supported if the caller has set @se_cmd->tag.
*
- * Returns non zero to signal active I/O shutdown failure. All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
- *
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
*
 * It also assumes internal target core SGL memory allocation.
+ *
+ * This function must only be used by drivers that do their own
+ * sync during shutdown and do not use target_stop_session. If there
+ * is a failure this function will call into the fabric driver's
+ * queue_status with a CHECK_CONDITION.
*/
-int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
- return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
- unpacked_lun, data_length, task_attr, data_dir,
- flags, NULL, 0, NULL, 0, NULL, 0);
+ int rc;
+
+ rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
+ task_attr, data_dir, flags);
+ WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
+ if (rc)
+ return;
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+ GFP_KERNEL))
+ return;
+
+ target_submit(se_cmd);
}
EXPORT_SYMBOL(target_submit_cmd);
+
+static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
+{
+ struct se_dev_plug *se_plug;
+
+ if (!se_dev->transport->plug_device)
+ return NULL;
+
+ se_plug = se_dev->transport->plug_device(se_dev);
+ if (!se_plug)
+ return NULL;
+
+ se_plug->se_dev = se_dev;
+ /*
+ * We have a ref to the lun at this point, but the cmds could
+ * complete before we unplug, so grab a ref to the se_device so we
+ * can call back into the backend.
+ */
+ config_group_get(&se_dev->dev_group);
+ return se_plug;
+}
+
+static void target_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct se_device *se_dev = se_plug->se_dev;
+
+ se_dev->transport->unplug_device(se_plug);
+ config_group_put(&se_dev->dev_group);
+}
+
+void target_queued_submit_work(struct work_struct *work)
+{
+ struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
+ struct se_cmd *se_cmd, *next_cmd;
+ struct se_dev_plug *se_plug = NULL;
+ struct se_device *se_dev = NULL;
+ struct llist_node *cmd_list;
+
+ cmd_list = llist_del_all(&sq->cmd_list);
+ if (!cmd_list)
+ /* Previous call took what we were queued to submit */
+ return;
+
+ cmd_list = llist_reverse_order(cmd_list);
+ llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
+ if (!se_dev) {
+ se_dev = se_cmd->se_dev;
+ se_plug = target_plug_device(se_dev);
+ }
+
+ target_submit(se_cmd);
+ }
+
+ if (se_plug)
+ target_unplug_device(se_plug);
+}
+
+/**
+ * target_queue_submission - queue the cmd to run on the LIO workqueue
+ * @se_cmd: command descriptor to submit
+ */
+void target_queue_submission(struct se_cmd *se_cmd)
+{
+ struct se_device *se_dev = se_cmd->se_dev;
+ int cpu = se_cmd->cpuid;
+ struct se_cmd_queue *sq;
+
+ sq = &se_dev->queues[cpu].sq;
+ llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
+ queue_work_on(cpu, target_submission_wq, &sq->work);
+}
+EXPORT_SYMBOL_GPL(target_queue_submission);
+
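The queueing above is the classic lock-free llist handoff: producers push with llist_add(), which builds a LIFO, and the single worker drains everything in one shot and restores FIFO submission order. The pattern in miniature:

	/* producer, any context: */
	llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);

	/* consumer, work context: */
	struct llist_node *batch = llist_del_all(&sq->cmd_list);

	if (batch)
		batch = llist_reverse_order(batch);	/* oldest first */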
static void target_complete_tmr_failure(struct work_struct *work)
{
struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
@@ -1799,8 +1927,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
+ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+ 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
/*
* FIXME: Currently expect caller to handle se_cmd->se_tmr_req
* allocation failure.
@@ -2778,9 +2906,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* invocations before se_cmd descriptor release.
*/
if (ack_kref) {
- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
- return -EINVAL;
-
+ kref_get(&se_cmd->cmd_kref);
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index bf73cd5f4b04..eec2fd573e2b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -8,13 +8,12 @@
#include <linux/spinlock.h>
#include <linux/module.h>
-#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
@@ -61,25 +60,27 @@
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
-/* For cmd area, the size is fixed 8MB */
-#define CMDR_SIZE (8 * 1024 * 1024)
+/* For mailbox plus cmd ring, the size is fixed at 8MB */
+#define MB_CMDR_SIZE (8 * 1024 * 1024)
+/* Offset of cmd ring is size of mailbox */
+#define CMDR_OFF sizeof(struct tcmu_mailbox)
+#define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)
/*
- * For data area, the block size is PAGE_SIZE and
- * the total size is 256K * PAGE_SIZE.
+ * For data area, the default block size is PAGE_SIZE and
+ * the default total size is 256K * PAGE_SIZE.
*/
-#define DATA_BLOCK_SIZE PAGE_SIZE
-#define DATA_BLOCK_SHIFT PAGE_SHIFT
-#define DATA_BLOCK_BITS_DEF (256 * 1024)
+#define DATA_PAGES_PER_BLK_DEF 1
+#define DATA_AREA_PAGES_DEF (256 * 1024)
-#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
-#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
+#define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
+#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
/*
 * Default number of global data blocks (512K * PAGE_SIZE)
* when the unmap thread will be started.
*/
-#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
+#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;
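Worked example of the new conversion macros, assuming 4 KiB pages (PAGE_SHIFT == 12): TCMU_MBS_TO_PAGES(1024) == 1024 << (20 - 12) == 262144, so the DATA_AREA_PAGES_DEF default of 256K pages corresponds to a 1024 MiB data area, and the mmap'ed uio region is that data area preceded by the fixed 8 MiB mailbox-plus-ring block:

	/* sketch of the sizing done later in tcmu_configure_device() */
	size_t data_size  = TCMU_MBS_TO_PAGES(data_area_mb) << PAGE_SHIFT;
	size_t mmap_bytes = MB_CMDR_SIZE + data_size;	/* ring first, then data */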
@@ -111,6 +112,7 @@ struct tcmu_dev {
struct kref kref;
struct se_device se_dev;
+ struct se_dev_plug se_plug;
char *name;
struct se_hba *hba;
@@ -119,22 +121,25 @@ struct tcmu_dev {
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
+#define TCM_DEV_BIT_PLUGGED 4
unsigned long flags;
struct uio_info uio_info;
struct inode *inode;
- struct tcmu_mailbox *mb_addr;
uint64_t dev_size;
+
+ struct tcmu_mailbox *mb_addr;
+ void *cmdr;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
- size_t data_size;
+ int data_area_mb;
uint32_t max_blocks;
- size_t ring_size;
+ size_t mmap_pages;
struct mutex cmdr_lock;
struct list_head qfull_queue;
@@ -143,9 +148,11 @@ struct tcmu_dev {
uint32_t dbi_max;
uint32_t dbi_thresh;
unsigned long *data_bitmap;
- struct radix_tree_root data_blocks;
+ struct xarray data_pages;
+ uint32_t data_pages_per_blk;
+ uint32_t data_blk_size;
- struct idr commands;
+ struct xarray commands;
struct timer_list cmd_timer;
unsigned int cmd_time_out;
@@ -165,8 +172,6 @@ struct tcmu_dev {
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
-#define CMDR_OFF sizeof(struct tcmu_mailbox)
-
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
@@ -215,9 +220,9 @@ static LIST_HEAD(timed_out_udevs);
static struct kmem_cache *tcmu_cmd_cache;
-static atomic_t global_db_count = ATOMIC_INIT(0);
+static atomic_t global_page_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
-static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
+static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;
static int tcmu_set_global_max_data_area(const char *str,
const struct kernel_param *kp)
@@ -233,8 +238,8 @@ static int tcmu_set_global_max_data_area(const char *str,
return -EINVAL;
}
- tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, 0);
else
cancel_delayed_work_sync(&tcmu_unmap_work);
@@ -245,7 +250,7 @@ static int tcmu_set_global_max_data_area(const char *str,
static int tcmu_get_global_max_data_area(char *buffer,
const struct kernel_param *kp)
{
- return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
}
static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -497,32 +502,39 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
struct tcmu_cmd *tcmu_cmd,
- int prev_dbi, int *iov_cnt)
+ int prev_dbi, int length, int *iov_cnt)
{
+ XA_STATE(xas, &udev->data_pages, 0);
struct page *page;
- int ret, dbi;
+ int i, cnt, dbi, dpi;
+ int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
if (dbi == udev->dbi_thresh)
return -1;
- page = radix_tree_lookup(&udev->data_blocks, dbi);
- if (!page) {
- if (atomic_add_return(1, &global_db_count) >
- tcmu_global_max_blocks)
- schedule_delayed_work(&tcmu_unmap_work, 0);
+ dpi = dbi * udev->data_pages_per_blk;
+ /* Count the number of already allocated pages */
+ xas_set(&xas, dpi);
+ for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
+ cnt++;
+ for (i = cnt; i < page_cnt; i++) {
/* try to get new page from the mm */
page = alloc_page(GFP_NOIO);
if (!page)
- goto err_alloc;
+ break;
- ret = radix_tree_insert(&udev->data_blocks, dbi, page);
- if (ret)
- goto err_insert;
+ if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
+ __free_page(page);
+ break;
+ }
}
+ if (atomic_add_return(i - cnt, &global_page_count) >
+ tcmu_global_max_pages)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
- if (dbi > udev->dbi_max)
+ if (i && dbi > udev->dbi_max)
udev->dbi_max = dbi;
set_bit(dbi, udev->data_bitmap);
@@ -531,35 +543,27 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
if (dbi != prev_dbi + 1)
*iov_cnt += 1;
- return dbi;
-err_insert:
- __free_page(page);
-err_alloc:
- atomic_dec(&global_db_count);
- return -1;
+ return i == page_cnt ? dbi : -1;
}
static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
+ struct tcmu_cmd *tcmu_cmd, int length)
{
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
- int i, iov_cnt = 0;
+ int blk_data_len, iov_cnt = 0;
+ uint32_t blk_size = udev->data_blk_size;
- for (i = 0; i < dbi_cnt; i++) {
- dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+ for (; length > 0; length -= blk_size) {
+ blk_data_len = min_t(uint32_t, length, blk_size);
+ dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
+ &iov_cnt);
if (dbi < 0)
return -1;
}
return iov_cnt;
}
-static inline struct page *
-tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
-{
- return radix_tree_lookup(&udev->data_blocks, dbi);
-}
-
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
kfree(tcmu_cmd->dbi);
@@ -570,14 +574,15 @@ static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
int i, len;
struct se_cmd *se_cmd = cmd->se_cmd;
+ uint32_t blk_size = cmd->tcmu_dev->data_blk_size;
- cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
len += se_cmd->t_bidi_data_sg[i].length;
- cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
+ cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
cmd->dbi_cnt += cmd->dbi_bidi_cnt;
cmd->data_len_bidi = len;
}
@@ -589,9 +594,8 @@ static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
/* Get the next dbi */
int dbi = tcmu_cmd_get_dbi(cmd);
- /* Do not add more than DATA_BLOCK_SIZE to iov */
- if (len > DATA_BLOCK_SIZE)
- len = DATA_BLOCK_SIZE;
+ /* Do not add more than udev->data_blk_size to iov */
+ len = min_t(int, len, udev->data_blk_size);
/*
* The following code will gather and map the blocks to the same iovec
@@ -603,7 +607,7 @@ static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
(*iov)++;
/* write offset relative to mb_addr */
(*iov)->iov_base = (void __user *)
- (udev->data_off + dbi * DATA_BLOCK_SIZE);
+ (udev->data_off + dbi * udev->data_blk_size);
}
(*iov)->iov_len += len;
@@ -617,7 +621,7 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
int dbi = -2;
/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
- for (; data_length > 0; data_length -= DATA_BLOCK_SIZE)
+ for (; data_length > 0; data_length -= udev->data_blk_size)
dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}
@@ -695,9 +699,11 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
struct scatterlist *sg, unsigned int sg_nents,
struct iovec **iov, size_t data_len)
{
+ XA_STATE(xas, &udev->data_pages, 0);
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
- size_t block_remaining, cp_len;
+ size_t page_remaining, cp_len;
+ int page_cnt, page_inx;
struct sg_mapping_iter sg_iter;
unsigned int sg_flags;
struct page *page;
@@ -715,37 +721,48 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
data_len);
else
dbi = tcmu_cmd_get_dbi(tcmu_cmd);
- page = tcmu_get_block_page(udev, dbi);
- if (direction == TCMU_DATA_AREA_TO_SG)
- flush_dcache_page(page);
- data_page_start = kmap_atomic(page);
- block_remaining = DATA_BLOCK_SIZE;
-
- while (block_remaining && data_len) {
- if (!sg_miter_next(&sg_iter)) {
- /* set length to 0 to abort outer loop */
- data_len = 0;
- pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n");
- break;
+
+ page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
+ if (page_cnt > udev->data_pages_per_blk)
+ page_cnt = udev->data_pages_per_blk;
+
+ xas_set(&xas, dbi * udev->data_pages_per_blk);
+ for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
+ page = xas_next(&xas);
+
+ if (direction == TCMU_DATA_AREA_TO_SG)
+ flush_dcache_page(page);
+ data_page_start = kmap_atomic(page);
+ page_remaining = PAGE_SIZE;
+
+ while (page_remaining && data_len) {
+ if (!sg_miter_next(&sg_iter)) {
+ /* set length to 0 to abort outer loop */
+ data_len = 0;
+ pr_debug("%s: aborting data copy due to exhausted sg_list\n",
+ __func__);
+ break;
+ }
+ cp_len = min3(sg_iter.length, page_remaining,
+ data_len);
+
+ data_addr = data_page_start +
+ PAGE_SIZE - page_remaining;
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ memcpy(data_addr, sg_iter.addr, cp_len);
+ else
+ memcpy(sg_iter.addr, data_addr, cp_len);
+
+ data_len -= cp_len;
+ page_remaining -= cp_len;
+ sg_iter.consumed = cp_len;
}
- cp_len = min3(sg_iter.length, block_remaining, data_len);
+ sg_miter_stop(&sg_iter);
- data_addr = data_page_start +
- DATA_BLOCK_SIZE - block_remaining;
+ kunmap_atomic(data_page_start);
if (direction == TCMU_SG_TO_DATA_AREA)
- memcpy(data_addr, sg_iter.addr, cp_len);
- else
- memcpy(sg_iter.addr, data_addr, cp_len);
-
- data_len -= cp_len;
- block_remaining -= cp_len;
- sg_iter.consumed = cp_len;
+ flush_dcache_page(page);
}
- sg_miter_stop(&sg_iter);
-
- kunmap_atomic(data_page_start);
- if (direction == TCMU_SG_TO_DATA_AREA)
- flush_dcache_page(page);
}
}
@@ -844,9 +861,9 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
(udev->max_blocks - udev->dbi_thresh) + space;
if (blocks_left < cmd->dbi_cnt) {
- pr_debug("no data space: only %lu available, but ask for %lu\n",
- blocks_left * DATA_BLOCK_SIZE,
- cmd->dbi_cnt * DATA_BLOCK_SIZE);
+ pr_debug("no data space: only %lu available, but ask for %u\n",
+ blocks_left * udev->data_blk_size,
+ cmd->dbi_cnt * udev->data_blk_size);
return -1;
}
@@ -855,13 +872,12 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
udev->dbi_thresh = udev->max_blocks;
}
- iov_cnt = tcmu_get_empty_blocks(udev, cmd,
- cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+ iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
if (iov_cnt < 0)
return -1;
if (cmd->dbi_bidi_cnt) {
- ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+ ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
if (ret < 0)
return -1;
}
@@ -941,7 +957,7 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
- hdr = (void *) mb + CMDR_OFF + cmd_head;
+ hdr = udev->cmdr + cmd_head;
tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
tcmu_hdr_set_len(&hdr->len_op, pad_size);
hdr->cmd_id = 0; /* not used for PAD */
@@ -959,6 +975,25 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
return cmd_head;
}
+static void tcmu_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct se_device *se_dev = se_plug->se_dev;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
+ uio_event_notify(&udev->uio_info);
+}
+
+static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ return &udev->se_plug;
+
+ return NULL;
+}
+
/**
* queue_cmd_ring - queue cmd to ring or internally
* @tcmu_cmd: cmd to queue
@@ -977,11 +1012,12 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb = udev->mb_addr;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, iov_bidi_cnt, cmd_id;
- uint32_t cmd_head;
+ int iov_cnt, iov_bidi_cnt;
+ uint32_t cmd_id, cmd_head;
uint64_t cdb_off;
+ uint32_t blk_size = udev->data_blk_size;
/* size of data buffer needed */
- size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
+ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;
*scsi_err = TCM_NO_SENSE;
@@ -998,9 +1034,9 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
if (!list_empty(&udev->qfull_queue))
goto queue;
- if (data_length > udev->data_size) {
+ if (data_length > (size_t)udev->max_blocks * blk_size) {
pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
- data_length, udev->data_size);
+ data_length, (size_t)udev->max_blocks * blk_size);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
@@ -1031,8 +1067,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
*/
goto free_and_queue;
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
+ if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
+ GFP_NOWAIT) < 0) {
pr_err("tcmu: Could not allocate cmd id.\n");
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
@@ -1046,7 +1082,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
cmd_head = ring_insert_padding(udev, command_size);
- entry = (void *) mb + CMDR_OFF + cmd_head;
+ entry = udev->cmdr + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
@@ -1086,8 +1122,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
- /* TODO: only if FLUSH and FUA? */
- uio_event_notify(&udev->uio_info);
+ if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ uio_event_notify(&udev->uio_info);
return 0;
@@ -1138,7 +1174,7 @@ queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
cmd_head = ring_insert_padding(udev, cmd_size);
- entry = (void *)mb + CMDR_OFF + cmd_head;
+ entry = udev->cmdr + cmd_head;
memset(entry, 0, cmd_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
@@ -1253,7 +1289,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
- tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL);
+ tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
if (!tmr)
goto unlock;
@@ -1393,7 +1429,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
- struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
+ struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
/*
* Flush max. up to end of cmd ring since current entry might
@@ -1415,7 +1451,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
- cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
+ cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
if (!cmd) {
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
@@ -1432,8 +1468,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
if (free_space)
free_space = tcmu_run_tmr_queue(udev);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
- idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
+ xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
/*
 * Allocated blocks exceeded the global block limit and there are
 * currently no pending or waiting commands, so try to reclaim blocks.
@@ -1548,7 +1584,10 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->cmd_time_out = TCMU_TIME_OUT;
udev->qfull_time_out = -1;
- udev->max_blocks = DATA_BLOCK_BITS_DEF;
+ udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
+ udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
+ udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
+
mutex_init(&udev->cmdr_lock);
INIT_LIST_HEAD(&udev->node);
@@ -1556,12 +1595,12 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->qfull_queue);
INIT_LIST_HEAD(&udev->tmr_queue);
INIT_LIST_HEAD(&udev->inflight_queue);
- idr_init(&udev->commands);
+ xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+ xa_init(&udev->data_pages);
return &udev->se_dev;
}
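The IDR and the radix tree both become XArrays here. XA_FLAGS_ALLOC1 reproduces the old idr_alloc(..., 1, USHRT_MAX, ...) behaviour of never handing out ID 0; a minimal sketch of the allocating pattern used by queue_cmd_ring():

	struct xarray cmds;
	u32 id;

	xa_init_flags(&cmds, XA_FLAGS_ALLOC1);
	if (xa_alloc(&cmds, &id, tcmu_cmd, XA_LIMIT(1, 0xffff), GFP_NOWAIT) < 0)
		return -1;	/* no free ID in [1, 0xffff] */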
@@ -1585,19 +1624,24 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
return -EINVAL;
}
-static void tcmu_blocks_release(struct radix_tree_root *blocks,
- int start, int end)
+static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
+ unsigned long last)
{
- int i;
+ XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
struct page *page;
+ u32 pages_freed = 0;
- for (i = start; i < end; i++) {
- page = radix_tree_delete(blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
+ xas_lock(&xas);
+ xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
+ xas_store(&xas, NULL);
+ __free_page(page);
+ pages_freed++;
}
+ xas_unlock(&xas);
+
+ atomic_sub(pages_freed, &global_page_count);
+
+ return pages_freed;
}
static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
@@ -1616,7 +1660,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
struct se_device *dev = &udev->se_dev;
struct tcmu_cmd *cmd;
bool all_expired = true;
- int i;
+ unsigned long i;
vfree(udev->mb_addr);
udev->mb_addr = NULL;
@@ -1628,7 +1672,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
/* Upper layer should drain all requests before calling this */
mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
+ xa_for_each(&udev->commands, i, cmd) {
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
@@ -1636,10 +1680,10 @@ static void tcmu_dev_kref_release(struct kref *kref)
tcmu_remove_all_queued_tmr(udev);
if (!list_empty(&udev->qfull_queue))
all_expired = false;
- idr_destroy(&udev->commands);
+ xa_destroy(&udev->commands);
WARN_ON(!all_expired);
- tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
+ tcmu_blocks_release(udev, 0, udev->dbi_max);
bitmap_free(udev->data_bitmap);
mutex_unlock(&udev->cmdr_lock);
@@ -1737,12 +1781,12 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
return -1;
}
-static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
+static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
{
struct page *page;
mutex_lock(&udev->cmdr_lock);
- page = tcmu_get_block_page(udev, dbi);
+ page = xa_load(&udev->data_pages, dpi);
if (likely(page)) {
mutex_unlock(&udev->cmdr_lock);
return page;
@@ -1752,12 +1796,11 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 * Userspace messed up and passed in an address not in the
* data iov passed to it.
*/
- pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
- dbi, udev->name);
- page = NULL;
+ pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
+ dpi, udev->name);
mutex_unlock(&udev->cmdr_lock);
- return page;
+ return NULL;
}
static void tcmu_vma_open(struct vm_area_struct *vma)
@@ -1802,11 +1845,11 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
} else {
- uint32_t dbi;
+ uint32_t dpi;
/* For the dynamically growing data area pages */
- dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
- page = tcmu_try_get_block_page(udev, dbi);
+ dpi = (offset - udev->data_off) / PAGE_SIZE;
+ page = tcmu_try_get_data_page(udev, dpi);
if (!page)
return VM_FAULT_SIGBUS;
}
@@ -1832,7 +1875,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
vma->vm_private_data = udev;
/* Ensure the mmap is exactly the right size */
- if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
+ if (vma_pages(vma) != udev->mmap_pages)
return -EINVAL;
tcmu_vma_open(vma);
@@ -2065,6 +2108,7 @@ static int tcmu_configure_device(struct se_device *dev)
struct tcmu_dev *udev = TCMU_DEV(dev);
struct uio_info *info;
struct tcmu_mailbox *mb;
+ size_t data_size;
int ret = 0;
ret = tcmu_update_uio_info(udev);
@@ -2081,20 +2125,23 @@ static int tcmu_configure_device(struct se_device *dev)
goto err_bitmap_alloc;
}
- udev->mb_addr = vzalloc(CMDR_SIZE);
- if (!udev->mb_addr) {
+ mb = vzalloc(MB_CMDR_SIZE);
+ if (!mb) {
ret = -ENOMEM;
goto err_vzalloc;
}
/* mailbox fits in first part of CMDR space */
- udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
- udev->data_off = CMDR_SIZE;
- udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
+ udev->mb_addr = mb;
+ udev->cmdr = (void *)mb + CMDR_OFF;
+ udev->cmdr_size = CMDR_SIZE;
+ udev->data_off = MB_CMDR_SIZE;
+ data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
+ udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
+ udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
udev->dbi_thresh = 0; /* Default in Idle state */
/* Initialise the mailbox of the ring buffer */
- mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
TCMU_MAILBOX_FLAG_CAP_READ_LEN |
@@ -2103,14 +2150,13 @@ static int tcmu_configure_device(struct se_device *dev)
mb->cmdr_size = udev->cmdr_size;
WARN_ON(!PAGE_ALIGNED(udev->data_off));
- WARN_ON(udev->data_size % PAGE_SIZE);
- WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
+ WARN_ON(data_size % PAGE_SIZE);
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
- info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
+ info->mem[0].size = data_size + MB_CMDR_SIZE;
info->mem[0].memtype = UIO_MEM_NONE;
info->irqcontrol = tcmu_irqcontrol;
@@ -2226,16 +2272,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
- int i;
+ unsigned long i;
mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
+ xa_for_each(&udev->commands, i, cmd) {
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
- idr_remove(&udev->commands, i);
+ xa_erase(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
@@ -2285,7 +2331,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
+ Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
+ Opt_err,
};
static match_table_t tokens = {
@@ -2295,6 +2342,7 @@ static match_table_t tokens = {
{Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_max_data_area_mb, "max_data_area_mb=%d"},
+ {Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
{Opt_err, NULL}
};
@@ -2321,6 +2369,7 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
int val, ret;
+ uint32_t pages_per_blk = udev->data_pages_per_blk;
ret = match_int(arg, &val);
if (ret < 0) {
@@ -2328,11 +2377,20 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
ret);
return ret;
}
-
if (val <= 0) {
pr_err("Invalid max_data_area %d.\n", val);
return -EINVAL;
}
+ if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
+ val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
+ }
+ if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
+ pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
+ val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
+ return -EINVAL;
+ }
mutex_lock(&udev->cmdr_lock);
if (udev->data_bitmap) {
@@ -2341,13 +2399,42 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
goto unlock;
}
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
+ udev->data_area_mb = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
+static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
+ pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n",
+ val, udev->data_area_mb,
+ TCMU_MBS_TO_PAGES(udev->data_area_mb));
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
}
+ udev->data_pages_per_blk = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
+
unlock:
mutex_unlock(&udev->cmdr_lock);
return ret;
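Both setters recompute udev->max_blocks from the same relation. A worked example with illustrative numbers, again assuming 4K pages so that TCMU_MBS_TO_PAGES(mb) = mb << 8:

    /* max_data_area_mb = 8, data_pages_per_blk = 2:
     *   TCMU_MBS_TO_PAGES(8) = 8 << 8   = 2048 pages
     *   max_blocks           = 2048 / 2 = 1024 data blocks
     * and each data block then covers 2 * 4K = 8K of the data area.
     */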
@@ -2404,6 +2491,9 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
case Opt_max_data_area_mb:
ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
+ case Opt_data_pages_per_blk:
+ ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
+ break;
default:
break;
}
@@ -2424,8 +2514,8 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
- bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
- TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+ bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
+ bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);
return bl;
}
@@ -2519,11 +2609,21 @@ static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%u\n",
- TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
+static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
+}
+CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
+
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
@@ -2835,6 +2935,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
&tcmu_attr_qfull_time_out,
&tcmu_attr_max_data_area_mb,
+ &tcmu_attr_data_pages_per_blk,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
@@ -2863,6 +2964,8 @@ static struct target_backend_ops tcmu_ops = {
.configure_device = tcmu_configure_device,
.destroy_device = tcmu_destroy_device,
.free_device = tcmu_free_device,
+ .unplug_device = tcmu_unplug_device,
+ .plug_device = tcmu_plug_device,
.parse_cdb = tcmu_parse_cdb,
.tmr_notify = tcmu_tmr_notify,
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
@@ -2876,9 +2979,10 @@ static void find_free_blocks(void)
{
struct tcmu_dev *udev;
loff_t off;
- u32 start, end, block, total_freed = 0;
+ u32 pages_freed, total_pages_freed = 0;
+ u32 start, end, block, total_blocks_freed = 0;
- if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
+ if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
return;
mutex_lock(&root_udev_mutex);
@@ -2919,20 +3023,22 @@ static void find_free_blocks(void)
}
/* Truncate the data area starting at off */
- off = udev->data_off + start * DATA_BLOCK_SIZE;
+ off = udev->data_off + (loff_t)start * udev->data_blk_size;
unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
/* Release the block pages */
- tcmu_blocks_release(&udev->data_blocks, start, end);
+ pages_freed = tcmu_blocks_release(udev, start, end - 1);
mutex_unlock(&udev->cmdr_lock);
- total_freed += end - start;
- pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
- total_freed, udev->name);
+ total_pages_freed += pages_freed;
+ total_blocks_freed += end - start;
+ pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
+ pages_freed, total_pages_freed, end - start,
+ total_blocks_freed, udev->name);
}
mutex_unlock(&root_udev_mutex);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 66d6f1d06f21..d31ed071cb08 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -554,7 +554,7 @@ static int target_xcopy_setup_pt_cmd(
}
cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
- if (target_cmd_init_cdb(cmd, cdb))
+ if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL))
return -EINVAL;
cmd->tag = 0;
@@ -615,8 +615,8 @@ static int target_xcopy_read_source(
pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
(unsigned long long)src_lba, src_sectors, length);
- transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+ DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
remote_port);
@@ -660,8 +660,8 @@ static int target_xcopy_write_destination(
pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
(unsigned long long)dst_lba, dst_sectors, length);
- transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+ DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
remote_port);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 768f250680d9..410b723f9d79 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -543,16 +543,22 @@ static void ft_send_work(struct work_struct *work)
fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
+
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
- if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
- &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ if (target_init_cmd(&cmd->se_cmd, cmd->sess->se_sess,
+ &cmd->ft_sense_buffer[0],
+ scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl),
+ task_attr, data_dir, TARGET_SCF_ACK_KREF))
goto err;
+ if (target_submit_prep(&cmd->se_cmd, fcp->fc_cdb, NULL, 0, NULL, 0,
+ NULL, 0, GFP_KERNEL))
+ return;
+
+ target_submit(&cmd->se_cmd);
pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
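This is the general shape of the split submission API that fabric drivers convert to throughout this series; a sketch of the pattern (error handling elided, argument names abbreviated):

    /* 1) Initialise the se_cmd and take a session reference; fails only
     *    if the session is being torn down.
     */
    if (target_init_cmd(se_cmd, se_sess, sense_buf, unpacked_lun,
                        data_length, task_attr, data_dir, flags))
            goto err;

    /* 2) Parse the CDB and attach any pre-mapped scatterlists. On failure
     *    the core has already queued the response, so just return.
     */
    if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
                           GFP_KERNEL))
            return;

    /* 3) Execute in the caller's context. */
    target_submit(se_cmd);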
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 23ce506d5402..593540da9346 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -410,7 +410,7 @@ not_target:
}
/**
- * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target
+ * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
* @rdata: remote port private
* @spp_len: service parameter page length
* @rspp: received service parameter page (NULL for outgoing PRLI)
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 410fa89eae8f..7acb507946e6 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1050,19 +1050,17 @@ static void usbg_cmd_work(struct work_struct *work)
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0) {
- transport_init_se_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun);
+ __target_init_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense,
+ cmd->unpacked_lun);
goto out;
}
- if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
- cmd->sense_iu.sense, cmd->unpacked_lun, 0,
- cmd->prio_attr, dir, flags) < 0)
- goto out;
-
+ target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
+ cmd->sense_iu.sense, cmd->unpacked_lun, 0,
+ cmd->prio_attr, dir, flags);
return;
out:
@@ -1181,19 +1179,17 @@ static void bot_cmd_work(struct work_struct *work)
tv_nexus = tpg->tpg_nexus;
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0) {
- transport_init_se_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense,
- cmd->unpacked_lun);
+ __target_init_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense,
+ cmd->unpacked_lun);
goto out;
}
- if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
- cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0) < 0)
- goto out;
-
+ target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+ cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+ cmd->data_len, cmd->prio_attr, dir, 0);
return;
out:
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 5de21ad4bd05..d16c04dcc144 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -85,7 +85,7 @@ struct vhost_scsi_cmd {
/* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count;
u32 tvc_prot_sgl_count;
- /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
+ /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
u32 tvc_lun;
/* Pointer to the SGL formatted memory from virtio-scsi */
struct scatterlist *tvc_sgl;
@@ -101,8 +101,6 @@ struct vhost_scsi_cmd {
struct vhost_scsi_nexus *tvc_nexus;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tvc_se_cmd;
- /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
- struct work_struct work;
/* Copy of the incoming SCSI command descriptor block (CDB) */
unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
/* Sense buffer that will be mapped into outgoing status */
@@ -240,8 +238,6 @@ struct vhost_scsi_ctx {
struct iov_iter out_iter;
};
-static struct workqueue_struct *vhost_scsi_workqueue;
-
/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);
@@ -614,7 +610,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
return ERR_PTR(-EIO);
}
- tag = sbitmap_get(&svq->scsi_tags, 0, false);
+ tag = sbitmap_get(&svq->scsi_tags);
if (tag < 0) {
pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM);
@@ -782,14 +778,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
return TCM_SIMPLE_TAG;
}
-static void vhost_scsi_submission_work(struct work_struct *work)
+static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
- struct vhost_scsi_cmd *cmd =
- container_of(work, struct vhost_scsi_cmd, work);
- struct vhost_scsi_nexus *tv_nexus;
struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
+ struct vhost_scsi_nexus *tv_nexus;
struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
- int rc;
/* FIXME: BIDI operation */
if (cmd->tvc_sgl_count) {
@@ -805,18 +798,17 @@ static void vhost_scsi_submission_work(struct work_struct *work)
tv_nexus = cmd->tvc_nexus;
se_cmd->tag = 0;
- rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
- cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
+ target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
- cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
- sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
- cmd->tvc_prot_sgl_count);
- if (rc < 0) {
- transport_send_check_condition_and_sense(se_cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0);
- }
+ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
+
+ if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
+ cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+ cmd->tvc_prot_sgl_count, GFP_KERNEL))
+ return;
+
+ target_queue_submission(se_cmd);
}
static void
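vhost-scsi follows the same init/prep pattern but finishes with target_queue_submission() instead of target_submit(): the target core queues the command to its own submission workqueue, which is what makes the driver's dedicated workqueue (removed below) unnecessary:

    /* Final step differs from the direct-submit fabrics above: */
    target_queue_submission(se_cmd);   /* defer to the core's workqueue */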
@@ -1132,14 +1124,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
*/
cmd->tvc_vq_desc = vc.head;
- /*
- * Dispatch cmd descriptor for cmwq execution in process
- * context provided by vhost_scsi_workqueue. This also ensures
- * cmd is executed on the same kworker CPU as this vhost
- * thread to gain positive L2 cache locality effects.
- */
- INIT_WORK(&cmd->work, vhost_scsi_submission_work);
- queue_work(vhost_scsi_workqueue, &cmd->work);
+ vhost_scsi_target_queue_cmd(cmd);
ret = 0;
err:
/*
@@ -1512,7 +1497,7 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
return 0;
if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
- NUMA_NO_NODE))
+ NUMA_NO_NODE, false, true))
return -ENOMEM;
svq->max_cmds = max_cmds;
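The two new boolean arguments come from the sbitmap rework pulled in alongside these driver changes; based on the updated prototype, the call reads as:

    /* sbitmap_init_node(sb, depth, shift, flags, node,
     *                   round_robin, alloc_hint)
     *
     * round_robin = false: tags may be reused immediately rather than
     *                      strictly in order;
     * alloc_hint  = true:  the sbitmap keeps its own per-CPU allocation
     *                      hint, so callers no longer pass one to
     *                      sbitmap_get(), as seen earlier in this diff.
     */
    if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
                          NUMA_NO_NODE, false, true))
            return -ENOMEM;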
@@ -2486,17 +2471,9 @@ static int __init vhost_scsi_init(void)
" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
- /*
- * Use our own dedicated workqueue for submitting I/O into
- * target core to avoid contention within system_wq.
- */
- vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
- if (!vhost_scsi_workqueue)
- goto out;
-
ret = vhost_scsi_register();
if (ret < 0)
- goto out_destroy_workqueue;
+ goto out;
ret = target_register_template(&vhost_scsi_ops);
if (ret < 0)
@@ -2506,8 +2483,6 @@ static int __init vhost_scsi_init(void)
out_vhost_scsi_deregister:
vhost_scsi_deregister();
-out_destroy_workqueue:
- destroy_workqueue(vhost_scsi_workqueue);
out:
return ret;
};
@@ -2516,7 +2491,6 @@ static void vhost_scsi_exit(void)
{
target_unregister_template(&vhost_scsi_ops);
vhost_scsi_deregister();
- destroy_workqueue(vhost_scsi_workqueue);
};
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index f3024942fbdf..55a4763da05e 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -360,21 +360,18 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
{
struct se_cmd *se_cmd = &pending_req->se_cmd;
struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
- int rc;
scsiback_get(pending_req->info);
se_cmd->tag = pending_req->rqid;
- rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
- pending_req->sense_buffer, pending_req->v2p->lun,
- pending_req->data_len, 0,
- pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
- pending_req->sgl, pending_req->n_sg,
- NULL, 0, NULL, 0);
- if (rc < 0) {
- transport_send_check_condition_and_sense(se_cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0);
- }
+ target_init_cmd(se_cmd, sess, pending_req->sense_buffer,
+ pending_req->v2p->lun, pending_req->data_len, 0,
+ pending_req->sc_data_direction, TARGET_SCF_ACK_KREF);
+
+ if (target_submit_prep(se_cmd, pending_req->cmnd, pending_req->sgl,
+ pending_req->n_sg, NULL, 0, NULL, 0, GFP_KERNEL))
+ return;
+
+ target_submit(se_cmd);
}
static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,