author	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-11 11:12:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-11 11:12:33 -0700
commit	69585dd69e663a40729492c7b52eb82477a2027a (patch)
tree	5c179ca88b70856a7f9b56e10151ffa5480f0284
parent	bd381934bf13ccb1af2813ae26c6fe00ec85d254 (diff)
parent	c7922a911c42c5a8bdee6cc75eb6bd66937d4217 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6: (34 commits)
  [SCSI] qla2xxx: Fix NULL ptr deref bug in fail path during queue create
  [SCSI] st: fix possible memory use after free after MTSETBLK ioctl
  [SCSI] be2iscsi: Moving to pci_pools v3
  [SCSI] libiscsi: iscsi_session_setup to allow for private space
  [SCSI] be2iscsi: add 10Gbps iSCSI - BladeEngine 2 driver
  [SCSI] zfcp: Fix hang when offlining device with offline chpid
  [SCSI] zfcp: Fix lockdep warning when offlining device with offline chpid
  [SCSI] zfcp: Fix oops during shutdown of offline device
  [SCSI] zfcp: Fix initial device and cfdc for delayed adapter allocation
  [SCSI] zfcp: correctly initialize unchained requests
  [SCSI] mpt2sas: Bump version 02.100.03.00
  [SCSI] mpt2sas: Support dev remove when phy status is MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT
  [SCSI] mpt2sas: Timeout occurred within the HANDSHAKE logic while waiting on firmware to ACK.
  [SCSI] mpt2sas: Call init_completion on a per request basis.
  [SCSI] mpt2sas: Target Reset will be issued from Interrupt context.
  [SCSI] mpt2sas: Added SCSIIO, Internal and high priority memory pools to support multiple TM
  [SCSI] mpt2sas: Copyright change to 2009.
  [SCSI] mpt2sas: Added mpi2_history.txt for MPI2 headers.
  [SCSI] mpt2sas: Update driver to MPI2 REV K headers.
  [SCSI] bfa: Brocade BFA FC SCSI driver
  ...
-rw-r--r--  Documentation/scsi/hptiop.txt | 21
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 33
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 40
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 22
-rw-r--r--  drivers/scsi/Kconfig | 11
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/be2iscsi/Kconfig | 8
-rw-r--r--  drivers/scsi/be2iscsi/Makefile | 8
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 183
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 523
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 877
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 638
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.h | 75
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 3390
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 837
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 321
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 249
-rw-r--r--  drivers/scsi/bfa/Makefile | 15
-rw-r--r--  drivers/scsi/bfa/bfa_callback_priv.h | 57
-rw-r--r--  drivers/scsi/bfa/bfa_cb_ioim_macros.h | 205
-rw-r--r--  drivers/scsi/bfa/bfa_cee.c | 492
-rw-r--r--  drivers/scsi/bfa/bfa_core.c | 402
-rw-r--r--  drivers/scsi/bfa/bfa_csdebug.c | 58
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 175
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim_priv.h | 188
-rw-r--r--  drivers/scsi/bfa/bfa_fcport.c | 1671
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c | 182
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 940
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_port.c | 68
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_uf.c | 105
-rw-r--r--  drivers/scsi/bfa/bfa_fcxp.c | 782
-rw-r--r--  drivers/scsi/bfa/bfa_fcxp_priv.h | 138
-rw-r--r--  drivers/scsi/bfa/bfa_fwimg_priv.h | 31
-rw-r--r--  drivers/scsi/bfa/bfa_hw_cb.c | 142
-rw-r--r--  drivers/scsi/bfa/bfa_hw_ct.c | 162
-rw-r--r--  drivers/scsi/bfa/bfa_intr.c | 218
-rw-r--r--  drivers/scsi/bfa/bfa_intr_priv.h | 115
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 2382
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h | 259
-rw-r--r--  drivers/scsi/bfa/bfa_iocfc.c | 872
-rw-r--r--  drivers/scsi/bfa/bfa_iocfc.h | 168
-rw-r--r--  drivers/scsi/bfa/bfa_iocfc_q.c | 44
-rw-r--r--  drivers/scsi/bfa/bfa_ioim.c | 1311
-rw-r--r--  drivers/scsi/bfa/bfa_itnim.c | 1088
-rw-r--r--  drivers/scsi/bfa/bfa_log.c | 346
-rw-r--r--  drivers/scsi/bfa/bfa_log_module.c | 451
-rw-r--r--  drivers/scsi/bfa/bfa_lps.c | 782
-rw-r--r--  drivers/scsi/bfa/bfa_lps_priv.h | 38
-rw-r--r--  drivers/scsi/bfa/bfa_module.c | 90
-rw-r--r--  drivers/scsi/bfa/bfa_modules_priv.h | 43
-rw-r--r--  drivers/scsi/bfa/bfa_os_inc.h | 222
-rw-r--r--  drivers/scsi/bfa/bfa_port.c | 460
-rw-r--r--  drivers/scsi/bfa/bfa_port_priv.h | 90
-rw-r--r--  drivers/scsi/bfa/bfa_priv.h | 113
-rw-r--r--  drivers/scsi/bfa/bfa_rport.c | 911
-rw-r--r--  drivers/scsi/bfa/bfa_rport_priv.h | 45
-rw-r--r--  drivers/scsi/bfa/bfa_sgpg.c | 231
-rw-r--r--  drivers/scsi/bfa/bfa_sgpg_priv.h | 79
-rw-r--r--  drivers/scsi/bfa/bfa_sm.c | 38
-rw-r--r--  drivers/scsi/bfa/bfa_timer.c | 90
-rw-r--r--  drivers/scsi/bfa/bfa_trcmod_priv.h | 66
-rw-r--r--  drivers/scsi/bfa/bfa_tskim.c | 689
-rw-r--r--  drivers/scsi/bfa/bfa_uf.c | 345
-rw-r--r--  drivers/scsi/bfa/bfa_uf_priv.h | 47
-rw-r--r--  drivers/scsi/bfa/bfad.c | 1182
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 649
-rw-r--r--  drivers/scsi/bfa/bfad_attr.h | 65
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 295
-rw-r--r--  drivers/scsi/bfa/bfad_fwimg.c | 95
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 1230
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 150
-rw-r--r--  drivers/scsi/bfa/bfad_im_compat.h | 46
-rw-r--r--  drivers/scsi/bfa/bfad_intr.c | 214
-rw-r--r--  drivers/scsi/bfa/bfad_ipfc.h | 42
-rw-r--r--  drivers/scsi/bfa/bfad_os.c | 50
-rw-r--r--  drivers/scsi/bfa/bfad_tm.h | 59
-rw-r--r--  drivers/scsi/bfa/bfad_trcmod.h | 52
-rw-r--r--  drivers/scsi/bfa/fab.c | 62
-rw-r--r--  drivers/scsi/bfa/fabric.c | 1278
-rw-r--r--  drivers/scsi/bfa/fcbuild.c | 1449
-rw-r--r--  drivers/scsi/bfa/fcbuild.h | 273
-rw-r--r--  drivers/scsi/bfa/fcpim.c | 844
-rw-r--r--  drivers/scsi/bfa/fcptm.c | 68
-rw-r--r--  drivers/scsi/bfa/fcs.h | 30
-rw-r--r--  drivers/scsi/bfa/fcs_auth.h | 37
-rw-r--r--  drivers/scsi/bfa/fcs_fabric.h | 61
-rw-r--r--  drivers/scsi/bfa/fcs_fcpim.h | 44
-rw-r--r--  drivers/scsi/bfa/fcs_fcptm.h | 45
-rw-r--r--  drivers/scsi/bfa/fcs_fcxp.h | 29
-rw-r--r--  drivers/scsi/bfa/fcs_lport.h | 117
-rw-r--r--  drivers/scsi/bfa/fcs_ms.h | 35
-rw-r--r--  drivers/scsi/bfa/fcs_port.h | 32
-rw-r--r--  drivers/scsi/bfa/fcs_rport.h | 61
-rw-r--r--  drivers/scsi/bfa/fcs_trcmod.h | 56
-rw-r--r--  drivers/scsi/bfa/fcs_uf.h | 32
-rw-r--r--  drivers/scsi/bfa/fcs_vport.h | 39
-rw-r--r--  drivers/scsi/bfa/fdmi.c | 1223
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen.h | 92
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_adapter.h | 31
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_audit.h | 31
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_ethport.h | 35
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_ioc.h | 37
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_itnim.h | 33
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_lport.h | 51
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_port.h | 57
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen_rport.h | 37
-rw-r--r--  drivers/scsi/bfa/include/bfa.h | 177
-rw-r--r--  drivers/scsi/bfa/include/bfa_fcpim.h | 159
-rw-r--r--  drivers/scsi/bfa/include/bfa_fcptm.h | 47
-rw-r--r--  drivers/scsi/bfa/include/bfa_svc.h | 324
-rw-r--r--  drivers/scsi/bfa/include/bfa_timer.h | 53
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi.h | 174
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_boot.h | 34
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_cbreg.h | 305
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_cee.h | 119
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_ctreg.h | 611
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_fabric.h | 92
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_fcpim.h | 301
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_fcxp.h | 71
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_ioc.h | 202
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_iocfc.h | 177
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_lport.h | 89
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_lps.h | 96
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_port.h | 115
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_pport.h | 184
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_rport.h | 104
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_uf.h | 52
-rw-r--r--  drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h | 36
-rw-r--r--  drivers/scsi/bfa/include/cna/cee/bfa_cee.h | 77
-rw-r--r--  drivers/scsi/bfa/include/cna/port/bfa_port.h | 69
-rw-r--r--  drivers/scsi/bfa/include/cna/pstats/ethport_defs.h | 36
-rw-r--r--  drivers/scsi/bfa/include/cna/pstats/phyport_defs.h | 218
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_checksum.h | 60
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_debug.h | 44
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_log.h | 184
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_perf.h | 34
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_plog.h | 162
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_q.h | 81
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_sm.h | 69
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_trc.h | 176
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_wc.h | 68
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_adapter.h | 82
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_aen.h | 73
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_audit.h | 38
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_auth.h | 112
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_boot.h | 71
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_cee.h | 159
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_driver.h | 40
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_ethport.h | 98
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h | 45
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_im_common.h | 32
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_im_team.h | 72
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_ioc.h | 152
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h | 310
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h | 70
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_itnim.h | 126
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_led.h | 35
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_lport.h | 68
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_mfg.h | 58
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_pci.h | 41
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_pm.h | 33
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_pom.h | 56
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_port.h | 245
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_pport.h | 383
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_qos.h | 99
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_rport.h | 199
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_status.h | 255
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_tin.h | 118
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h | 43
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_types.h | 30
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_version.h | 22
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_vf.h | 74
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_vport.h | 91
-rw-r--r--  drivers/scsi/bfa/include/fcb/bfa_fcb.h | 33
-rw-r--r--  drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h | 76
-rw-r--r--  drivers/scsi/bfa/include/fcb/bfa_fcb_port.h | 113
-rw-r--r--  drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h | 80
-rw-r--r--  drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h | 47
-rw-r--r--  drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h | 47
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs.h | 73
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h | 82
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h | 112
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h | 131
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h | 63
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h | 226
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h | 104
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h | 63
-rw-r--r--  drivers/scsi/bfa/include/log/bfa_log_fcs.h | 28
-rw-r--r--  drivers/scsi/bfa/include/log/bfa_log_hal.h | 30
-rw-r--r--  drivers/scsi/bfa/include/log/bfa_log_linux.h | 44
-rw-r--r--  drivers/scsi/bfa/include/log/bfa_log_wdrv.h | 36
-rw-r--r--  drivers/scsi/bfa/include/protocol/ct.h | 492
-rw-r--r--  drivers/scsi/bfa/include/protocol/fc.h | 1105
-rw-r--r--  drivers/scsi/bfa/include/protocol/fc_sp.h | 224
-rw-r--r--  drivers/scsi/bfa/include/protocol/fcp.h | 186
-rw-r--r--  drivers/scsi/bfa/include/protocol/fdmi.h | 163
-rw-r--r--  drivers/scsi/bfa/include/protocol/pcifw.h | 75
-rw-r--r--  drivers/scsi/bfa/include/protocol/scsi.h | 1648
-rw-r--r--  drivers/scsi/bfa/include/protocol/types.h | 42
-rw-r--r--  drivers/scsi/bfa/loop.c | 422
-rw-r--r--  drivers/scsi/bfa/lport_api.c | 291
-rw-r--r--  drivers/scsi/bfa/lport_priv.h | 82
-rw-r--r--  drivers/scsi/bfa/ms.c | 759
-rw-r--r--  drivers/scsi/bfa/n2n.c | 105
-rw-r--r--  drivers/scsi/bfa/ns.c | 1243
-rw-r--r--  drivers/scsi/bfa/plog.c | 184
-rw-r--r--  drivers/scsi/bfa/rport.c | 2618
-rw-r--r--  drivers/scsi/bfa/rport_api.c | 180
-rw-r--r--  drivers/scsi/bfa/rport_ftrs.c | 375
-rw-r--r--  drivers/scsi/bfa/scn.c | 482
-rw-r--r--  drivers/scsi/bfa/vfapi.c | 292
-rw-r--r--  drivers/scsi/bfa/vport.c | 891
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/hptiop.c | 37
-rw-r--r--  drivers/scsi/hptiop.h | 3
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 15
-rw-r--r--  drivers/scsi/mpt2sas/Kconfig | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2.h | 103
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 200
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_history.txt | 334
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_init.h | 18
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 65
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 134
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 446
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 106
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 22
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 61
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_debug.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 568
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 26
-rw-r--r--  drivers/scsi/mvsas/mv_defs.h | 4
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 4
-rw-r--r--  drivers/scsi/pmcraid.c | 58
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 6
-rw-r--r--  drivers/scsi/scsi.c | 11
-rw-r--r--  drivers/scsi/scsi_debug.c | 139
-rw-r--r--  drivers/scsi/scsi_error.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/sd.c | 140
-rw-r--r--  drivers/scsi/sd.h | 9
-rw-r--r--  drivers/scsi/sd_dif.c | 65
-rw-r--r--  drivers/scsi/sg.c | 10
-rw-r--r--  drivers/scsi/sr.c | 22
-rw-r--r--  drivers/scsi/st.c | 3
-rw-r--r--  include/scsi/libiscsi.h | 3
-rw-r--r--  include/scsi/scsi.h | 3
-rw-r--r--  include/scsi/scsi_cmnd.h | 4
-rw-r--r--  include/scsi/scsi_host.h | 15
259 files changed, 58442 insertions, 666 deletions
diff --git a/Documentation/scsi/hptiop.txt b/Documentation/scsi/hptiop.txt
index a6eb4add1be6..9605179711f4 100644
--- a/Documentation/scsi/hptiop.txt
+++ b/Documentation/scsi/hptiop.txt
@@ -3,6 +3,25 @@ HIGHPOINT ROCKETRAID 3xxx/4xxx ADAPTER DRIVER (hptiop)
Controller Register Map
-------------------------
+For RR44xx Intel IOP based adapters, the controller IOP is accessed via PCI BAR0 and BAR2:
+
+ BAR0 offset Register
+ 0x11C5C Link Interface IRQ Set
+ 0x11C60 Link Interface IRQ Clear
+
+ BAR2 offset Register
+ 0x10 Inbound Message Register 0
+ 0x14 Inbound Message Register 1
+ 0x18 Outbound Message Register 0
+ 0x1C Outbound Message Register 1
+ 0x20 Inbound Doorbell Register
+ 0x24 Inbound Interrupt Status Register
+ 0x28 Inbound Interrupt Mask Register
+ 0x30 Outbound Interrupt Status Register
+ 0x34 Outbound Interrupt Mask Register
+ 0x40 Inbound Queue Port
+ 0x44 Outbound Queue Port
+
For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
BAR0 offset Register
@@ -93,7 +112,7 @@ The driver exposes following sysfs attributes:
-----------------------------------------------------------------------------
-Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
+Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
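
[The register table added above is enough to sketch how a driver reaches the
RR44xx registers. A minimal, hypothetical example — the function name and
error handling below are invented for illustration, not part of hptiop —
mapping BAR2 and reading the Outbound Queue Port at offset 0x44:

	#include <linux/pci.h>
	#include <linux/io.h>

	/* Illustrative sketch: read one request tag from the outbound queue. */
	static u32 example_read_outbound_queue(struct pci_dev *pdev)
	{
		void __iomem *bar2 = pci_ioremap_bar(pdev, 2); /* queue/doorbell regs */
		u32 tag;

		if (!bar2)
			return 0xFFFFFFFF;
		tag = readl(bar2 + 0x44);	/* Outbound Queue Port, BAR2 + 0x44 */
		iounmap(bar2);
		return tag;
	}
]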
diff --git a/MAINTAINERS b/MAINTAINERS
index e1da925b38c8..6141bdff3596 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1231,6 +1231,13 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/tg3.*
+BROCADE BFA FC SCSI DRIVER
+P: Jing Huang
+M: huangj@brocade.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/bfa/
+
BSG (block layer generic sg v4 driver)
M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
L: linux-scsi@vger.kernel.org
@@ -4646,6 +4653,14 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
+SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
+P: Jayamohan Kallickal
+M: jayamohank@serverengines.com
+L: linux-scsi@vger.kernel.org
+W: http://www.serverengines.com
+S: Supported
+F: drivers/scsi/be2iscsi/
+
SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
M: Sathya Perla <sathyap@serverengines.com>
M: Subbu Seetharaman <subbus@serverengines.com>
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0ba6ec876296..add9188663ff 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -426,7 +426,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
* because we preallocate so many resources
*/
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
- ISCSI_DEF_XMIT_CMDS_MAX,
+ ISCSI_DEF_XMIT_CMDS_MAX, 0,
sizeof(struct iscsi_iser_task),
initial_cmdsn, 0);
if (!cls_session)
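
[The extra 0 in the call above is the new dd_size argument that this series
adds to iscsi_session_setup() (see "[SCSI] libiscsi: iscsi_session_setup to
allow for private space" in the merge list): libiscsi now allocates dd_size
bytes of driver-private data along with the session, and iser simply requests
none. A driver that does want per-session private space would pass its size,
roughly like this — the struct and transport names are placeholders, not from
this patch:

	struct my_sess_priv {		/* hypothetical per-session state */
		u32 session_handle;
	};

	cls_session = iscsi_session_setup(&my_transport, shost,
					  ISCSI_DEF_XMIT_CMDS_MAX,
					  sizeof(struct my_sess_priv), /* dd_size */
					  sizeof(struct my_task),
					  initial_cmdsn, 0);
]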
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1be6bf7e8ce6..0f79f3af4f54 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -80,28 +80,35 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
+ struct ccw_device *ccwdev;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
struct zfcp_unit *unit;
- mutex_lock(&zfcp_data.config_mutex);
- read_lock_irq(&zfcp_data.config_lock);
- adapter = zfcp_get_adapter_by_busid(busid);
- if (adapter)
- zfcp_adapter_get(adapter);
- read_unlock_irq(&zfcp_data.config_lock);
+ ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+ if (!ccwdev)
+ return;
+
+ if (ccw_device_set_online(ccwdev))
+ goto out_ccwdev;
+ mutex_lock(&zfcp_data.config_mutex);
+ adapter = dev_get_drvdata(&ccwdev->dev);
if (!adapter)
- goto out_adapter;
- port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
- if (IS_ERR(port))
+ goto out_unlock;
+ zfcp_adapter_get(adapter);
+
+ port = zfcp_get_port_by_wwpn(adapter, wwpn);
+ if (!port)
goto out_port;
+
+ zfcp_port_get(port);
unit = zfcp_unit_enqueue(port, lun);
if (IS_ERR(unit))
goto out_unit;
mutex_unlock(&zfcp_data.config_mutex);
- ccw_device_set_online(adapter->ccw_device);
+ zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
zfcp_erp_wait(adapter);
flush_work(&unit->scsi_work);
@@ -111,8 +118,10 @@ out_unit:
zfcp_port_put(port);
out_port:
zfcp_adapter_put(adapter);
-out_adapter:
+out_unlock:
mutex_unlock(&zfcp_data.config_mutex);
+out_ccwdev:
+ put_device(&ccwdev->dev);
return;
}
@@ -593,10 +602,8 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
int retval = 0;
unsigned long flags;
- cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
zfcp_fc_wka_ports_force_offline(adapter->gs);
- zfcp_adapter_scsi_unregister(adapter);
sysfs_remove_group(&adapter->ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 0c90f8e71605..e08339428ecf 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -102,6 +102,14 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
adapter = dev_get_drvdata(&ccw_device->dev);
if (!adapter)
goto out;
+ mutex_unlock(&zfcp_data.config_mutex);
+
+ cancel_work_sync(&adapter->scan_work);
+
+ mutex_lock(&zfcp_data.config_mutex);
+
+ /* this also removes the scsi devices, so call it first */
+ zfcp_adapter_scsi_unregister(adapter);
write_lock_irq(&zfcp_data.config_lock);
list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
@@ -117,11 +125,8 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
write_unlock_irq(&zfcp_data.config_lock);
list_for_each_entry_safe(port, p, &port_remove_lh, list) {
- list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
- if (unit->device)
- scsi_remove_device(unit->device);
+ list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
zfcp_unit_dequeue(unit);
- }
zfcp_port_dequeue(port);
}
wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
@@ -192,13 +197,9 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
mutex_lock(&zfcp_data.config_mutex);
adapter = dev_get_drvdata(&ccw_device->dev);
- if (!adapter)
- goto out;
-
zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
zfcp_erp_wait(adapter);
mutex_unlock(&zfcp_data.config_mutex);
-out:
return 0;
}
@@ -253,13 +254,17 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
mutex_lock(&zfcp_data.config_mutex);
adapter = dev_get_drvdata(&cdev->dev);
+ if (!adapter)
+ goto out;
+
zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
zfcp_erp_wait(adapter);
zfcp_erp_thread_kill(adapter);
+out:
mutex_unlock(&zfcp_data.config_mutex);
}
-static struct ccw_driver zfcp_ccw_driver = {
+struct ccw_driver zfcp_ccw_driver = {
.owner = THIS_MODULE,
.name = "zfcp",
.ids = zfcp_ccw_device_id,
@@ -284,20 +289,3 @@ int __init zfcp_ccw_register(void)
{
return ccw_driver_register(&zfcp_ccw_driver);
}
-
-/**
- * zfcp_get_adapter_by_busid - find zfcp_adapter struct
- * @busid: bus id string of zfcp adapter to find
- */
-struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
-{
- struct ccw_device *ccw_device;
- struct zfcp_adapter *adapter = NULL;
-
- ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
- if (ccw_device) {
- adapter = dev_get_drvdata(&ccw_device->dev);
- put_device(&ccw_device->dev);
- }
- return adapter;
-}
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 8305c874e86f..ef681dfed0cc 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -86,8 +86,23 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
{
char busid[9];
+ struct ccw_device *ccwdev;
+ struct zfcp_adapter *adapter = NULL;
+
snprintf(busid, sizeof(busid), "0.0.%04x", devno);
- return zfcp_get_adapter_by_busid(busid);
+ ccwdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+ if (!ccwdev)
+ goto out;
+
+ adapter = dev_get_drvdata(&ccwdev->dev);
+ if (!adapter)
+ goto out_put;
+
+ zfcp_adapter_get(adapter);
+out_put:
+ put_device(&ccwdev->dev);
+out:
+ return adapter;
}
static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 36935bc0818f..629edec70405 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -28,7 +28,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
/* zfcp_ccw.c */
extern int zfcp_ccw_register(void);
extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
-extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
+extern struct ccw_driver zfcp_ccw_driver;
/* zfcp_cfdc.c */
extern struct miscdevice zfcp_cfdc_misc;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index f09c863dc6bd..38a7e4a6b639 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1058,11 +1058,25 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
+ req->qtcb->bottom.support.resp_buf_length = bytes;
if (bytes <= 0)
return -EIO;
+ return 0;
+}
+
+static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
+ struct scatterlist *sg_req,
+ struct scatterlist *sg_resp,
+ int max_sbals)
+{
+ int ret;
+
+ ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
+ if (ret)
+ return ret;
+
/* common settings for ct/gs and els requests */
- req->qtcb->bottom.support.resp_buf_length = bytes;
req->qtcb->bottom.support.service_class = FSF_CLASS_3;
req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10);
@@ -1094,8 +1108,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
- FSF_MAX_SBALS_PER_REQ);
+ ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
+ FSF_MAX_SBALS_PER_REQ);
if (ret)
goto failed_send;
@@ -1192,7 +1206,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
+ ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
if (ret)
goto failed_send;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 82bb3b2d207a..e11cca4c784c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -366,6 +366,7 @@ config ISCSI_TCP
source "drivers/scsi/cxgb3i/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
+source "drivers/scsi/be2iscsi/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
@@ -1827,6 +1828,16 @@ config SCSI_SRP
To compile this driver as a module, choose M here: the
module will be called libsrp.
+config SCSI_BFA_FC
+ tristate "Brocade BFA Fibre Channel Support"
+ depends on PCI && SCSI
+ select SCSI_FC_ATTRS
+ help
+ This bfa driver supports all Brocade PCIe FC/FCOE host adapters.
+
+ To compile this driver as a module, choose M here. The module will
+ be called bfa.
+
endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 61a94af3cee7..3ad61db5e3fa 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
+obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -130,6 +131,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig
new file mode 100644
index 000000000000..2952fcd008ea
--- /dev/null
+++ b/drivers/scsi/be2iscsi/Kconfig
@@ -0,0 +1,8 @@
+config BE2ISCSI
+ tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
+ depends on PCI && SCSI
+ select SCSI_ISCSI_ATTRS
+
+ help
+ This driver implements the iSCSI functionality for ServerEngines'
+ 10Gbps Storage adapter - BladeEngine 2.
diff --git a/drivers/scsi/be2iscsi/Makefile b/drivers/scsi/be2iscsi/Makefile
new file mode 100644
index 000000000000..c11f443e3f83
--- /dev/null
+++ b/drivers/scsi/be2iscsi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile to build the iSCSI driver for ServerEngines' BladeEngine.
+#
+#
+
+obj-$(CONFIG_BE2ISCSI) += be2iscsi.o
+
+be2iscsi-y := be_iscsi.o be_main.o be_mgmt.o be_cmds.o
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
new file mode 100644
index 000000000000..b36020dcf012
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be.h
@@ -0,0 +1,183 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+
+#ifndef BEISCSI_H
+#define BEISCSI_H
+
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+
+#define FW_VER_LEN 32
+
+struct be_dma_mem {
+ void *va;
+ dma_addr_t dma;
+ u32 size;
+};
+
+struct be_queue_info {
+ struct be_dma_mem dma_mem;
+ u16 len;
+ u16 entry_size; /* Size of an element in the queue */
+ u16 id;
+ u16 tail, head;
+ bool created;
+ atomic_t used; /* Number of valid elements in the queue */
+};
+
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ WARN_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+/*ISCSI */
+
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ /* Adaptive interrupt coalescing (AIC) info */
+ bool enable_aic;
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+};
+
+struct be_mcc_obj {
+ struct be_queue_info *q;
+ struct be_queue_info *cq;
+};
+
+struct be_ctrl_info {
+ u8 __iomem *csr;
+ u8 __iomem *db; /* Door Bell */
+ u8 __iomem *pcicfg; /* PCI config space */
+ struct pci_dev *pdev;
+
+ /* Mbox used for cmd request/response */
+ spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
+ struct be_dma_mem mbox_mem;
+ /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
+ * is stored for freeing purpose */
+ struct be_dma_mem mbox_mem_alloced;
+
+ /* MCC Rings */
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ /* MCC Async callback */
+ void (*async_cb) (void *adapter, bool link_up);
+ void *adapter_ctxt;
+};
+
+#include "be_cmds.h"
+
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+
+/* Returns number of pages spanned by the data starting at the given addr */
+#define PAGES_4K_SPANNED(_address, size) \
+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
+ (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
+
+/* Byte offset into the page corresponding to given address */
+#define OFFSET_IN_PAGE(addr) \
+ ((size_t)(addr) & (PAGE_SIZE_4K-1))
+
+/* Returns bit offset within a DWORD of a bitfield */
+#define AMAP_BIT_OFFSET(_struct, field) \
+ (((size_t)&(((_struct *)0)->field))%32)
+
+/* Returns the bit mask of the field that is NOT shifted into location. */
+static inline u32 amap_mask(u32 bitsize)
+{
+ return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
+}
+
+static inline void amap_set(void *ptr, u32 dw_offset, u32 mask,
+ u32 offset, u32 value)
+{
+ u32 *dw = (u32 *) ptr + dw_offset;
+ *dw &= ~(mask << offset);
+ *dw |= (mask & value) << offset;
+}
+
+#define AMAP_SET_BITS(_struct, field, ptr, val) \
+ amap_set(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field), \
+ val)
+
+static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
+{
+ u32 *dw = ptr;
+ return mask & (*(dw + dw_offset) >> offset);
+}
+
+#define AMAP_GET_BITS(_struct, field, ptr) \
+ amap_get(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field))
+
+#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
+#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
+static inline void swap_dws(void *wrb, int len)
+{
+#ifdef __BIG_ENDIAN
+ u32 *dw = wrb;
+ WARN_ON(len % 4);
+ do {
+ *dw = cpu_to_le32(*dw);
+ dw++;
+ len -= 4;
+ } while (len);
+#endif /* __BIG_ENDIAN */
+}
+
+extern void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+ u16 num_popped);
+
+#endif /* BEISCSI_H */
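
[The amap helpers above implement a "pseudo struct" trick: every hardware bit
is declared as one byte in a shadow struct (see amap_eq_context in be_cmds.h
below), so plain offsetof()/sizeof() arithmetic recovers the dword index, bit
offset and mask of each field. A worked example using only the macros defined
above; the scratch buffer is purely illustrative:

	/* amap_eq_context is 128 pseudo-bytes, i.e. 4 real dwords (16 bytes). */
	u32 ctxt[sizeof(struct amap_eq_context) / 32] = {};

	/*
	 * 'count' starts at pseudo-byte 58 and is 3 pseudo-bytes (bits) wide:
	 *   dword = 58 / 32 = 1, shift = 58 % 32 = 26, mask = (1 << 3) - 1 = 0x7.
	 * AMAP_SET_BITS therefore ORs (3 & 0x7) << 26 into dword 1.
	 */
	AMAP_SET_BITS(struct amap_eq_context, count, ctxt, 3);
	AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1); /* bit 29, dword 0 */

	/* Reads back 3 by shifting and masking the same way. */
	u32 count = AMAP_GET_BITS(struct amap_eq_context, count, ctxt);
]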
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
new file mode 100644
index 000000000000..08007b6e42df
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -0,0 +1,523 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+
+#include "be.h"
+#include "be_mgmt.h"
+#include "be_main.h"
+
+static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else
+ return false;
+}
+
+static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
+{
+ compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
+{
+ u16 compl_status, extd_status;
+
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ if (compl_status != MCC_STATUS_SUCCESS) {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ dev_err(&ctrl->pdev->dev,
+ "error in cmd completion: status(compl/extd)=%d/%d\n",
+ compl_status, extd_status);
+ return -1;
+ }
+ return 0;
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE);
+}
+
+void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+ u16 num_popped)
+{
+ u32 val = 0;
+ val |= qid & DB_CQ_RING_ID_MASK;
+ if (arm)
+ val |= 1 << DB_CQ_REARM_SHIFT;
+ val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
+ iowrite32(val, ctrl->db + DB_CQ_OFFSET);
+}
+
+static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
+{
+#define long_delay 2000
+ void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
+ int cnt = 0, wait = 5; /* in usecs */
+ u32 ready;
+
+ do {
+ ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ if (ready)
+ break;
+
+ if (cnt > 6000000) {
+ dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
+ return -1;
+ }
+
+ if (cnt > 50) {
+ wait = long_delay;
+ mdelay(long_delay / 1000);
+ } else
+ udelay(wait);
+ cnt += wait;
+ } while (true);
+ return 0;
+}
+
+int be_mbox_notify(struct be_ctrl_info *ctrl)
+{
+ int status;
+ u32 val = 0;
+ void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
+ struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
+ struct be_mcc_mailbox *mbox = mbox_mem->va;
+ struct be_mcc_compl *compl = &mbox->compl;
+
+ val &= ~MPU_MAILBOX_DB_RDY_MASK;
+ val |= MPU_MAILBOX_DB_HI_MASK;
+ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status != 0) {
+ SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");
+ return status;
+ }
+ val = 0;
+ val &= ~MPU_MAILBOX_DB_RDY_MASK;
+ val &= ~MPU_MAILBOX_DB_HI_MASK;
+ val |= (u32) (mbox_mem->dma >> 4) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(ctrl);
+ if (status != 0) {
+ SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");
+ return status;
+ }
+ if (be_mcc_compl_is_new(compl)) {
+ status = be_mcc_compl_process(ctrl, &mbox->compl);
+ be_mcc_compl_use(compl);
+ if (status) {
+ SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");
+ return status;
+ }
+ } else {
+ dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
+ return -1;
+ }
+ return 0;
+}
+
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+ bool embedded, u8 sge_cnt)
+{
+ if (embedded)
+ wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+ else
+ wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
+ MCC_WRB_SGE_CNT_SHIFT;
+ wrb->payload_length = payload_len;
+ be_dws_cpu_to_le(wrb, 8);
+}
+
+void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+ u8 subsystem, u8 opcode, int cmd_len)
+{
+ req_hdr->opcode = opcode;
+ req_hdr->subsystem = subsystem;
+ req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+}
+
+static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
+ struct be_dma_mem *mem)
+{
+ int i, buf_pages;
+ u64 dma = (u64) mem->dma;
+
+ buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
+ for (i = 0; i < buf_pages; i++) {
+ pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
+ pages[i].hi = cpu_to_le32(upper_32_bits(dma));
+ dma += PAGE_SIZE_4K;
+ }
+}
+
+static u32 eq_delay_to_mult(u32 usec_delay)
+{
+#define MAX_INTR_RATE 651042
+ const u32 round = 10;
+ u32 multiplier;
+
+ if (usec_delay == 0)
+ multiplier = 0;
+ else {
+ u32 interrupt_rate = 1000000 / usec_delay;
+ if (interrupt_rate == 0)
+ multiplier = 1023;
+ else {
+ multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
+ multiplier /= interrupt_rate;
+ multiplier = (multiplier + round / 2) / round;
+ multiplier = min(multiplier, (u32) 1023);
+ }
+ }
+ return multiplier;
+}
+
+struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
+{
+ return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+}
+
+int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *eq, int eq_delay)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_eq_create *req = embedded_payload(wrb);
+ struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &eq->dma_mem;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_eq_context, func, req->context,
+ PCI_FUNC(ctrl->pdev->devfn));
+ AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
+ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
+ AMAP_SET_BITS(struct amap_eq_context, count, req->context,
+ __ilog2_u32(eq->len / 256));
+ AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
+ eq_delay_to_mult(eq_delay));
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ eq->id = le16_to_cpu(resp->eq_id);
+ eq->created = true;
+ }
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ int status;
+ u8 *endian_check;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ endian_check = (u8 *) wrb;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0x12;
+ *endian_check++ = 0x34;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0xFF;
+ *endian_check++ = 0x56;
+ *endian_check++ = 0x78;
+ *endian_check++ = 0xFF;
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+ status = be_mbox_notify(ctrl);
+ if (status)
+ SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay, int coalesce_wm)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_cq_create *req = embedded_payload(wrb);
+ struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &cq->dma_mem;
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req));
+
+ if (!q_mem->va)
+ SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
+ PCI_FUNC(ctrl->pdev->devfn));
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ cq->id = le16_to_cpu(resp->cq_id);
+ cq->created = true;
+ } else
+ SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=0x%08x \n",
+ status);
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
+ int queue_type)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
+ u8 subsys = 0, opcode = 0;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ switch (queue_type) {
+ case QTYPE_EQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_EQ_DESTROY;
+ break;
+ case QTYPE_CQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_CQ_DESTROY;
+ break;
+ case QTYPE_WRBQ:
+ subsys = CMD_SUBSYSTEM_ISCSI;
+ opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
+ break;
+ case QTYPE_DPDUQ:
+ subsys = CMD_SUBSYSTEM_ISCSI;
+ opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
+ break;
+ case QTYPE_SGL:
+ subsys = CMD_SUBSYSTEM_ISCSI;
+ opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
+ break;
+ default:
+ spin_unlock(&ctrl->mbox_lock);
+ BUG();
+ return -1;
+ }
+ be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
+ if (queue_type != QTYPE_SGL)
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mbox_notify(ctrl);
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+ sizeof(*req));
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
+
+ memcpy(mac_addr, resp->mac_address, ETH_ALEN);
+ }
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq,
+ struct be_queue_info *dq, int length,
+ int entry_size)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_defq_create_req *req = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &dq->dma_mem;
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
+ AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
+ 1);
+ AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
+ PCI_FUNC(ctrl->pdev->devfn));
+ AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
+ be_encoded_q_len(length / sizeof(struct phys_addr)));
+ AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
+ ctxt, entry_size);
+ AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
+ cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct be_defq_create_resp *resp = embedded_payload(wrb);
+
+ dq->id = le16_to_cpu(resp->id);
+ dq->created = true;
+ }
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
+ struct be_queue_info *wrbq)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_wrbq_create_req *req = embedded_payload(wrb);
+ struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ if (!status)
+ wrbq->id = le16_to_cpu(resp->cid);
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem,
+ u32 page_offset, u32 num_pages)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+ int status;
+ unsigned int curr_pages;
+ u32 internal_page_offset = 0;
+ u32 temp_num_pages = num_pages;
+
+ if (num_pages == 0xff)
+ num_pages = 1;
+
+ spin_lock(&ctrl->mbox_lock);
+ do {
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
+ sizeof(*req));
+ curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
+ pages);
+ req->num_pages = min(num_pages, curr_pages);
+ req->page_offset = page_offset;
+ be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
+ q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
+ internal_page_offset += req->num_pages;
+ page_offset += req->num_pages;
+ num_pages -= req->num_pages;
+
+ if (temp_num_pages == 0xff)
+ req->num_pages = temp_num_pages;
+
+ status = be_mbox_notify(ctrl);
+ if (status) {
+ SE_DEBUG(DBG_LVL_1,
+ "FW CMD to map iscsi frags failed.\n");
+ goto error;
+ }
+ } while (num_pages > 0);
+error:
+ spin_unlock(&ctrl->mbox_lock);
+ if (status != 0)
+ beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
+ return status;
+}
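
[Every command in this file follows the same embedded-WRB pattern: take the
single mailbox WRB, prepare the WRB and request headers, fill in the payload,
post it with be_mbox_notify() under mbox_lock, then read the response out of
the same buffer. Condensed into one hypothetical command — the request struct
and opcode value are placeholders, not part of the patch:

	struct my_cmd_req {			/* placeholder request layout */
		struct be_cmd_req_hdr hdr;
		u32 param;
	};

	static int issue_embedded_cmd(struct be_ctrl_info *ctrl, u32 param)
	{
		struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
		struct my_cmd_req *req = embedded_payload(wrb);
		int status;

		spin_lock(&ctrl->mbox_lock);
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);	/* embedded, 0 SGEs */
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   0x42 /* placeholder opcode */, sizeof(*req));
		req->param = param;

		status = be_mbox_notify(ctrl);	/* rings doorbell, polls completion */
		spin_unlock(&ctrl->mbox_lock);
		return status;
	}
]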
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
new file mode 100644
index 000000000000..c20d686cbb43
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -0,0 +1,877 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+
+#ifndef BEISCSI_CMDS_H
+#define BEISCSI_CMDS_H
+
+/**
+ * The driver sends configuration and management command requests to the
+ * firmware in the BE. These requests are communicated to the processor
+ * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
+ * WRB inside a MAILBOX.
+ * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ */
+struct be_sge {
+ u32 pa_lo;
+ u32 pa_hi;
+ u32 len;
+};
+
+#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
+#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
+struct be_mcc_wrb {
+ u32 embedded; /* dword 0 */
+ u32 payload_length; /* dword 1 */
+ u32 tag0; /* dword 2 */
+ u32 tag1; /* dword 3 */
+ u32 rsvd; /* dword 4 */
+ union {
+ u8 embedded_payload[236]; /* used by embedded cmds */
+ struct be_sge sgl[19]; /* used by non-embedded cmds */
+ } payload;
+};
+
+#define CQE_FLAGS_VALID_MASK (1 << 31)
+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
+
+/* Completion Status */
+#define MCC_STATUS_SUCCESS 0x0
+
+#define CQE_STATUS_COMPL_MASK 0xFFFF
+#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
+#define CQE_STATUS_EXTD_MASK 0xFFFF
+#define CQE_STATUS_EXTD_SHIFT 0 /* bits 0 - 15 */
+
+struct be_mcc_compl {
+ u32 status; /* dword 0 */
+ u32 tag0; /* dword 1 */
+ u32 tag1; /* dword 2 */
+ u32 flags; /* dword 3 */
+};
+
+/********* Mailbox door bell *************/
+/**
+ * Used for driver communication with the FW.
+ * The software must write this register twice to post any command. First,
+ * it writes the register with hi=1 and the upper bits of the physical address
+ * for the MAILBOX structure. Software must poll the ready bit until this
+ * is acknowledged. Then, software writes the register with hi=0 with the lower
+ * bits in the address. It must poll the ready bit until the command is
+ * complete. Upon completion, the MAILBOX will contain a valid completion
+ * queue entry.
+ */
+#define MPU_MAILBOX_DB_OFFSET 0x160
+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
+#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
+
+/********** MPU semaphore ******************/
+#define MPU_EP_SEMAPHORE_OFFSET 0xac
+#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK 0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
+
+/* MPU semaphore POST stage values */
+#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
+
+/**
+ * When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl is interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+struct be_async_event_trailer {
+ u32 code;
+};
+
+enum {
+ ASYNC_EVENT_LINK_DOWN = 0x0,
+ ASYNC_EVENT_LINK_UP = 0x1
+};
+
+/**
+ * When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
+struct be_mcc_mailbox {
+ struct be_mcc_wrb wrb;
+ struct be_mcc_compl compl;
+};
+
+/* Type of subsystems supported by FW */
+#define CMD_SUBSYSTEM_COMMON 0x1
+#define CMD_SUBSYSTEM_ISCSI 0x2
+#define CMD_SUBSYSTEM_ETH 0x3
+#define CMD_SUBSYSTEM_ISCSI_INI 0x6
+#define CMD_COMMON_TCP_UPLOAD 0x1
+
+/**
+ * List of common opcodes subsystem CMD_SUBSYSTEM_COMMON
+ * These opcodes are unique for each subsystem defined above
+ */
+#define OPCODE_COMMON_CQ_CREATE 12
+#define OPCODE_COMMON_EQ_CREATE 13
+#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
+#define OPCODE_COMMON_GET_FW_VERSION 35
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_FIRMWARE_CONFIG 42
+#define OPCODE_COMMON_MCC_DESTROY 53
+#define OPCODE_COMMON_CQ_DESTROY 54
+#define OPCODE_COMMON_EQ_DESTROY 55
+#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
+#define OPCODE_COMMON_FUNCTION_RESET 61
+
+/**
+ * LIST of opcodes that are common between Initiator and Target
+ * used by CMD_SUBSYSTEM_ISCSI
+ * These opcodes are unique for each subsystem defined above
+ */
+#define OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES 2
+#define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES 3
+#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
+#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
+#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
+#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
+#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66
+#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67
+
+struct be_cmd_req_hdr {
+ u8 opcode; /* dword 0 */
+ u8 subsystem; /* dword 0 */
+ u8 port_number; /* dword 0 */
+ u8 domain; /* dword 0 */
+ u32 timeout; /* dword 1 */
+ u32 request_length; /* dword 2 */
+ u32 rsvd; /* dword 3 */
+};
+
+struct be_cmd_resp_hdr {
+ u32 info; /* dword 0 */
+ u32 status; /* dword 1 */
+ u32 response_length; /* dword 2 */
+ u32 actual_resp_len; /* dword 3 */
+};
+
+struct phys_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/**************************
+ * BE Command definitions *
+ **************************/
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte - used to calculate offset/shift/mask of each field
+ */
+struct amap_eq_context {
+ u8 cidx[13]; /* dword 0 */
+ u8 rsvd0[3]; /* dword 0 */
+ u8 epidx[13]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 rsvd1; /* dword 0 */
+ u8 size; /* dword 0 */
+ u8 pidx[13]; /* dword 1 */
+ u8 rsvd2[3]; /* dword 1 */
+ u8 pd[10]; /* dword 1 */
+ u8 count[3]; /* dword 1 */
+ u8 solevent; /* dword 1 */
+ u8 stalled; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 rsvd3[4]; /* dword 2 */
+ u8 func[8]; /* dword 2 */
+ u8 rsvd4; /* dword 2 */
+ u8 delaymult[10]; /* dword 2 */
+ u8 rsvd5[2]; /* dword 2 */
+ u8 phase[2]; /* dword 2 */
+ u8 nodelay; /* dword 2 */
+ u8 rsvd6[4]; /* dword 2 */
+ u8 rsvd7[32]; /* dword 3 */
+} __packed;
+
+struct be_cmd_req_eq_create {
+ struct be_cmd_req_hdr hdr; /* dw[4] */
+ u16 num_pages; /* sword */
+ u16 rsvd0; /* sword */
+ u8 context[sizeof(struct amap_eq_context) / 8]; /* dw[4] */
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eq_create {
+ struct be_cmd_resp_hdr resp_hdr;
+ u16 eq_id; /* sword */
+ u16 rsvd0; /* sword */
+} __packed;
+
+struct mac_addr {
+ u16 size_of_struct;
+ u8 addr[ETH_ALEN];
+} __packed;
+
+struct be_cmd_req_mac_query {
+ struct be_cmd_req_hdr hdr;
+ u8 type;
+ u8 permanent;
+ u16 if_id;
+} __packed;
+
+struct be_cmd_resp_mac_query {
+ struct be_cmd_resp_hdr hdr;
+ struct mac_addr mac;
+};
+
+/******************** Create CQ ***************************/
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte - used to calculate offset/shift/mask of each field
+ */
+struct amap_cq_context {
+ u8 cidx[11]; /* dword 0 */
+ u8 rsvd0; /* dword 0 */
+ u8 coalescwm[2]; /* dword 0 */
+ u8 nodelay; /* dword 0 */
+ u8 epidx[11]; /* dword 0 */
+ u8 rsvd1; /* dword 0 */
+ u8 count[2]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 solevent; /* dword 0 */
+ u8 eventable; /* dword 0 */
+ u8 pidx[11]; /* dword 1 */
+ u8 rsvd2; /* dword 1 */
+ u8 pd[10]; /* dword 1 */
+ u8 eqid[8]; /* dword 1 */
+ u8 stalled; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 rsvd3[4]; /* dword 2 */
+ u8 func[8]; /* dword 2 */
+ u8 rsvd4[20]; /* dword 2 */
+ u8 rsvd5[32]; /* dword 3 */
+} __packed;
+
+struct be_cmd_req_cq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 rsvd0;
+ u8 context[sizeof(struct amap_cq_context) / 8];
+ struct phys_addr pages[4];
+} __packed;
+
+struct be_cmd_resp_cq_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cq_id;
+ u16 rsvd0;
+} __packed;
+
+/******************** Create MCCQ ***************************/
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte - used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_context {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 rsvd0;
+ u8 context[sizeof(struct amap_mcc_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
+/******************** Q Destroy ***************************/
+/* Type of Queue to be destroyed */
+enum {
+ QTYPE_EQ = 1,
+ QTYPE_CQ,
+ QTYPE_MCCQ,
+ QTYPE_WRBQ,
+ QTYPE_DPDUQ,
+ QTYPE_SGL
+};
+
+struct be_cmd_req_q_destroy {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 bypass_flush; /* valid only for rx q destroy */
+} __packed;
+
+struct macaddr {
+ u8 byte[ETH_ALEN];
+};
+
+struct be_cmd_req_mcast_mac_config {
+ struct be_cmd_req_hdr hdr;
+ u16 num_mac;
+ u8 promiscuous;
+ u8 interface_id;
+ struct macaddr mac[32];
+} __packed;
+
+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
+{
+ return wrb->payload.embedded_payload;
+}
+
+static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
+{
+ return &wrb->payload.sgl[0];
+}
+
+/******************** Modify EQ Delay *******************/
+struct be_cmd_req_modify_eq_delay {
+ struct be_cmd_req_hdr hdr;
+ u32 num_eq;
+ struct {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+ } delay[8];
+} __packed;
+
+/******************** Get MAC ADDR *******************/
+
+#define ETH_ALEN 6
+
+
+struct be_cmd_req_get_mac_addr {
+ struct be_cmd_req_hdr hdr;
+ u32 nic_port_count;
+ u32 speed;
+ u32 max_speed;
+ u32 link_state;
+ u32 max_frame_size;
+ u16 size_of_structure;
+ u8 mac_address[ETH_ALEN];
+ u32 rsvd[23];
+};
+
+struct be_cmd_resp_get_mac_addr {
+ struct be_cmd_resp_hdr hdr;
+ u32 nic_port_count;
+ u32 speed;
+ u32 max_speed;
+ u32 link_state;
+ u32 max_frame_size;
+ u16 size_of_structure;
+ u8 mac_address[ETH_ALEN];
+ u32 rsvd[23];
+};
+
+int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *eq, int eq_delay);
+
+int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay,
+ int num_cqe_dma_coalesce);
+
+int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
+ int type);
+int be_poll_mcc(struct be_ctrl_info *ctrl);
+unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl);
+int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr);
+
+/* iSCSI Functions */
+int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+
+struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
+
+int be_mbox_notify(struct be_ctrl_info *ctrl);
+
+int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
+ struct be_queue_info *cq,
+ struct be_queue_info *dq, int length,
+ int entry_size);
+
+int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem, u32 page_offset,
+ u32 num_pages);
+
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
+ struct be_queue_info *wrbq);
+
+struct be_default_pdu_context {
+ u32 dw[4];
+} __packed;
+
+struct amap_be_default_pdu_context {
+ u8 dbuf_cindex[13]; /* dword 0 */
+ u8 rsvd0[3]; /* dword 0 */
+ u8 ring_size[4]; /* dword 0 */
+ u8 ring_state[4]; /* dword 0 */
+ u8 rsvd1[8]; /* dword 0 */
+ u8 dbuf_pindex[13]; /* dword 1 */
+ u8 rsvd2; /* dword 1 */
+ u8 pci_func_id[8]; /* dword 1 */
+ u8 rx_pdid[9]; /* dword 1 */
+ u8 rx_pdid_valid; /* dword 1 */
+ u8 default_buffer_size[16]; /* dword 2 */
+ u8 cq_id_recv[10]; /* dword 2 */
+ u8 rx_pdid_not_valid; /* dword 2 */
+ u8 rsvd3[5]; /* dword 2 */
+ u8 rsvd4[32]; /* dword 3 */
+} __packed;
+
+struct be_defq_create_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 ulp_num;
+ u8 rsvd0;
+ struct be_default_pdu_context context;
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_defq_create_resp {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
+struct be_post_sgl_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 page_offset;
+ u32 rsvd0;
+ struct phys_addr pages[26];
+ u32 rsvd1;
+} __packed;
+
+struct be_wrbq_create_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 ulp_num;
+ u8 rsvd0;
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_wrbq_create_resp {
+ struct be_cmd_resp_hdr resp_hdr;
+ u16 cid;
+ u16 rsvd0;
+} __packed;
+
+#define SOL_CID_MASK 0x0000FFC0
+#define SOL_CODE_MASK 0x0000003F
+#define SOL_WRB_INDEX_MASK 0x00FF0000
+#define SOL_CMD_WND_MASK 0xFF000000
+#define SOL_RES_CNT_MASK 0x7FFFFFFF
+#define SOL_EXP_CMD_SN_MASK 0xFFFFFFFF
+#define SOL_HW_STS_MASK 0x000000FF
+#define SOL_STS_MASK 0x0000FF00
+#define SOL_RESP_MASK 0x00FF0000
+#define SOL_FLAGS_MASK 0x7F000000
+#define SOL_S_MASK 0x80000000
+
+struct sol_cqe {
+ u32 dw[4];
+};
+
+struct amap_sol_cqe {
+ u8 hw_sts[8]; /* dword 0 */
+ u8 i_sts[8]; /* dword 0 */
+ u8 i_resp[8]; /* dword 0 */
+ u8 i_flags[7]; /* dword 0 */
+ u8 s; /* dword 0 */
+ u8 i_exp_cmd_sn[32]; /* dword 1 */
+ u8 code[6]; /* dword 2 */
+ u8 cid[10]; /* dword 2 */
+ u8 wrb_index[8]; /* dword 2 */
+ u8 i_cmd_wnd[8]; /* dword 2 */
+ u8 i_res_cnt[31]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
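+
+/*
+ * Example (sketch): the SOL_* masks above pick fields out of the raw
+ * sol_cqe dwords laid out per amap_sol_cqe. The cid starts at bit 70,
+ * i.e. bits 6..15 of dword 2, so the completion path extracts it as:
+ *
+ * cid = (psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] &
+ *        SOL_CID_MASK) >> 6;
+ */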
+
+/**
+ * Post WRB Queue Doorbell Register: used by the host storage stack
+ * to notify the controller of a posted Work Request Block.
+ */
+#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 7 */
+
+#define DB_DEF_PDU_WRB_INDEX_SHIFT 16
+#define DB_DEF_PDU_NUM_POSTED_SHIFT 24
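+
+/*
+ * Sketch of how a WRB-post doorbell value is composed from the masks and
+ * shifts above (mirroring the WRB posting path in be_main.c):
+ *
+ * u32 doorbell = 0;
+ * doorbell |= cid & DB_WRB_POST_CID_MASK;
+ * doorbell |= (wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
+ *             << DB_DEF_PDU_WRB_INDEX_SHIFT;
+ * doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; (one WRB posted)
+ */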
+
+struct fragnum_bits_for_sgl_cra_in {
+ struct be_cmd_req_hdr hdr;
+ u32 num_bits;
+} __packed;
+
+struct iscsi_cleanup_req {
+ struct be_cmd_req_hdr hdr;
+ u16 chute;
+ u8 hdr_ring_id;
+ u8 data_ring_id;
+
+} __packed;
+
+struct eq_delay {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+} __packed;
+
+struct be_eq_delay_params_in {
+ struct be_cmd_req_hdr hdr;
+ u32 num_eq;
+ struct eq_delay delay[8];
+} __packed;
+
+struct ip_address_format {
+ u16 size_of_structure;
+ u8 reserved;
+ u8 ip_type;
+ u8 ip_address[16];
+ u32 rsvd0;
+} __packed;
+
+struct tcp_connect_and_offload_in {
+ struct be_cmd_req_hdr hdr;
+ struct ip_address_format ip_address;
+ u16 tcp_port;
+ u16 cid;
+ u16 cq_id;
+ u16 defq_id;
+ struct phys_addr dataout_template_pa;
+ u16 hdr_ring_id;
+ u16 data_ring_id;
+ u8 do_offload;
+ u8 rsvd0[3];
+} __packed;
+
+struct tcp_connect_and_offload_out {
+ struct be_cmd_resp_hdr hdr;
+ u32 connection_handle;
+ u16 cid;
+ u16 rsvd0;
+
+} __packed;
+
+struct be_mcc_wrb_context {
+ struct MCC_WRB *wrb;
+ int *users_final_status;
+} __packed;
+
+#define DB_DEF_PDU_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 0 - 13 */
+#define DB_DEF_PDU_REARM_SHIFT 14
+#define DB_DEF_PDU_EVENT_SHIFT 15
+#define DB_DEF_PDU_CQPROC_SHIFT 16
+
+struct dmsg_cqe {
+ u32 dw[4];
+} __packed;
+
+struct tcp_upload_params_in {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 upload_type;
+ u32 reset_seq;
+} __packed;
+
+struct tcp_upload_params_out {
+ u32 dw[32];
+} __packed;
+
+union tcp_upload_params {
+ struct tcp_upload_params_in request;
+ struct tcp_upload_params_out response;
+} __packed;
+
+struct be_ulp_fw_cfg {
+ u32 ulp_mode;
+ u32 etx_base;
+ u32 etx_count;
+ u32 sq_base;
+ u32 sq_count;
+ u32 rq_base;
+ u32 rq_count;
+ u32 dq_base;
+ u32 dq_count;
+ u32 lro_base;
+ u32 lro_count;
+ u32 icd_base;
+ u32 icd_count;
+};
+
+struct be_fw_cfg {
+ struct be_cmd_req_hdr hdr;
+ u32 be_config_number;
+ u32 asic_revision;
+ u32 phys_port;
+ u32 function_mode;
+ struct be_ulp_fw_cfg ulp[2];
+ u32 function_caps;
+} __packed;
+
+#define CMD_ISCSI_COMMAND_INVALIDATE 1
+#define ISCSI_OPCODE_SCSI_DATA_OUT 5
+#define OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD 70
+#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_ISCSI_CLEANUP 59
+#define OPCODE_COMMON_TCP_UPLOAD 56
+#define OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS 1
+/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
+#define CMD_ISCSI_CONNECTION_INVALIDATE 1
+#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 2
+#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
+
+#define INI_WR_CMD 1 /* Initiator write command */
+#define INI_TMF_CMD 2 /* Initiator TMF command */
+#define INI_NOPOUT_CMD 3 /* Initiator: send a NOP-OUT */
+#define INI_RD_CMD 5 /* Initiator requesting to send
+ * a read command
+ */
+#define TGT_CTX_UPDT_CMD 7 /* Target context update */
+#define TGT_STS_CMD 8 /* Target R2T and other BHS
+ * where only the status number
+ * needs to be updated
+ */
+#define TGT_DATAIN_CMD 9 /* Target Data-Ins in response
+ * to a read command
+ */
+#define TGT_SOS_PDU 10 /* Target: standalone status
+ * response
+ */
+#define TGT_DM_CMD 11 /* Indicates that the BHS
+ * prepared by the driver should
+ * not be touched
+ */
+/* --- CMD_CHUTE_TYPE --- */
+#define CMD_CONNECTION_CHUTE_0 1
+#define CMD_CONNECTION_CHUTE_1 2
+#define CMD_CONNECTION_CHUTE_2 3
+
+#define EQ_MAJOR_CODE_COMPLETION 0
+
+#define CMD_ISCSI_SESSION_DEL_CFG_FROM_FLASH 0
+#define CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH 1
+
+/* --- CONNECTION_UPLOAD_PARAMS --- */
+/* These parameters are used to define the type of upload desired. */
+#define CONNECTION_UPLOAD_GRACEFUL 1 /* Graceful upload */
+#define CONNECTION_UPLOAD_ABORT_RESET 2 /* Abortive upload with
+ * reset
+ */
+#define CONNECTION_UPLOAD_ABORT 3 /* Abortive upload without
+ * reset
+ */
+#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4 /* Abortive upload with reset;
+ * sequence number supplied by
+ * the driver
+ */
+
+/* Returns the byte size of a given field within a structure. */
+
+/* Returns the number of items in the field array. */
+#define BE_NUMBER_OF_FIELD(_type_, _field_) \
+ (FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))
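+
+/*
+ * Example: BE_NUMBER_OF_FIELD(struct be_cmd_req_mcast_mac_config, mac)
+ * evaluates to 32, the number of entries in that structure's mac[] array.
+ */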
+
+/**
+ * Different types of iSCSI completions posted to the host driver for
+ * both initiator and target modes of operation.
+ */
+#define SOL_CMD_COMPLETE 1 /* Solicited command completed
+ * normally
+ */
+#define SOL_CMD_KILLED_DATA_DIGEST_ERR 2 /* Solicited command got
+ * invalidated internally due
+ * to Data Digest error
+ */
+#define CXN_KILLED_PDU_SIZE_EXCEEDS_DSL 3 /* Connection got invalidated
+ * internally due to a received
+ * PDU size > DSL
+ */
+#define CXN_KILLED_BURST_LEN_MISMATCH 4 /* Connection got invalidated
+ * internally due to a received
+ * PDU sequence size >
+ * FBL/MBL.
+ */
+#define CXN_KILLED_AHS_RCVD 5 /* Connection got invalidated
+ * internally due to a received
+ * PDU Hdr that has AHS
+ */
+#define CXN_KILLED_HDR_DIGEST_ERR 6 /* Connection got invalidated
+ * internally due to Hdr Digest
+ * error
+ */
+#define CXN_KILLED_UNKNOWN_HDR 7 /* Connection got invalidated
+ * internally
+ * due to a bad opcode in the
+ * pdu hdr
+ */
+#define CXN_KILLED_STALE_ITT_TTT_RCVD 8 /* Connection got invalidated
+ * internally due to a received
+ * ITT/TTT that does not belong
+ * to this Connection
+ */
+#define CXN_KILLED_INVALID_ITT_TTT_RCVD 9 /* Connection got invalidated
+ * internally due to a received
+ * ITT/TTT value > Max
+ * Supported ITTs/TTTs
+ */
+#define CXN_KILLED_RST_RCVD 10 /* Connection got invalidated
+ * internally due to an
+ * incoming TCP RST
+ */
+#define CXN_KILLED_TIMED_OUT 11 /* Connection got invalidated
+ * internally due to timeout on
+ * a tcp segment; 12 retransmit
+ * attempts failed
+ */
+#define CXN_KILLED_RST_SENT 12 /* Connection got invalidated
+ * internally due to TCP RST
+ * sent by the Tx side
+ */
+#define CXN_KILLED_FIN_RCVD 13 /* Connection got invalidated
+ * internally due to an
+ * incoming TCP FIN.
+ */
+#define CXN_KILLED_BAD_UNSOL_PDU_RCVD 14 /* Connection got invalidated
+ * internally due to a bad
+ * unsolicited PDU; unsolicited
+ * PDUs are PDUs with
+ * ITT=0xffffffff
+ */
+#define CXN_KILLED_BAD_WRB_INDEX_ERROR 15 /* Connection got invalidated
+ * internally due to bad WRB
+ * index.
+ */
+#define CXN_KILLED_OVER_RUN_RESIDUAL 16 /* Command got invalidated
+ * internally because the
+ * received command has residual
+ * overrun bytes.
+ */
+#define CXN_KILLED_UNDER_RUN_RESIDUAL 17 /* Command got invalidated
+ * internally because the
+ * received command has residual
+ * underrun bytes.
+ */
+#define CMD_KILLED_INVALID_STATSN_RCVD 18 /* Command got invalidated
+ * internally due to a received
+ * PDU with an invalid StatusSN
+ */
+#define CMD_KILLED_INVALID_R2T_RCVD 19 /* Command got invalidated
+ * internally due to a received
+ * R2T with some invalid
+ * fields in it
+ */
+#define CMD_CXN_KILLED_LUN_INVALID 20 /* Command got invalidated
+ * internally due to received
+ * PDU has an invalid LUN.
+ */
+#define CMD_CXN_KILLED_ICD_INVALID 21 /* Command got invalidated
+ * internally due to the
+ * corresponding ICD not in a
+ * valid state
+ */
+#define CMD_CXN_KILLED_ITT_INVALID 22 /* Command got invalidated due
+ * to received PDU has an
+ * invalid ITT.
+ */
+#define CMD_CXN_KILLED_SEQ_OUTOFORDER 23 /* Command got invalidated due
+ * to received sequence buffer
+ * offset is out of order.
+ */
+#define CMD_CXN_KILLED_INVALID_DATASN_RCVD 24 /* Command got invalidated
+ * internally due to a
+ * received PDU with an invalid
+ * DataSN
+ */
+#define CXN_INVALIDATE_NOTIFY 25 /* Connection invalidation
+ * completion notify.
+ */
+#define CXN_INVALIDATE_INDEX_NOTIFY 26 /* Connection invalidation
+ * completion
+ * with data PDU index.
+ */
+#define CMD_INVALIDATED_NOTIFY 27 /* Command invalidation
+ * completion notify.
+ */
+#define UNSOL_HDR_NOTIFY 28 /* Unsolicited header notify.*/
+#define UNSOL_DATA_NOTIFY 29 /* Unsolicited data notify.*/
+#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest
+ * error notify.
+ */
+#define DRIVERMSG_NOTIFY 31 /* TCP acknowledge based
+ * notification.
+ */
+#define CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN 32 /* Connection got invalidated
+ * internally due to command
+ * and data are not on same
+ * connection.
+ */
+#define SOL_CMD_KILLED_DIF_ERR 33 /* Solicited command got
+ * invalidated internally due
+ * to DIF error
+ */
+#define CXN_KILLED_SYN_RCVD 34 /* Connection got invalidated
+ * internally due to incoming
+ * TCP SYN
+ */
+#define CXN_KILLED_IMM_DATA_RCVD 35 /* Connection got invalidated
+ * internally due to an
+ * incoming Unsolicited PDU
+ * that has immediate data on
+ * the cxn
+ */
+
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+ bool embedded, u8 sge_cnt);
+
+void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+ u8 subsystem, u8 opcode, int cmd_len);
+
+#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
new file mode 100644
index 000000000000..2fd25442cfaf
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -0,0 +1,638 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+
+#include "be_iscsi.h"
+
+extern struct iscsi_transport beiscsi_iscsi_transport;
+
+/**
+ * beiscsi_session_create - creates a new iscsi session
+ * @cmds_max: max commands supported
+ * @qdepth: max queue depth supported
+ * @initial_cmdsn: initial iscsi CMDSN
+ */
+struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
+ u16 cmds_max,
+ u16 qdepth,
+ u32 initial_cmdsn)
+{
+ struct Scsi_Host *shost;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_cls_session *cls_session;
+ struct beiscsi_hba *phba;
+ struct iscsi_session *sess;
+ struct beiscsi_session *beiscsi_sess;
+ struct beiscsi_io_task *io_task;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
+
+ if (!ep) {
+ SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep \n");
+ return NULL;
+ }
+ beiscsi_ep = ep->dd_data;
+ phba = beiscsi_ep->phba;
+ shost = phba->shost;
+ if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
+ shost_printk(KERN_ERR, shost, "Cannot handle %d cmds. "
+ "Max cmds per session supported is %d. Using %d.\n",
+ cmds_max,
+ beiscsi_ep->phba->params.wrbs_per_cxn,
+ beiscsi_ep->phba->params.wrbs_per_cxn);
+ cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
+ }
+
+ cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
+ shost, cmds_max,
+ sizeof(*beiscsi_sess),
+ sizeof(*io_task),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session)
+ return NULL;
+ sess = cls_session->dd_data;
+ beiscsi_sess = sess->dd_data;
+ beiscsi_sess->bhs_pool = pci_pool_create("beiscsi_bhs_pool",
+ phba->pcidev,
+ sizeof(struct be_cmd_bhs),
+ 64, 0);
+ if (!beiscsi_sess->bhs_pool)
+ goto destroy_sess;
+
+ return cls_session;
+destroy_sess:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+/**
+ * beiscsi_session_destroy - destroys iscsi session
+ * @cls_session: pointer to iscsi cls session
+ *
+ * Destroys iSCSI session instance and releases
+ * resources allocated for it.
+ */
+void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess = cls_session->dd_data;
+ struct beiscsi_session *beiscsi_sess = sess->dd_data;
+
+ pci_pool_destroy(beiscsi_sess->bhs_pool);
+ iscsi_session_teardown(cls_session);
+}
+
+/**
+ * beiscsi_conn_create - create an instance of iscsi connection
+ * @cls_session: ptr to iscsi_cls_session
+ * @cid: iscsi cid
+ */
+struct iscsi_cls_conn *
+beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
+{
+ struct beiscsi_hba *phba;
+ struct Scsi_Host *shost;
+ struct iscsi_cls_conn *cls_conn;
+ struct beiscsi_conn *beiscsi_conn;
+ struct iscsi_conn *conn;
+ struct iscsi_session *sess;
+ struct beiscsi_session *beiscsi_sess;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_create, cid"
+ " from iscsi layer=%d\n", cid);
+ shost = iscsi_session_to_shost(cls_session);
+ phba = iscsi_host_priv(shost);
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
+ if (!cls_conn)
+ return NULL;
+
+ conn = cls_conn->dd_data;
+ beiscsi_conn = conn->dd_data;
+ beiscsi_conn->ep = NULL;
+ beiscsi_conn->phba = phba;
+ beiscsi_conn->conn = conn;
+ sess = cls_session->dd_data;
+ beiscsi_sess = sess->dd_data;
+ beiscsi_conn->beiscsi_sess = beiscsi_sess;
+ return cls_conn;
+}
+
+/**
+ * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
+ * @beiscsi_conn: The pointer to beiscsi_conn structure
+ * @phba: The phba instance
+ * @cid: The cid to bind
+ */
+static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
+ struct beiscsi_conn *beiscsi_conn,
+ unsigned int cid)
+{
+ if (phba->conn_table[cid]) {
+ SE_DEBUG(DBG_LVL_1,
+ "Connection table already occupied. Detected clash\n");
+ return -EINVAL;
+ } else {
+ SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn) \n",
+ cid, beiscsi_conn);
+ phba->conn_table[cid] = beiscsi_conn;
+ }
+ return 0;
+}
+
+/**
+ * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
+ * @cls_session: pointer to iscsi cls session
+ * @cls_conn: pointer to iscsi cls conn
+ * @transport_fd: EP handle (64 bit)
+ *
+ * This function binds the TCP Conn with iSCSI Connection and Session.
+ */
+int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_fd, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct Scsi_Host *shost =
+ (struct Scsi_Host *)iscsi_session_to_shost(cls_session);
+ struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_endpoint *ep;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_bind\n");
+ ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
+
+ beiscsi_ep = ep->dd_data;
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+ return -EINVAL;
+
+ if (beiscsi_ep->phba != phba) {
+ SE_DEBUG(DBG_LVL_8,
+ "beiscsi_ep->hba=%p not equal to phba=%p \n",
+ beiscsi_ep->phba, phba);
+ return -EEXIST;
+ }
+
+ beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
+ beiscsi_conn->ep = beiscsi_ep;
+ beiscsi_ep->conn = beiscsi_conn;
+ SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d \n",
+ beiscsi_conn, conn, beiscsi_ep->ep_cid);
+ return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
+}
+
+/**
+ * beiscsi_conn_get_param - get the iscsi parameter
+ * @cls_conn: pointer to iscsi cls conn
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns iscsi parameter
+ */
+int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+{
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ int len = 0;
+
+ beiscsi_ep = beiscsi_conn->ep;
+ if (!beiscsi_ep) {
+ SE_DEBUG(DBG_LVL_1,
+ "In beiscsi_conn_get_param , no beiscsi_ep\n");
+ return -1;
+ }
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (beiscsi_ep->ip_type == BE2_IPV4)
+ len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
+ else
+ len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
+ break;
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+ }
+ return len;
+}
+
+int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ int ret;
+
+ ret = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (ret)
+ return ret;
+ /*
+ * If userspace tried to set the value higher than we can
+ * support, override it here.
+ */
+ switch (param) {
+ case ISCSI_PARAM_FIRST_BURST:
+ if (session->first_burst > 8192)
+ session->first_burst = 8192;
+ break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ if (conn->max_recv_dlength > 65536)
+ conn->max_recv_dlength = 65536;
+ break;
+ case ISCSI_PARAM_MAX_BURST:
+ if (session->max_burst > 262144)
+ session->max_burst = 262144;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * beiscsi_get_host_param - get the iscsi parameter
+ * @shost: pointer to scsi_host structure
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns host parameter
+ */
+int beiscsi_get_host_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
+ int len = 0;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ be_cmd_get_mac_addr(&phba->ctrl, phba->mac_address);
+ len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+ return len;
+}
+
+/**
+ * beiscsi_conn_get_stats - get the iscsi stats
+ * @cls_conn: pointer to iscsi cls conn
+ * @stats: pointer to iscsi_stats structure
+ *
+ * returns iscsi stats
+ */
+void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_stats\n");
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ stats->custom_length = 0;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+}
+
+/**
+ * beiscsi_set_params_for_offld - set the parameters for offload
+ * @beiscsi_conn: pointer to beiscsi_conn
+ * @params: pointer to offload_params structure
+ */
+static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_offload_params *params)
+{
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct iscsi_session *session = conn->session;
+
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_burst_length,
+ params, session->max_burst);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length, params,
+ conn->max_xmit_dlength);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, first_burst_length,
+ params, session->first_burst);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, erl, params,
+ session->erl);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, dde, params,
+ conn->datadgst_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, hde, params,
+ conn->hdrdgst_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, ir2t, params,
+ session->initial_r2t_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params,
+ session->imm_data_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
+ (conn->exp_statsn - 1));
+}
+
+/**
+ * beiscsi_conn_start - offload of session to chip
+ * @cls_conn: pointer to iscsi cls conn
+ */
+int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct beiscsi_offload_params params;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ memset(&params, 0, sizeof(struct beiscsi_offload_params));
+ beiscsi_ep = beiscsi_conn->ep;
+ if (!beiscsi_ep)
+ SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");
+
+ free_mgmt_sgl_handle(phba, beiscsi_conn->plogin_sgl_handle);
+ beiscsi_conn->login_in_progress = 0;
+ beiscsi_set_params_for_offld(beiscsi_conn, &params);
+ beiscsi_offload_connection(beiscsi_conn, &params);
+ iscsi_conn_start(cls_conn);
+ return 0;
+}
+
+/**
+ * beiscsi_get_cid - Allocate a cid
+ * @phba: The phba instance
+ */
+static int beiscsi_get_cid(struct beiscsi_hba *phba)
+{
+ unsigned short cid = 0xFFFF;
+
+ if (!phba->avlbl_cids)
+ return cid;
+
+ cid = phba->cid_array[phba->cid_alloc++];
+ if (phba->cid_alloc == phba->params.cxns_per_ctrl)
+ phba->cid_alloc = 0;
+ phba->avlbl_cids--;
+ return cid;
+}
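+
+/*
+ * Note: cid_array plus the cid_alloc/cid_free cursors (see
+ * beiscsi_put_cid() below) form a circular FIFO of free cids;
+ * avlbl_cids tracks how many entries are currently free.
+ */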
+
+/**
+ * beiscsi_open_conn - Ask FW to open a TCP connection
+ * @ep: endpoint to be used
+ * @src_addr: The source IP address
+ * @dst_addr: The Destination IP address
+ *
+ * Asks the FW to open a TCP connection
+ */
+static int beiscsi_open_conn(struct iscsi_endpoint *ep,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int non_blocking)
+{
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+ struct beiscsi_hba *phba = beiscsi_ep->phba;
+ int ret = -1;
+
+ beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
+ if (beiscsi_ep->ep_cid == 0xFFFF) {
+ SE_DEBUG(DBG_LVL_1, "No free cid available\n");
+ return ret;
+ }
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
+ beiscsi_ep->ep_cid);
+ phba->ep_array[beiscsi_ep->ep_cid] = ep;
+ if (beiscsi_ep->ep_cid >
+ (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) {
+ SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
+ return ret;
+ }
+
+ beiscsi_ep->cid_vld = 0;
+ return mgmt_open_connection(phba, dst_addr, beiscsi_ep);
+}
+
+/**
+ * beiscsi_put_cid - Free the cid
+ * @phba: The phba for which the cid is being freed
+ * @cid: The cid to free
+ */
+static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
+{
+ phba->avlbl_cids++;
+ phba->cid_array[phba->cid_free++] = cid;
+ if (phba->cid_free == phba->params.cxns_per_ctrl)
+ phba->cid_free = 0;
+}
+
+/**
+ * beiscsi_free_ep - free endpoint
+ * @ep: pointer to iscsi endpoint structure
+ */
+static void beiscsi_free_ep(struct iscsi_endpoint *ep)
+{
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+ struct beiscsi_hba *phba = beiscsi_ep->phba;
+
+ beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
+ beiscsi_ep->phba = NULL;
+ iscsi_destroy_endpoint(ep);
+}
+
+/**
+ * beiscsi_ep_connect - Ask chip to create TCP Conn
+ * @shost: Pointer to scsi_host structure
+ * @dst_addr: The IP address of Target
+ * @non_blocking: blocking or non-blocking call
+ *
+ * This routine first asks the chip to create a connection and then
+ * allocates an EP
+ */
+struct iscsi_endpoint *
+beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct beiscsi_hba *phba;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_endpoint *ep;
+ int ret;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect \n");
+ if (!shost) {
+ SE_DEBUG(DBG_LVL_1, "shost is NULL\n");
+ return ERR_PTR(-ENXIO);
+ }
+ phba = iscsi_host_priv(shost);
+ ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint));
+ if (!ep) {
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->phba = phba;
+
+ if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
+ SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
+ ret = -ENOMEM;
+ goto free_ep;
+ }
+
+ return ep;
+
+free_ep:
+ beiscsi_free_ep(ep);
+ return ERR_PTR(ret);
+}
+
+/**
+ * beiscsi_ep_poll - Poll to see if connection is established
+ * @ep: endpoint to be used
+ * @timeout_ms: timeout specified in millisecs
+ *
+ * Poll to see if TCP connection established
+ */
+int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_poll\n");
+ return beiscsi_ep->cid_vld == 1;
+}
+
+/**
+ * beiscsi_close_conn - Upload the connection
+ * @ep: The iscsi endpoint
+ * @flag: The type of connection closure
+ */
+static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag)
+{
+ int ret = 0;
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+ struct beiscsi_hba *phba = beiscsi_ep->phba;
+
+ if (MGMT_STATUS_SUCCESS !=
+ mgmt_upload_connection(phba, beiscsi_ep->ep_cid,
+ CONNECTION_UPLOAD_GRACEFUL)) {
+ SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
+ beiscsi_ep->ep_cid);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * beiscsi_ep_disconnect - Tears down the TCP connection
+ * @ep: endpoint to be used
+ *
+ * Tears down the TCP connection
+ */
+void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct beiscsi_conn *beiscsi_conn;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct beiscsi_hba *phba;
+ int flag = 0;
+
+ beiscsi_ep = ep->dd_data;
+ phba = beiscsi_ep->phba;
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n");
+
+ if (beiscsi_ep->conn) {
+ beiscsi_conn = beiscsi_ep->conn;
+ iscsi_suspend_queue(beiscsi_conn->conn);
+ beiscsi_close_conn(ep, flag);
+ }
+
+ beiscsi_free_ep(ep);
+}
+
+/**
+ * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
+ * @phba: The phba instance
+ * @cid: The cid to unbind
+ */
+static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
+ unsigned int cid)
+{
+ if (phba->conn_table[cid])
+ phba->conn_table[cid] = NULL;
+ else {
+ SE_DEBUG(DBG_LVL_8, "Connection table Not occupied. \n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * beiscsi_conn_stop - Invalidate and stop the connection
+ * @cls_conn: pointer to get iscsi_conn
+ * @flag: The type of connection closure
+ */
+void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_session *session = conn->session;
+ struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ unsigned int status;
+ unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
+
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
+ beiscsi_ep = beiscsi_conn->ep;
+ if (!beiscsi_ep) {
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
+ return;
+ }
+ status = mgmt_invalidate_connection(phba, beiscsi_ep,
+ beiscsi_ep->ep_cid, 1,
+ savecfg_flag);
+ if (status != MGMT_STATUS_SUCCESS) {
+ SE_DEBUG(DBG_LVL_1,
+ "mgmt_invalidate_connection Failed for cid=%d \n",
+ beiscsi_ep->ep_cid);
+ }
+ beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
+ iscsi_conn_stop(cls_conn, flag);
+}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
new file mode 100644
index 000000000000..f92ffc5349fb
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -0,0 +1,75 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+
+#ifndef _BE_ISCSI_
+#define _BE_ISCSI_
+
+#include "be_main.h"
+#include "be_mgmt.h"
+
+#define BE2_IPV4 0x1
+#define BE2_IPV6 0x10
+
+void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_offload_params *params);
+
+void beiscsi_offload_iscsi(struct beiscsi_hba *phba, struct iscsi_conn *conn,
+ struct beiscsi_conn *beiscsi_conn,
+ unsigned int fw_handle);
+
+struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max,
+ uint16_t qdepth,
+ uint32_t initial_cmdsn);
+
+void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
+
+struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
+ *cls_session, uint32_t cid);
+
+int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_fd, int is_leading);
+
+int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+
+int beiscsi_get_host_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+
+int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+
+int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn);
+
+void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag);
+
+struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking);
+
+int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
+
+void beiscsi_ep_disconnect(struct iscsi_endpoint *ep);
+
+void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats);
+
+#endif
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
new file mode 100644
index 000000000000..4f1aca346e38
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -0,0 +1,3390 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/semaphore.h>
+
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include "be_main.h"
+#include "be_iscsi.h"
+#include "be_mgmt.h"
+
+static unsigned int be_iopoll_budget = 10;
+static unsigned int be_max_phys_size = 64;
+static unsigned int enable_msix;
+
+MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
+MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_LICENSE("GPL");
+module_param(be_iopoll_budget, int, 0);
+module_param(enable_msix, int, 0);
+module_param(be_max_phys_size, uint, S_IRUGO);
+MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
+ " contiguous memory that can be allocated."
+ " Range is 16 - 128");
+
+static int beiscsi_slave_configure(struct scsi_device *sdev)
+{
+ blk_queue_max_segment_size(sdev->request_queue, 65536);
+ return 0;
+}
+
+static struct scsi_host_template beiscsi_sht = {
+ .module = THIS_MODULE,
+ .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
+ .proc_name = DRV_NAME,
+ .queuecommand = iscsi_queuecommand,
+ .eh_abort_handler = iscsi_eh_abort,
+ .change_queue_depth = iscsi_change_queue_depth,
+ .slave_configure = beiscsi_slave_configure,
+ .target_alloc = iscsi_target_alloc,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_target_reset,
+ .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
+ .can_queue = BE2_IO_DEPTH,
+ .this_id = -1,
+ .max_sectors = BEISCSI_MAX_SECTORS,
+ .cmd_per_lun = BEISCSI_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+static struct scsi_transport_template *beiscsi_scsi_transport;
+
+/*------------------- PCI Driver operations and data ----------------- */
+static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
+
+static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
+{
+ struct beiscsi_hba *phba;
+ struct Scsi_Host *shost;
+
+ shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
+ if (!shost) {
+ dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
+ "iscsi_host_alloc failed \n");
+ return NULL;
+ }
+ shost->dma_boundary = pcidev->dma_mask;
+ shost->max_id = BE2_MAX_SESSIONS;
+ shost->max_channel = 0;
+ shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
+ shost->max_lun = BEISCSI_NUM_MAX_LUN;
+ shost->transportt = beiscsi_scsi_transport;
+
+ phba = iscsi_host_priv(shost);
+ memset(phba, 0, sizeof(*phba));
+ phba->shost = shost;
+ phba->pcidev = pci_dev_get(pcidev);
+
+ if (iscsi_host_add(shost, &phba->pcidev->dev))
+ goto free_devices;
+ return phba;
+
+free_devices:
+ pci_dev_put(phba->pcidev);
+ iscsi_host_free(phba->shost);
+ return NULL;
+}
+
+static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
+{
+ if (phba->csr_va) {
+ iounmap(phba->csr_va);
+ phba->csr_va = NULL;
+ }
+ if (phba->db_va) {
+ iounmap(phba->db_va);
+ phba->db_va = NULL;
+ }
+ if (phba->pci_va) {
+ iounmap(phba->pci_va);
+ phba->pci_va = NULL;
+ }
+}
+
+static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
+ struct pci_dev *pcidev)
+{
+ u8 __iomem *addr;
+
+ addr = ioremap_nocache(pci_resource_start(pcidev, 2),
+ pci_resource_len(pcidev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ phba->ctrl.csr = addr;
+ phba->csr_va = addr;
+ phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
+
+ addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
+ if (addr == NULL)
+ goto pci_map_err;
+ phba->ctrl.db = addr;
+ phba->db_va = addr;
+ phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
+
+ addr = ioremap_nocache(pci_resource_start(pcidev, 1),
+ pci_resource_len(pcidev, 1));
+ if (addr == NULL)
+ goto pci_map_err;
+ phba->ctrl.pcicfg = addr;
+ phba->pci_va = addr;
+ phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
+ return 0;
+
+pci_map_err:
+ beiscsi_unmap_pci_function(phba);
+ return -ENOMEM;
+}
+
+static int beiscsi_enable_pci(struct pci_dev *pcidev)
+{
+ int ret;
+
+ ret = pci_enable_device(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
+ "failed. Returning -ENODEV\n");
+ return ret;
+ }
+
+ if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+ pci_disable_device(pcidev);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
+ struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
+ int status = 0;
+
+ ctrl->pdev = pdev;
+ status = beiscsi_map_pci_bars(phba, pdev);
+ if (status)
+ return status;
+
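+ /*
+ * The mailbox is over-allocated by 16 bytes so that the working copy
+ * (mbox_mem) can be rounded up to a 16-byte boundary with PTR_ALIGN()
+ * below.
+ */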
+ mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
+ mbox_mem_alloc->va = pci_alloc_consistent(pdev,
+ mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma);
+ if (!mbox_mem_alloc->va) {
+ beiscsi_unmap_pci_function(phba);
+ status = -ENOMEM;
+ return status;
+ }
+
+ mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
+ mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
+ mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
+ memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
+ spin_lock_init(&ctrl->mbox_lock);
+ return status;
+}
+
+static void beiscsi_get_params(struct beiscsi_hba *phba)
+{
+ phba->params.ios_per_ctrl = BE2_IO_DEPTH;
+ phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
+ phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
+ phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
+ phba->params.num_sge_per_io = BE2_SGE;
+ phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
+ phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
+ phba->params.eq_timer = 64;
+ phba->params.num_eq_entries =
+ (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
+ 512) + 1) * 512;
+ phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
+ ? 1024 : phba->params.num_eq_entries;
+ SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
+ phba->params.num_eq_entries);
+ phba->params.num_cq_entries =
+ (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
+ 512) + 1) * 512;
+ SE_DEBUG(DBG_LVL_8,
+ "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d"
+ "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
+ phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
+ BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
+ phba->params.wrbs_per_cxn = 256;
+}
+
+static void hwi_ring_eq_db(struct beiscsi_hba *phba,
+ unsigned int id, unsigned int clr_interrupt,
+ unsigned int num_processed,
+ unsigned char rearm, unsigned char event)
+{
+ u32 val = 0;
+ val |= id & DB_EQ_RING_ID_MASK;
+ if (rearm)
+ val |= 1 << DB_EQ_REARM_SHIFT;
+ if (clr_interrupt)
+ val |= 1 << DB_EQ_CLR_SHIFT;
+ if (event)
+ val |= 1 << DB_EQ_EVNT_SHIFT;
+ val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
+ iowrite32(val, phba->db_va + DB_EQ_OFFSET);
+}
+
+/**
+ * be_isr - The isr routine of the driver.
+ * @irq: Not used
+ * @dev_id: Pointer to host adapter structure
+ */
+static irqreturn_t be_isr(int irq, void *dev_id)
+{
+ struct beiscsi_hba *phba;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_eq_entry *eqe = NULL;
+ struct be_queue_info *eq;
+ struct be_queue_info *cq;
+ unsigned long flags, index;
+ unsigned int num_eq_processed;
+ struct be_ctrl_info *ctrl;
+ int isr;
+
+ phba = dev_id;
+ if (!enable_msix) {
+ ctrl = &phba->ctrl;
+ isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
+ (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
+ if (!isr)
+ return IRQ_NONE;
+ }
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ eq = &phwi_context->be_eq.q;
+ cq = &phwi_context->be_cq;
+ index = 0;
+ eqe = queue_tail_node(eq);
+ if (!eqe)
+ SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
+
+ num_eq_processed = 0;
+ if (blk_iopoll_enabled) {
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (!blk_iopoll_sched_prep(&phba->iopoll))
+ blk_iopoll_sched(&phba->iopoll);
+
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
+ SE_DEBUG(DBG_LVL_8, "Valid EQE\n");
+ }
+ if (num_eq_processed) {
+ hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+ } else {
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+
+ if (((eqe->dw[offsetof(struct amap_eq_entry,
+ resource_id) / 32] &
+ EQE_RESID_MASK) >> 16) != cq->id) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ phba->todo_mcc_cq = 1;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ } else {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ phba->todo_cq = 1;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ }
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
+ }
+ if (phba->todo_cq || phba->todo_mcc_cq)
+ queue_work(phba->wq, &phba->work_cqs);
+
+ if (num_eq_processed) {
+ hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+ }
+}
+
+static int beiscsi_init_irqs(struct beiscsi_hba *phba)
+{
+ struct pci_dev *pcidev = phba->pcidev;
+ int ret;
+
+ ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
+ "Failed to register irq\\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void hwi_ring_cq_db(struct beiscsi_hba *phba,
+ unsigned int id, unsigned int num_processed,
+ unsigned char rearm, unsigned char event)
+{
+ u32 val = 0;
+ val |= id & DB_CQ_RING_ID_MASK;
+ if (rearm)
+ val |= 1 << DB_CQ_REARM_SHIFT;
+ val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
+ iowrite32(val, phba->db_va + DB_CQ_OFFSET);
+}
+
+/*
+ * async pdus include:
+ * a. unsolicited NOP-In (target initiated NOP-In)
+ * b. Async Messages
+ * c. Reject PDU
+ * d. Login response
+ * These headers arrive unprocessed by the EP firmware; the iSCSI layer
+ * processes them.
+ */
+static unsigned int
+beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ unsigned short cid,
+ struct pdu_base *ppdu,
+ unsigned long pdu_len,
+ void *pbuffer, unsigned long buf_len)
+{
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct iscsi_session *session = conn->session;
+
+ switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
+ PDUBASE_OPCODE_MASK) {
+ case ISCSI_OP_NOOP_IN:
+ pbuffer = NULL;
+ buf_len = 0;
+ break;
+ case ISCSI_OP_ASYNC_EVENT:
+ break;
+ case ISCSI_OP_REJECT:
+ WARN_ON(!pbuffer);
+ WARN_ON(buf_len != 48);
+ SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ break;
+ default:
+ shost_printk(KERN_WARNING, phba->shost,
+ "Unrecognized opcode 0x%x in async msg \n",
+ (ppdu->
+ dw[offsetof(struct amap_pdu_base, opcode) / 32]
+ & PDUBASE_OPCODE_MASK));
+ return 1;
+ }
+
+ spin_lock_bh(&session->lock);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
+ spin_unlock_bh(&session->lock);
+ return 0;
+}
+
+static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
+{
+ struct sgl_handle *psgl_handle;
+
+ if (phba->io_sgl_hndl_avbl) {
+ SE_DEBUG(DBG_LVL_8,
+ "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
+ phba->io_sgl_alloc_index);
+ psgl_handle = phba->io_sgl_hndl_base[phba->
+ io_sgl_alloc_index];
+ phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
+ phba->io_sgl_hndl_avbl--;
+ if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1))
+ phba->io_sgl_alloc_index = 0;
+ else
+ phba->io_sgl_alloc_index++;
+ } else
+ psgl_handle = NULL;
+ return psgl_handle;
+}
+
+static void
+free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
+{
+ SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
+ phba->io_sgl_free_index);
+ if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
+ /*
+ * this can happen if clean_task is called on a task that
+ * failed in xmit_task or alloc_pdu.
+ */
+ SE_DEBUG(DBG_LVL_8,
+ "Double Free in IO SGL io_sgl_free_index=%d,"
+ "value there=%p \n", phba->io_sgl_free_index,
+ phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
+ return;
+ }
+ phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
+ phba->io_sgl_hndl_avbl++;
+ if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
+ phba->io_sgl_free_index = 0;
+ else
+ phba->io_sgl_free_index++;
+}
+
+/**
+ * alloc_wrb_handle - To allocate a wrb handle
+ * @phba: The hba pointer
+ * @cid: The cid to use for allocation
+ * @index: the wrb index to allocate
+ *
+ * This happens under session_lock until submission to chip
+ */
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
+ int index)
+{
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct wrb_handle *pwrb_handle;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[cid];
+ pwrb_handle = pwrb_context->pwrb_handle_base[index];
+ pwrb_handle->wrb_index = index;
+ pwrb_handle->nxt_wrb_index = index;
+ return pwrb_handle;
+}
+
+/**
+ * free_wrb_handle - To free the wrb handle back to pool
+ * @phba: The hba pointer
+ * @pwrb_context: The context to free from
+ * @pwrb_handle: The wrb_handle to free
+ *
+ * This happens under session_lock until submission to chip
+ */
+static void
+free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
+ struct wrb_handle *pwrb_handle)
+{
+ SE_DEBUG(DBG_LVL_8,
+ "FREE WRB: pwrb_handle=%p free_index=%d=0x%x "
+ "wrb_handles_available=%d\n",
+ pwrb_handle, pwrb_context->free_index,
+ pwrb_context->free_index, pwrb_context->wrb_handles_available);
+}
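+
+/*
+ * Note: wrb handles are preallocated per connection and looked up by
+ * index in alloc_wrb_handle(), so freeing here only logs; the slot is
+ * simply reused on the next allocation at the same index.
+ */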
+
+static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
+{
+ struct sgl_handle *psgl_handle;
+
+ if (phba->eh_sgl_hndl_avbl) {
+ psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
+ phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
+ SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
+ phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
+ phba->eh_sgl_hndl_avbl--;
+ if (phba->eh_sgl_alloc_index ==
+ (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
+ 1))
+ phba->eh_sgl_alloc_index = 0;
+ else
+ phba->eh_sgl_alloc_index++;
+ } else
+ psgl_handle = NULL;
+ return psgl_handle;
+}
+
+void
+free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
+{
+
+ if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
+ /*
+ * this can happen if clean_task is called on a task that
+ * failed in xmit_task or alloc_pdu.
+ */
+ SE_DEBUG(DBG_LVL_8,
+ "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
+ phba->eh_sgl_free_index);
+ return;
+ }
+ phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
+ phba->eh_sgl_hndl_avbl++;
+ if (phba->eh_sgl_free_index ==
+ (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
+ phba->eh_sgl_free_index = 0;
+ else
+ phba->eh_sgl_free_index++;
+}
+
+static void
+be_complete_io(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task, struct sol_cqe *psol)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct be_status_bhs *sts_bhs =
+ (struct be_status_bhs *)io_task->cmd_bhs;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ unsigned int sense_len;
+ unsigned char *sense;
+ u32 resid = 0, exp_cmdsn, max_cmdsn;
+ u8 rsp, status, flags;
+
+ exp_cmdsn = be32_to_cpu(psol->
+ dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
+ & SOL_EXP_CMD_SN_MASK);
+ max_cmdsn = be32_to_cpu((psol->
+ dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
+ & SOL_EXP_CMD_SN_MASK) +
+ ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
+ / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
+ & SOL_RESP_MASK) >> 16);
+ status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
+ & SOL_STS_MASK) >> 8);
+ flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
+ & SOL_FLAGS_MASK) >> 24) | 0x80;
+
+ task->sc->result = (DID_OK << 16) | status;
+ if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
+ task->sc->result = DID_ERROR << 16;
+ goto unmap;
+ }
+
+ /* bidi not initially supported */
+ if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
+ resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
+ 32] & SOL_RES_CNT_MASK);
+
+ if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
+ task->sc->result = DID_ERROR << 16;
+
+ if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
+ scsi_set_resid(task->sc, resid);
+ if (!status && (scsi_bufflen(task->sc) - resid <
+ task->sc->underflow))
+ task->sc->result = DID_ERROR << 16;
+ }
+ }
+
+ if (status == SAM_STAT_CHECK_CONDITION) {
+ sense = sts_bhs->sense_info + sizeof(unsigned short);
+ sense_len =
+ cpu_to_be16((unsigned short)(sts_bhs->sense_info[0]));
+ memcpy(task->sc->sense_buffer, sense,
+ min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
+ }
+ if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
+ if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
+ & SOL_RES_CNT_MASK)
+ conn->rxdata_octets += (psol->
+ dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
+ & SOL_RES_CNT_MASK);
+ }
+unmap:
+ scsi_dma_unmap(io_task->scsi_cmnd);
+ iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
+}
+
+static void
+be_complete_logout(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task, struct sol_cqe *psol)
+{
+ struct iscsi_logout_rsp *hdr;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+
+ hdr = (struct iscsi_logout_rsp *)task->hdr;
+ hdr->t2wait = 5;
+ hdr->t2retain = 0;
+ hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
+ & SOL_FLAGS_MASK) >> 24) | 0x80;
+ hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
+ 32] & SOL_RESP_MASK);
+ hdr->exp_cmdsn = cpu_to_be32(psol->
+ dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
+ & SOL_EXP_CMD_SN_MASK);
+ hdr->max_cmdsn = be32_to_cpu((psol->
+ dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
+ & SOL_EXP_CMD_SN_MASK) +
+ ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
+ / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->hlength = 0;
+
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+}
+
+static void
+be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task, struct sol_cqe *psol)
+{
+ struct iscsi_tm_rsp *hdr;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+
+ hdr = (struct iscsi_tm_rsp *)task->hdr;
+ hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
+ & SOL_FLAGS_MASK) >> 24) | 0x80;
+ hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
+ 32] & SOL_RESP_MASK);
+ hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
+ i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
+ hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
+ i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
+ ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
+ / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+}
+
+static void
+hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba, struct sol_cqe *psol)
+{
+ struct hwi_wrb_context *pwrb_context;
+ struct wrb_handle *pwrb_handle;
+ struct hwi_controller *phwi_ctrlr;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct iscsi_session *session = conn->session;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[((psol->
+ dw[offsetof(struct amap_sol_cqe, cid) / 32] &
+ SOL_CID_MASK) >> 6)];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
+ dw[offsetof(struct amap_sol_cqe, wrb_index) /
+ 32] & SOL_WRB_INDEX_MASK) >> 16)];
+ spin_lock_bh(&session->lock);
+ free_wrb_handle(phba, pwrb_context, pwrb_handle);
+ spin_unlock_bh(&session->lock);
+}
+
+static void
+be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
+ struct iscsi_task *task, struct sol_cqe *psol)
+{
+ struct iscsi_nopin *hdr;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+
+ hdr = (struct iscsi_nopin *)task->hdr;
+ hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
+ & SOL_FLAGS_MASK) >> 24) | 0x80;
+ hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
+ i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
+ hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
+ i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
+ ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
+ / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+}
+
+static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba, struct sol_cqe *psol)
+{
+ struct hwi_wrb_context *pwrb_context;
+ struct wrb_handle *pwrb_handle;
+ struct iscsi_wrb *pwrb = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct iscsi_task *task;
+ struct beiscsi_io_task *io_task;
+ struct iscsi_conn *conn = beiscsi_conn->conn;
+ struct iscsi_session *session = conn->session;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+
+ pwrb_context = &phwi_ctrlr->
+ wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
+ & SOL_CID_MASK) >> 6)];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
+ dw[offsetof(struct amap_sol_cqe, wrb_index) /
+ 32] & SOL_WRB_INDEX_MASK) >> 16)];
+
+ task = pwrb_handle->pio_handle;
+ io_task = task->dd_data;
+ spin_lock_bh(&session->lock);
+ pwrb = pwrb_handle->pwrb;
+ switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
+ WRB_TYPE_MASK) >> 28) {
+ case HWH_TYPE_IO:
+ case HWH_TYPE_IO_RD:
+ if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
+ ISCSI_OP_NOOP_OUT) {
+ be_complete_nopin_resp(beiscsi_conn, task, psol);
+ } else
+ be_complete_io(beiscsi_conn, task, psol);
+ break;
+
+ case HWH_TYPE_LOGOUT:
+ be_complete_logout(beiscsi_conn, task, psol);
+ break;
+
+ case HWH_TYPE_LOGIN:
+ SE_DEBUG(DBG_LVL_1,
+ "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
+ "- Solicited path \n");
+ break;
+
+ case HWH_TYPE_TMF:
+ be_complete_tmf(beiscsi_conn, task, psol);
+ break;
+
+ case HWH_TYPE_NOP:
+ be_complete_nopin_resp(beiscsi_conn, task, psol);
+ break;
+
+ default:
+		shost_printk(KERN_WARNING, phba->shost,
+				"unknown WRB type, wrb_index 0x%x CID 0x%x\n",
+				((psol->dw[offsetof(struct amap_sol_cqe,
+				wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
+				((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32]
+				& SOL_CID_MASK) >> 6));
+ break;
+ }
+
+ spin_unlock_bh(&session->lock);
+}
+
+static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
+ *pasync_ctx, unsigned int is_header,
+ unsigned int host_write_ptr)
+{
+ if (is_header)
+ return &pasync_ctx->async_entry[host_write_ptr].
+ header_busy_list;
+ else
+ return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
+}
+
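+/*
+ * hwi_get_async_handle - map a default-PDU CQE back to the posted buffer.
+ * The buffer's physical address is recovered from the CQE, its index is
+ * derived from the offset into the header or data pool, and the matching
+ * handle is located on the per-index busy list.
+ */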
+static struct async_pdu_handle *
+hwi_get_async_handle(struct beiscsi_hba *phba,
+ struct beiscsi_conn *beiscsi_conn,
+ struct hwi_async_pdu_context *pasync_ctx,
+ struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
+{
+ struct be_bus_address phys_addr;
+ struct list_head *pbusy_list;
+ struct async_pdu_handle *pasync_handle = NULL;
+ int buffer_len = 0;
+ unsigned char buffer_index = -1;
+ unsigned char is_header = 0;
+
+ phys_addr.u.a32.address_lo =
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
+ ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
+ & PDUCQE_DPL_MASK) >> 16);
+ phys_addr.u.a32.address_hi =
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
+
+	/*
+	 * u.a32 and u.a64 alias within the union, so the address_lo and
+	 * address_hi words written above already form u.a64.address.
+	 */
+
+ switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
+ & PDUCQE_CODE_MASK) {
+ case UNSOL_HDR_NOTIFY:
+ is_header = 1;
+
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
+ (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ index) / 32] & PDUCQE_INDEX_MASK));
+
+ buffer_len = (unsigned int)(phys_addr.u.a64.address -
+ pasync_ctx->async_header.pa_base.u.a64.address);
+
+ buffer_index = buffer_len /
+ pasync_ctx->async_header.buffer_size;
+
+ break;
+ case UNSOL_DATA_NOTIFY:
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
+ dw[offsetof(struct amap_i_t_dpdu_cqe,
+ index) / 32] & PDUCQE_INDEX_MASK));
+ buffer_len = (unsigned long)(phys_addr.u.a64.address -
+ pasync_ctx->async_data.pa_base.u.
+ a64.address);
+ buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
+ break;
+ default:
+ pbusy_list = NULL;
+ shost_printk(KERN_WARNING, phba->shost,
+ "Unexpected code=%d \n",
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ code) / 32] & PDUCQE_CODE_MASK);
+ return NULL;
+ }
+
+	WARN_ON(buffer_index > pasync_ctx->async_data.num_entries);
+ WARN_ON(list_empty(pbusy_list));
+ list_for_each_entry(pasync_handle, pbusy_list, link) {
+ WARN_ON(pasync_handle->consumed);
+ if (pasync_handle->index == buffer_index)
+ break;
+ }
+
+ WARN_ON(!pasync_handle);
+
+ pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid;
+ pasync_handle->is_header = is_header;
+ pasync_handle->buffer_len = ((pdpdu_cqe->
+ dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
+ & PDUCQE_DPL_MASK) >> 16);
+
+ *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ index) / 32] & PDUCQE_INDEX_MASK);
+ return pasync_handle;
+}
+
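+/*
+ * hwi_update_async_writables - walk the endpoint read pointer forward to
+ * the index reported by the CQE, marking the first handle on each busy
+ * list as consumed and counting how many ring entries became writable.
+ */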
+static unsigned int
+hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int is_header, unsigned int cq_index)
+{
+ struct list_head *pbusy_list;
+ struct async_pdu_handle *pasync_handle;
+ unsigned int num_entries, writables = 0;
+ unsigned int *pep_read_ptr, *pwritables;
+
+
+ if (is_header) {
+ pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
+ pwritables = &pasync_ctx->async_header.writables;
+ num_entries = pasync_ctx->async_header.num_entries;
+ } else {
+ pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
+ pwritables = &pasync_ctx->async_data.writables;
+ num_entries = pasync_ctx->async_data.num_entries;
+ }
+
+ while ((*pep_read_ptr) != cq_index) {
+ (*pep_read_ptr)++;
+ *pep_read_ptr = (*pep_read_ptr) % num_entries;
+
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
+ *pep_read_ptr);
+ if (writables == 0)
+ WARN_ON(list_empty(pbusy_list));
+
+ if (!list_empty(pbusy_list)) {
+ pasync_handle = list_entry(pbusy_list->next,
+ struct async_pdu_handle,
+ link);
+ WARN_ON(!pasync_handle);
+ pasync_handle->consumed = 1;
+ }
+
+ writables++;
+ }
+
+ if (!writables) {
+ SE_DEBUG(DBG_LVL_1,
+ "Duplicate notification received - index 0x%x!!\n",
+ cq_index);
+ WARN_ON(1);
+ }
+
+ *pwritables = *pwritables + writables;
+ return 0;
+}
+
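+/*
+ * hwi_free_async_msg - return every handle queued on a CRI's wait queue
+ * to its free list; the first entry carries the header buffer, the rest
+ * are data buffers.
+ */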
+static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
+ unsigned int cri)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_handle, *tmp_handle;
+ struct list_head *plist;
+ unsigned int i = 0;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+
+ plist = &pasync_ctx->async_entry[cri].wait_queue.list;
+
+ list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
+ list_del(&pasync_handle->link);
+
+ if (i == 0) {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_header.free_list);
+ pasync_ctx->async_header.free_entries++;
+ i++;
+ } else {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_data.free_list);
+ pasync_ctx->async_data.free_entries++;
+ i++;
+ }
+ }
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
+ pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
+ pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
+ return 0;
+}
+
+static struct phys_addr *
+hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int is_header, unsigned int host_write_ptr)
+{
+ struct phys_addr *pasync_sge = NULL;
+
+ if (is_header)
+ pasync_sge = pasync_ctx->async_header.ring_base;
+ else
+ pasync_sge = pasync_ctx->async_data.ring_base;
+
+ return pasync_sge + host_write_ptr;
+}
+
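+/*
+ * hwi_post_async_buffers - repost free header or data buffers to the
+ * default PDU ring. Buffers are posted in multiples of 8, moved from
+ * the free list to the busy list of their ring slot, and the doorbell
+ * is rung with the number posted.
+ */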
+static void hwi_post_async_buffers(struct beiscsi_hba *phba,
+ unsigned int is_header)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_handle;
+ struct list_head *pfree_link, *pbusy_list;
+ struct phys_addr *pasync_sge;
+ unsigned int ring_id, num_entries;
+ unsigned int host_write_num;
+ unsigned int writables;
+ unsigned int i = 0;
+ u32 doorbell = 0;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+
+ if (is_header) {
+ num_entries = pasync_ctx->async_header.num_entries;
+ writables = min(pasync_ctx->async_header.writables,
+ pasync_ctx->async_header.free_entries);
+ pfree_link = pasync_ctx->async_header.free_list.next;
+ host_write_num = pasync_ctx->async_header.host_write_ptr;
+ ring_id = phwi_ctrlr->default_pdu_hdr.id;
+ } else {
+ num_entries = pasync_ctx->async_data.num_entries;
+ writables = min(pasync_ctx->async_data.writables,
+ pasync_ctx->async_data.free_entries);
+ pfree_link = pasync_ctx->async_data.free_list.next;
+ host_write_num = pasync_ctx->async_data.host_write_ptr;
+ ring_id = phwi_ctrlr->default_pdu_data.id;
+ }
+
+ writables = (writables / 8) * 8;
+ if (writables) {
+ for (i = 0; i < writables; i++) {
+ pbusy_list =
+ hwi_get_async_busy_list(pasync_ctx, is_header,
+ host_write_num);
+ pasync_handle =
+ list_entry(pfree_link, struct async_pdu_handle,
+ link);
+ WARN_ON(!pasync_handle);
+ pasync_handle->consumed = 0;
+
+ pfree_link = pfree_link->next;
+
+ pasync_sge = hwi_get_ring_address(pasync_ctx,
+ is_header, host_write_num);
+
+ pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
+ pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
+
+ list_move(&pasync_handle->link, pbusy_list);
+
+ host_write_num++;
+ host_write_num = host_write_num % num_entries;
+ }
+
+ if (is_header) {
+ pasync_ctx->async_header.host_write_ptr =
+ host_write_num;
+ pasync_ctx->async_header.free_entries -= writables;
+ pasync_ctx->async_header.writables -= writables;
+ pasync_ctx->async_header.busy_entries += writables;
+ } else {
+ pasync_ctx->async_data.host_write_ptr = host_write_num;
+ pasync_ctx->async_data.free_entries -= writables;
+ pasync_ctx->async_data.writables -= writables;
+ pasync_ctx->async_data.busy_entries += writables;
+ }
+
+ doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
+ doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
+ doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
+ doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
+ << DB_DEF_PDU_CQPROC_SHIFT;
+
+ iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
+ }
+}
+
+static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
+ struct beiscsi_conn *beiscsi_conn,
+ struct i_t_dpdu_cqe *pdpdu_cqe)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_handle = NULL;
+ unsigned int cq_index = -1;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+
+ pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
+ pdpdu_cqe, &cq_index);
+ BUG_ON(pasync_handle->is_header != 0);
+ if (pasync_handle->consumed == 0)
+ hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
+ cq_index);
+
+ hwi_free_async_msg(phba, pasync_handle->cri);
+ hwi_post_async_buffers(phba, pasync_handle->is_header);
+}
+
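+/*
+ * hwi_fwd_async_msg - assemble a completed unsolicited PDU. The first
+ * handle on the wait queue carries the header; any further data buffers
+ * are copied back-to-back into the first data buffer before the PDU is
+ * handed to beiscsi_process_async_pdu().
+ */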
+static unsigned int
+hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
+{
+ struct list_head *plist;
+ struct async_pdu_handle *pasync_handle;
+ void *phdr = NULL;
+ unsigned int hdr_len = 0, buf_len = 0;
+ unsigned int status, index = 0, offset = 0;
+ void *pfirst_buffer = NULL;
+ unsigned int num_buf = 0;
+
+ plist = &pasync_ctx->async_entry[cri].wait_queue.list;
+
+ list_for_each_entry(pasync_handle, plist, link) {
+ if (index == 0) {
+ phdr = pasync_handle->pbuffer;
+ hdr_len = pasync_handle->buffer_len;
+ } else {
+ buf_len = pasync_handle->buffer_len;
+ if (!num_buf) {
+ pfirst_buffer = pasync_handle->pbuffer;
+ num_buf++;
+ }
+ memcpy(pfirst_buffer + offset,
+ pasync_handle->pbuffer, buf_len);
+			offset += buf_len;
+ }
+ index++;
+ }
+
+ status = beiscsi_process_async_pdu(beiscsi_conn, phba,
+ beiscsi_conn->beiscsi_conn_cid,
+ phdr, hdr_len, pfirst_buffer,
+ buf_len);
+
+ if (status == 0)
+ hwi_free_async_msg(phba, cri);
+ return 0;
+}
+
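+/*
+ * hwi_gather_async_pdu - queue an async handle on its CRI's wait queue.
+ * For a header the expected data length is parsed from the PDU; for
+ * data the received byte count is accumulated. Once all expected bytes
+ * have arrived the PDU is forwarded.
+ */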
+static unsigned int
+hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ struct async_pdu_handle *pasync_handle)
+{
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct hwi_controller *phwi_ctrlr;
+ unsigned int bytes_needed = 0, status = 0;
+ unsigned short cri = pasync_handle->cri;
+ struct pdu_base *ppdu;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+
+ list_del(&pasync_handle->link);
+ if (pasync_handle->is_header) {
+ pasync_ctx->async_header.busy_entries--;
+ if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
+ hwi_free_async_msg(phba, cri);
+ BUG();
+ }
+
+ pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
+ pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
+ pasync_ctx->async_entry[cri].wait_queue.hdr_len =
+ (unsigned short)pasync_handle->buffer_len;
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_entry[cri].wait_queue.list);
+
+ ppdu = pasync_handle->pbuffer;
+ bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
+ data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
+ 0xFFFF0000) | ((be16_to_cpu((ppdu->
+ dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
+ & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
+
+ if (status == 0) {
+ pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
+ bytes_needed;
+
+ if (bytes_needed == 0)
+ status = hwi_fwd_async_msg(beiscsi_conn, phba,
+ pasync_ctx, cri);
+ }
+ } else {
+ pasync_ctx->async_data.busy_entries--;
+ if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
+ list_add_tail(&pasync_handle->link,
+ &pasync_ctx->async_entry[cri].wait_queue.
+ list);
+ pasync_ctx->async_entry[cri].wait_queue.
+ bytes_received +=
+ (unsigned short)pasync_handle->buffer_len;
+
+ if (pasync_ctx->async_entry[cri].wait_queue.
+ bytes_received >=
+ pasync_ctx->async_entry[cri].wait_queue.
+ bytes_needed)
+ status = hwi_fwd_async_msg(beiscsi_conn, phba,
+ pasync_ctx, cri);
+ }
+ }
+ return status;
+}
+
+static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_hba *phba,
+ struct i_t_dpdu_cqe *pdpdu_cqe)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_handle = NULL;
+ unsigned int cq_index = -1;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
+ pdpdu_cqe, &cq_index);
+
+ if (pasync_handle->consumed == 0)
+ hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
+ cq_index);
+ hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
+ hwi_post_async_buffers(phba, pasync_handle->is_header);
+}
+
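+/*
+ * beiscsi_process_cq - drain the completion queue. Each valid CQE is
+ * converted to CPU endianness and dispatched on its completion code;
+ * the CQ doorbell is rung every 32 entries and rearmed once the queue
+ * is empty. Returns the total number of CQEs processed.
+ */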
+static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_queue_info *cq;
+ struct sol_cqe *sol;
+ struct dmsg_cqe *dmsg;
+ unsigned int num_processed = 0;
+ unsigned int tot_nump = 0;
+ struct beiscsi_conn *beiscsi_conn;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ cq = &phwi_context->be_cq;
+ sol = queue_tail_node(cq);
+
+ while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
+ CQE_VALID_MASK) {
+ be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
+
+ beiscsi_conn = phba->conn_table[(u32) (sol->
+ dw[offsetof(struct amap_sol_cqe, cid) / 32] &
+ SOL_CID_MASK) >> 6];
+
+ if (!beiscsi_conn || !beiscsi_conn->ep) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "Connection table empty for cid = %d\n",
+ (u32)(sol->dw[offsetof(struct amap_sol_cqe,
+ cid) / 32] & SOL_CID_MASK) >> 6);
+ return 0;
+ }
+
+ if (num_processed >= 32) {
+ hwi_ring_cq_db(phba, phwi_context->be_cq.id,
+ num_processed, 0, 0);
+ tot_nump += num_processed;
+ num_processed = 0;
+ }
+
+ switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK) {
+ case SOL_CMD_COMPLETE:
+ hwi_complete_cmd(beiscsi_conn, phba, sol);
+ break;
+ case DRIVERMSG_NOTIFY:
+ SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
+ dmsg = (struct dmsg_cqe *)sol;
+ hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
+ break;
+ case UNSOL_HDR_NOTIFY:
+ case UNSOL_DATA_NOTIFY:
+ SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n");
+ hwi_process_default_pdu_ring(beiscsi_conn, phba,
+ (struct i_t_dpdu_cqe *)sol);
+ break;
+ case CXN_INVALIDATE_INDEX_NOTIFY:
+ case CMD_INVALIDATED_NOTIFY:
+ case CXN_INVALIDATE_NOTIFY:
+ SE_DEBUG(DBG_LVL_1,
+				 "Ignoring CQ Error notification for cmd/cxn"
+				 " invalidate\n");
+ break;
+ case SOL_CMD_KILLED_DATA_DIGEST_ERR:
+ case CMD_KILLED_INVALID_STATSN_RCVD:
+ case CMD_KILLED_INVALID_R2T_RCVD:
+ case CMD_CXN_KILLED_LUN_INVALID:
+ case CMD_CXN_KILLED_ICD_INVALID:
+ case CMD_CXN_KILLED_ITT_INVALID:
+ case CMD_CXN_KILLED_SEQ_OUTOFORDER:
+ case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
+ SE_DEBUG(DBG_LVL_1,
+ "CQ Error notification for cmd.. "
+ "code %d cid 0x%x\n",
+ sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK,
+ (sol->dw[offsetof(struct amap_sol_cqe, cid) /
+ 32] & SOL_CID_MASK));
+ break;
+ case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
+ SE_DEBUG(DBG_LVL_1,
+ "Digest error on def pdu ring, dropping..\n");
+ hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
+ (struct i_t_dpdu_cqe *) sol);
+ break;
+ case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
+ case CXN_KILLED_BURST_LEN_MISMATCH:
+ case CXN_KILLED_AHS_RCVD:
+ case CXN_KILLED_HDR_DIGEST_ERR:
+ case CXN_KILLED_UNKNOWN_HDR:
+ case CXN_KILLED_STALE_ITT_TTT_RCVD:
+ case CXN_KILLED_INVALID_ITT_TTT_RCVD:
+ case CXN_KILLED_TIMED_OUT:
+ case CXN_KILLED_FIN_RCVD:
+ case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
+ case CXN_KILLED_BAD_WRB_INDEX_ERROR:
+ case CXN_KILLED_OVER_RUN_RESIDUAL:
+ case CXN_KILLED_UNDER_RUN_RESIDUAL:
+ case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
+ SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID "
+ "0x%x...\n",
+ sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK,
+ sol->dw[offsetof(struct amap_sol_cqe, cid) /
+ 32] & CQE_CID_MASK);
+ iscsi_conn_failure(beiscsi_conn->conn,
+ ISCSI_ERR_CONN_FAILED);
+ break;
+ case CXN_KILLED_RST_SENT:
+ case CXN_KILLED_RST_RCVD:
+ SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset received/sent "
+ "on CID 0x%x...\n",
+ sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK,
+ sol->dw[offsetof(struct amap_sol_cqe, cid) /
+ 32] & CQE_CID_MASK);
+ iscsi_conn_failure(beiscsi_conn->conn,
+ ISCSI_ERR_CONN_FAILED);
+ break;
+ default:
+ SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
+ "received on CID 0x%x...\n",
+ sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK,
+ sol->dw[offsetof(struct amap_sol_cqe, cid) /
+ 32] & CQE_CID_MASK);
+ break;
+ }
+
+ AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
+ queue_tail_inc(cq);
+ sol = queue_tail_node(cq);
+ num_processed++;
+ }
+
+ if (num_processed > 0) {
+ tot_nump += num_processed;
+ hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed,
+ 1, 0);
+ }
+ return tot_nump;
+}
+
+static void beiscsi_process_all_cqs(struct work_struct *work)
+{
+ unsigned long flags;
+ struct beiscsi_hba *phba =
+ container_of(work, struct beiscsi_hba, work_cqs);
+
+ if (phba->todo_mcc_cq) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ phba->todo_mcc_cq = 0;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n");
+ }
+
+ if (phba->todo_cq) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ phba->todo_cq = 0;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ beiscsi_process_cq(phba);
+ }
+}
+
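+/*
+ * be_iopoll - blk_iopoll handler. Drains the CQ; when fewer CQEs than
+ * the budget were processed, polling is completed and the EQ doorbell
+ * is rearmed.
+ */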
+static int be_iopoll(struct blk_iopoll *iop, int budget)
+{
+	unsigned int ret;
+ struct beiscsi_hba *phba;
+
+ phba = container_of(iop, struct beiscsi_hba, iopoll);
+
+ ret = beiscsi_process_cq(phba);
+ if (ret < budget) {
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ blk_iopoll_complete(iop);
+ hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0,
+ 0, 1, 1);
+ }
+ return ret;
+}
+
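+/*
+ * hwi_write_sgl - program the BHS address and the first two data SGEs
+ * directly in the WRB, then mirror the complete scatterlist into the
+ * SGL fragment page, marking the final SGE as last.
+ */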
+static void
+hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
+ unsigned int num_sg, struct beiscsi_io_task *io_task)
+{
+ struct iscsi_sge *psgl;
+ unsigned short sg_len, index;
+ unsigned int sge_len = 0;
+ unsigned long long addr;
+ struct scatterlist *l_sg;
+ unsigned int offset;
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ l_sg = sg;
+	for (index = 0; (index < num_sg) && (index < 2);
+	     index++, sg = sg_next(sg)) {
+ if (index == 0) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
+ (addr & 0xFFFFFFFF));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
+ (addr >> 32));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
+ sg_len);
+ sge_len = sg_len;
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
+ pwrb, sge_len);
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
+ (addr & 0xFFFFFFFF));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
+ (addr >> 32));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
+ sg_len);
+ }
+ }
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+ memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+
+ if (num_sg == 2)
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
+ sg = l_sg;
+ psgl++;
+ psgl++;
+ offset = 0;
+	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ (addr & 0xFFFFFFFF));
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ (addr >> 32));
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+ offset += sg_len;
+ }
+ psgl--;
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
+
+static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
+{
+ struct iscsi_sge *psgl;
+ unsigned long long addr;
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct beiscsi_conn *beiscsi_conn = io_task->conn;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+
+ io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ if (task->data) {
+ if (task->data_count) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
+			addr = (u64) pci_map_single(phba->pcidev,
+						    task->data,
+						    task->data_count,
+						    PCI_DMA_TODEVICE);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
+ addr = 0;
+ }
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
+ (addr & 0xFFFFFFFF));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
+ (addr >> 32));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
+ task->data_count);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
+ addr = 0;
+ }
+
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+ if (task->data) {
+ psgl++;
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+
+ psgl++;
+		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+			      (addr & 0xFFFFFFFF));
+		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+			      (addr >> 32));
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
+ }
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
+
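+/*
+ * beiscsi_find_mem_req - compute the size of every memory region the
+ * driver needs (queues, WRBs, SGLs and async PDU pools) and record
+ * them in phba->mem_req[] for beiscsi_alloc_mem() to allocate.
+ */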
+static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
+{
+ unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages;
+ unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
+ unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
+
+	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
+				      sizeof(struct sol_cqe));
+	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
+				      sizeof(struct be_eq_entry));
+	num_async_pdu_buf_pages =
+		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
+			       phba->params.defpdu_hdr_sz);
+	num_async_pdu_buf_sgl_pages =
+		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
+			       sizeof(struct phys_addr));
+	num_async_pdu_data_pages =
+		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
+			       phba->params.defpdu_data_sz);
+	num_async_pdu_data_sgl_pages =
+		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
+			       sizeof(struct phys_addr));
+
+ phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
+
+ phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
+ BE_ISCSI_PDU_HEADER_SIZE;
+ phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
+ sizeof(struct hwi_context_memory);
+
+ phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE;
+ phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE;
+
+ phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
+ * (phba->params.wrbs_per_cxn)
+ * phba->params.cxns_per_ctrl;
+ wrb_sz_per_cxn = sizeof(struct wrb_handle) *
+ (phba->params.wrbs_per_cxn);
+ phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
+ phba->params.cxns_per_ctrl);
+
+ phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
+ phba->params.icds_per_ctrl;
+ phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
+ phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
+
+ phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
+ num_async_pdu_buf_pages * PAGE_SIZE;
+ phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
+ num_async_pdu_data_pages * PAGE_SIZE;
+ phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
+ num_async_pdu_buf_sgl_pages * PAGE_SIZE;
+ phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
+ num_async_pdu_data_sgl_pages * PAGE_SIZE;
+ phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
+ phba->params.asyncpdus_per_ctrl *
+ sizeof(struct async_pdu_handle);
+ phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
+ phba->params.asyncpdus_per_ctrl *
+ sizeof(struct async_pdu_handle);
+ phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
+ sizeof(struct hwi_async_pdu_context) +
+ (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
+}
+
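+/*
+ * beiscsi_alloc_mem - allocate the regions sized by beiscsi_find_mem_req.
+ * Each region is carved from DMA-coherent memory; when a large
+ * allocation fails it is retried in smaller fragments (rounded down to
+ * a power of two, then halved) until BE_MIN_MEM_SIZE, and the fragments
+ * are recorded in the region's descriptor.
+ */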
+static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ dma_addr_t bus_add;
+ struct mem_array *mem_arr, *mem_arr_orig;
+ unsigned int i, j, alloc_size, curr_alloc_size;
+
+ phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
+ if (!phba->phwi_ctrlr)
+ return -ENOMEM;
+
+ phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
+ GFP_KERNEL);
+ if (!phba->init_mem) {
+ kfree(phba->phwi_ctrlr);
+ return -ENOMEM;
+ }
+
+ mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
+ GFP_KERNEL);
+ if (!mem_arr_orig) {
+ kfree(phba->init_mem);
+ kfree(phba->phwi_ctrlr);
+ return -ENOMEM;
+ }
+
+ mem_descr = phba->init_mem;
+ for (i = 0; i < SE_MEM_MAX; i++) {
+ j = 0;
+ mem_arr = mem_arr_orig;
+ alloc_size = phba->mem_req[i];
+ memset(mem_arr, 0, sizeof(struct mem_array) *
+ BEISCSI_MAX_FRAGS_INIT);
+ curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
+ do {
+ mem_arr->virtual_address = pci_alloc_consistent(
+ phba->pcidev,
+ curr_alloc_size,
+ &bus_add);
+ if (!mem_arr->virtual_address) {
+ if (curr_alloc_size <= BE_MIN_MEM_SIZE)
+ goto free_mem;
+ if (curr_alloc_size -
+ rounddown_pow_of_two(curr_alloc_size))
+ curr_alloc_size = rounddown_pow_of_two
+ (curr_alloc_size);
+ else
+ curr_alloc_size = curr_alloc_size / 2;
+ } else {
+ mem_arr->bus_address.u.
+ a64.address = (__u64) bus_add;
+ mem_arr->size = curr_alloc_size;
+ alloc_size -= curr_alloc_size;
+ curr_alloc_size = min(be_max_phys_size *
+ 1024, alloc_size);
+ j++;
+ mem_arr++;
+ }
+ } while (alloc_size);
+ mem_descr->num_elements = j;
+ mem_descr->size_in_bytes = phba->mem_req[i];
+ mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
+ GFP_KERNEL);
+ if (!mem_descr->mem_array)
+ goto free_mem;
+
+ memcpy(mem_descr->mem_array, mem_arr_orig,
+ sizeof(struct mem_array) * j);
+ mem_descr++;
+ }
+ kfree(mem_arr_orig);
+ return 0;
+free_mem:
+ mem_descr->num_elements = j;
+ while ((i) || (j)) {
+ for (j = mem_descr->num_elements; j > 0; j--) {
+ pci_free_consistent(phba->pcidev,
+ mem_descr->mem_array[j - 1].size,
+ mem_descr->mem_array[j - 1].
+ virtual_address,
+ mem_descr->mem_array[j - 1].
+ bus_address.u.a64.address);
+ }
+ if (i) {
+ i--;
+ kfree(mem_descr->mem_array);
+ mem_descr--;
+ }
+ }
+ kfree(mem_arr_orig);
+ kfree(phba->init_mem);
+ kfree(phba->phwi_ctrlr);
+ return -ENOMEM;
+}
+
+static int beiscsi_get_memory(struct beiscsi_hba *phba)
+{
+ beiscsi_find_mem_req(phba);
+ return beiscsi_alloc_mem(phba);
+}
+
+static void iscsi_init_global_templates(struct beiscsi_hba *phba)
+{
+ struct pdu_data_out *pdata_out;
+ struct pdu_nop_out *pnop_out;
+ struct be_mem_descriptor *mem_descr;
+
+ mem_descr = phba->init_mem;
+ mem_descr += ISCSI_MEM_GLOBAL_HEADER;
+ pdata_out =
+ (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
+ memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
+
+ AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
+ IIOC_SCSI_DATA);
+
+ pnop_out =
+ (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
+ virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
+
+ memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
+ AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
+ AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
+ AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
+}
+
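+/*
+ * beiscsi_init_wrb_handle - build the per-connection WRB handle arrays
+ * from the HWI_MEM_WRBH pool and attach a hardware WRB from HWI_MEM_WRB
+ * to every handle, stepping to the next memory fragment when one is
+ * exhausted.
+ */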
+static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
+ struct wrb_handle *pwrb_handle;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+ struct iscsi_wrb *pwrb;
+ unsigned int num_cxn_wrbh;
+ unsigned int num_cxn_wrb, j, idx, index;
+
+ mem_descr_wrbh = phba->init_mem;
+ mem_descr_wrbh += HWI_MEM_WRBH;
+
+ mem_descr_wrb = phba->init_mem;
+ mem_descr_wrb += HWI_MEM_WRB;
+
+ idx = 0;
+ pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
+ num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
+ ((sizeof(struct wrb_handle)) *
+ phba->params.wrbs_per_cxn));
+ phwi_ctrlr = phba->phwi_ctrlr;
+
+ for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+ pwrb_context = &phwi_ctrlr->wrb_context[index];
+ SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index,
+ pwrb_context);
+ pwrb_context->pwrb_handle_base =
+ kzalloc(sizeof(struct wrb_handle *) *
+ phba->params.wrbs_per_cxn, GFP_KERNEL);
+ pwrb_context->pwrb_handle_basestd =
+ kzalloc(sizeof(struct wrb_handle *) *
+ phba->params.wrbs_per_cxn, GFP_KERNEL);
+ if (num_cxn_wrbh) {
+ pwrb_context->alloc_index = 0;
+ pwrb_context->wrb_handles_available = 0;
+ for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
+ pwrb_context->pwrb_handle_base[j] = pwrb_handle;
+ pwrb_context->pwrb_handle_basestd[j] =
+ pwrb_handle;
+ pwrb_context->wrb_handles_available++;
+ pwrb_handle++;
+ }
+ pwrb_context->free_index = 0;
+ num_cxn_wrbh--;
+ } else {
+ idx++;
+ pwrb_handle =
+ mem_descr_wrbh->mem_array[idx].virtual_address;
+ num_cxn_wrbh =
+ ((mem_descr_wrbh->mem_array[idx].size) /
+ ((sizeof(struct wrb_handle)) *
+ phba->params.wrbs_per_cxn));
+			pwrb_context->alloc_index = 0;
+			pwrb_context->wrb_handles_available = 0;
+ for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
+ pwrb_context->pwrb_handle_base[j] = pwrb_handle;
+ pwrb_context->pwrb_handle_basestd[j] =
+ pwrb_handle;
+ pwrb_context->wrb_handles_available++;
+ pwrb_handle++;
+ }
+ pwrb_context->free_index = 0;
+ num_cxn_wrbh--;
+ }
+ }
+ idx = 0;
+ pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
+	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size /
+		       (sizeof(struct iscsi_wrb) *
+			phba->params.wrbs_per_cxn));
+
+	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+ pwrb_context = &phwi_ctrlr->wrb_context[index];
+ if (num_cxn_wrb) {
+ for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
+ pwrb_handle = pwrb_context->pwrb_handle_base[j];
+ pwrb_handle->pwrb = pwrb;
+ pwrb++;
+ }
+ num_cxn_wrb--;
+ } else {
+ idx++;
+ pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
+			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size /
+					(sizeof(struct iscsi_wrb) *
+					 phba->params.wrbs_per_cxn));
+ for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
+ pwrb_handle = pwrb_context->pwrb_handle_base[j];
+ pwrb_handle->pwrb = pwrb;
+ pwrb++;
+ }
+ num_cxn_wrb--;
+ }
+ }
+}
+
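+/*
+ * hwi_init_async_pdu_ctx - wire the async PDU context to its memory
+ * regions (header/data buffers, rings and handles) and seed the free
+ * lists with one handle per async PDU buffer.
+ */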
+static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hba_parameters *p = &phba->params;
+ struct hwi_async_pdu_context *pasync_ctx;
+ struct async_pdu_handle *pasync_header_h, *pasync_data_h;
+ unsigned int index;
+ struct be_mem_descriptor *mem_descr;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
+ memset(pasync_ctx, 0, sizeof(*pasync_ctx));
+
+ pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
+ pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
+ pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
+ pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
+ if (mem_descr->mem_array[0].virtual_address) {
+ SE_DEBUG(DBG_LVL_8,
+			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
+			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "No Virtual address \n");
+
+ pasync_ctx->async_header.va_base =
+ mem_descr->mem_array[0].virtual_address;
+
+ pasync_ctx->async_header.pa_base.u.a64.address =
+ mem_descr->mem_array[0].bus_address.u.a64.address;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING;
+ if (mem_descr->mem_array[0].virtual_address) {
+ SE_DEBUG(DBG_LVL_8,
+			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
+			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "No Virtual address \n");
+ pasync_ctx->async_header.ring_base =
+ mem_descr->mem_array[0].virtual_address;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
+ if (mem_descr->mem_array[0].virtual_address) {
+ SE_DEBUG(DBG_LVL_8,
+			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
+			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "No Virtual address \n");
+
+ pasync_ctx->async_header.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_header.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_BUF;
+ if (mem_descr->mem_array[0].virtual_address) {
+ SE_DEBUG(DBG_LVL_8,
+			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
+			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "No Virtual address \n");
+ pasync_ctx->async_data.va_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_data.pa_base.u.a64.address =
+ mem_descr->mem_array[0].bus_address.u.a64.address;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING;
+ if (mem_descr->mem_array[0].virtual_address) {
+ SE_DEBUG(DBG_LVL_8,
+			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
+			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "No Virtual address \n");
+
+ pasync_ctx->async_data.ring_base =
+ mem_descr->mem_array[0].virtual_address;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
+ if (!mem_descr->mem_array[0].virtual_address)
+ shost_printk(KERN_WARNING, phba->shost,
+ "No Virtual address \n");
+
+ pasync_ctx->async_data.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_data.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+
+ pasync_header_h =
+ (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
+ pasync_data_h =
+ (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
+
+ for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
+ pasync_header_h->cri = -1;
+ pasync_header_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_header_h->link);
+ pasync_header_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->async_header.va_base) +
+ (p->defpdu_hdr_sz * index));
+
+ pasync_header_h->pa.u.a64.address =
+ pasync_ctx->async_header.pa_base.u.a64.address +
+ (p->defpdu_hdr_sz * index);
+
+ list_add_tail(&pasync_header_h->link,
+ &pasync_ctx->async_header.free_list);
+ pasync_header_h++;
+ pasync_ctx->async_header.free_entries++;
+ pasync_ctx->async_header.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ header_busy_list);
+ pasync_data_h->cri = -1;
+ pasync_data_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_data_h->link);
+ pasync_data_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->async_data.va_base) +
+ (p->defpdu_data_sz * index));
+
+ pasync_data_h->pa.u.a64.address =
+ pasync_ctx->async_data.pa_base.u.a64.address +
+ (p->defpdu_data_sz * index);
+
+ list_add_tail(&pasync_data_h->link,
+ &pasync_ctx->async_data.free_list);
+ pasync_data_h++;
+ pasync_ctx->async_data.free_entries++;
+ pasync_ctx->async_data.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
+ }
+
+ pasync_ctx->async_header.host_write_ptr = 0;
+ pasync_ctx->async_header.ep_read_ptr = -1;
+ pasync_ctx->async_data.host_write_ptr = 0;
+ pasync_ctx->async_data.ep_read_ptr = -1;
+}
+
+static int
+be_sgl_create_contiguous(void *virtual_address,
+ u64 physical_address, u32 length,
+ struct be_dma_mem *sgl)
+{
+ WARN_ON(!virtual_address);
+ WARN_ON(!physical_address);
+	WARN_ON(length == 0);
+ WARN_ON(!sgl);
+
+ sgl->va = virtual_address;
+ sgl->dma = physical_address;
+ sgl->size = length;
+
+ return 0;
+}
+
+static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
+{
+ memset(sgl, 0, sizeof(*sgl));
+}
+
+static void
+hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
+ struct mem_array *pmem, struct be_dma_mem *sgl)
+{
+ if (sgl->va)
+ be_sgl_destroy_contiguous(sgl);
+
+ be_sgl_create_contiguous(pmem->virtual_address,
+ pmem->bus_address.u.a64.address,
+ pmem->size, sgl);
+}
+
+static void
+hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
+ struct mem_array *pmem, struct be_dma_mem *sgl)
+{
+ if (sgl->va)
+ be_sgl_destroy_contiguous(sgl);
+
+ be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
+ pmem->bus_address.u.a64.address,
+ pmem->size, sgl);
+}
+
+static int be_fill_queue(struct be_queue_info *q,
+ u16 len, u16 entry_size, void *vaddress)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+
+ memset(q, 0, sizeof(*q));
+ q->len = len;
+ q->entry_size = entry_size;
+ mem->size = len * entry_size;
+ mem->va = vaddress;
+ if (!mem->va)
+ return -ENOMEM;
+ memset(mem->va, 0, mem->size);
+ return 0;
+}
+
+static int beiscsi_create_eq(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context)
+{
+ unsigned int idx;
+ int ret;
+ struct be_queue_info *eq;
+ struct be_dma_mem *mem;
+ struct be_mem_descriptor *mem_descr;
+ void *eq_vaddress;
+
+ idx = 0;
+ eq = &phwi_context->be_eq.q;
+ mem = &eq->dma_mem;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_EQ;
+ eq_vaddress = mem_descr->mem_array[idx].virtual_address;
+
+ ret = be_fill_queue(eq, phba->params.num_eq_entries,
+ sizeof(struct be_eq_entry), eq_vaddress);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost,
+ "be_fill_queue Failed for EQ \n");
+ return ret;
+ }
+
+ mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
+
+ ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+ phwi_context->be_eq.cur_eqd);
+ if (ret) {
+		shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create"
+			     " Failed for EQ\n");
+ return ret;
+ }
+ SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id);
+ return 0;
+}
+
+static int beiscsi_create_cq(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context)
+{
+ unsigned int idx;
+ int ret;
+ struct be_queue_info *cq, *eq;
+ struct be_dma_mem *mem;
+ struct be_mem_descriptor *mem_descr;
+ void *cq_vaddress;
+
+ idx = 0;
+ cq = &phwi_context->be_cq;
+ eq = &phwi_context->be_eq.q;
+ mem = &cq->dma_mem;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_CQ;
+ cq_vaddress = mem_descr->mem_array[idx].virtual_address;
+ ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
+ sizeof(struct sol_cqe), cq_vaddress);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost,
+ "be_fill_queue Failed for ISCSI CQ \n");
+ return ret;
+ }
+
+ mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
+ ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0);
+ if (ret) {
+		shost_printk(KERN_ERR, phba->shost,
+			     "beiscsi_cmd_cq_create Failed for ISCSI CQ\n");
+ return ret;
+ }
+ SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id);
+ SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
+ return 0;
+}
+
+static int
+beiscsi_create_def_hdr(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context,
+ struct hwi_controller *phwi_ctrlr,
+ unsigned int def_pdu_ring_sz)
+{
+ unsigned int idx;
+ int ret;
+ struct be_queue_info *dq, *cq;
+ struct be_dma_mem *mem;
+ struct be_mem_descriptor *mem_descr;
+ void *dq_vaddress;
+
+ idx = 0;
+ dq = &phwi_context->be_def_hdrq;
+ cq = &phwi_context->be_cq;
+ mem = &dq->dma_mem;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING;
+ dq_vaddress = mem_descr->mem_array[idx].virtual_address;
+ ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
+ sizeof(struct phys_addr),
+ sizeof(struct phys_addr), dq_vaddress);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost,
+ "be_fill_queue Failed for DEF PDU HDR\n");
+ return ret;
+ }
+ mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
+ ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
+ def_pdu_ring_sz,
+ phba->params.defpdu_hdr_sz);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost,
+ "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+ return ret;
+ }
+ phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
+ SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
+ phwi_context->be_def_hdrq.id);
+ hwi_post_async_buffers(phba, 1);
+ return 0;
+}
+
+static int
+beiscsi_create_def_data(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context,
+ struct hwi_controller *phwi_ctrlr,
+ unsigned int def_pdu_ring_sz)
+{
+ unsigned int idx;
+ int ret;
+ struct be_queue_info *dataq, *cq;
+ struct be_dma_mem *mem;
+ struct be_mem_descriptor *mem_descr;
+ void *dq_vaddress;
+
+ idx = 0;
+ dataq = &phwi_context->be_def_dataq;
+ cq = &phwi_context->be_cq;
+ mem = &dataq->dma_mem;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING;
+ dq_vaddress = mem_descr->mem_array[idx].virtual_address;
+ ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
+ sizeof(struct phys_addr),
+ sizeof(struct phys_addr), dq_vaddress);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost,
+ "be_fill_queue Failed for DEF PDU DATA\n");
+ return ret;
+ }
+ mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
+ ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
+ def_pdu_ring_sz,
+ phba->params.defpdu_data_sz);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost,
+ "be_cmd_create_default_pdu_queue Failed"
+ " for DEF PDU DATA\n");
+ return ret;
+ }
+ phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
+ SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
+ phwi_context->be_def_dataq.id);
+ hwi_post_async_buffers(phba, 0);
+ SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
+ return 0;
+}
+
+static int
+beiscsi_post_pages(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ struct mem_array *pm_arr;
+ unsigned int page_offset, i;
+ struct be_dma_mem sgl;
+ int status;
+
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_SGE;
+ pm_arr = mem_descr->mem_array;
+
+ page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
+ phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
+ for (i = 0; i < mem_descr->num_elements; i++) {
+ hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
+ status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
+ page_offset,
+ (pm_arr->size / PAGE_SIZE));
+ page_offset += pm_arr->size / PAGE_SIZE;
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "post sgl failed.\n");
+ return status;
+ }
+ pm_arr++;
+ }
+ SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
+ return 0;
+}
+
+static int
+beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
+ struct hwi_context_memory *phwi_context,
+ struct hwi_controller *phwi_ctrlr)
+{
+ unsigned int wrb_mem_index, offset, size, num_wrb_rings;
+ u64 pa_addr_lo;
+ unsigned int idx, num, i;
+ struct mem_array *pwrb_arr;
+ void *wrb_vaddr;
+ struct be_dma_mem sgl;
+ struct be_mem_descriptor *mem_descr;
+ int status;
+
+ idx = 0;
+ mem_descr = phba->init_mem;
+ mem_descr += HWI_MEM_WRB;
+ pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
+ GFP_KERNEL);
+ if (!pwrb_arr) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Memory alloc failed in create wrb ring.\n");
+ return -ENOMEM;
+ }
+ wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
+ pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
+ num_wrb_rings = mem_descr->mem_array[idx].size /
+ (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
+
+ for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
+ if (num_wrb_rings) {
+ pwrb_arr[num].virtual_address = wrb_vaddr;
+ pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
+ pwrb_arr[num].size = phba->params.wrbs_per_cxn *
+ sizeof(struct iscsi_wrb);
+ wrb_vaddr += pwrb_arr[num].size;
+ pa_addr_lo += pwrb_arr[num].size;
+ num_wrb_rings--;
+ } else {
+ idx++;
+ wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
+			pa_addr_lo = mem_descr->mem_array[idx].
+					bus_address.u.a64.address;
+ num_wrb_rings = mem_descr->mem_array[idx].size /
+ (phba->params.wrbs_per_cxn *
+ sizeof(struct iscsi_wrb));
+ pwrb_arr[num].virtual_address = wrb_vaddr;
+			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
+ pwrb_arr[num].size = phba->params.wrbs_per_cxn *
+ sizeof(struct iscsi_wrb);
+ wrb_vaddr += pwrb_arr[num].size;
+ pa_addr_lo += pwrb_arr[num].size;
+ num_wrb_rings--;
+ }
+ }
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ wrb_mem_index = 0;
+ offset = 0;
+ size = 0;
+
+ hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
+ status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
+ &phwi_context->be_wrbq[i]);
+		if (status != 0) {
+			shost_printk(KERN_ERR, phba->shost,
+				     "wrbq create failed\n");
+			kfree(pwrb_arr);
+			return status;
+		}
+ phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
+ }
+ kfree(pwrb_arr);
+ return 0;
+}
+
+static void free_wrb_handles(struct beiscsi_hba *phba)
+{
+ unsigned int index;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+ pwrb_context = &phwi_ctrlr->wrb_context[index];
+ kfree(pwrb_context->pwrb_handle_base);
+ kfree(pwrb_context->pwrb_handle_basestd);
+ }
+}
+
+static void hwi_cleanup(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ int i;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ q = &phwi_context->be_wrbq[i];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
+ }
+
+ free_wrb_handles(phba);
+
+ q = &phwi_context->be_def_hdrq;
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ q = &phwi_context->be_def_dataq;
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
+
+ q = &phwi_context->be_cq;
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+
+ q = &phwi_context->be_eq.q;
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+}
+
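+/*
+ * hwi_init_port - bring up the hardware interface: create the EQ,
+ * validate the firmware, create the CQ, the default PDU header/data
+ * rings and the WRB rings, posting SGL pages along the way. Any
+ * failure tears everything down through hwi_cleanup().
+ */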
+static int hwi_init_port(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ unsigned int def_pdu_ring_sz;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ int status;
+
+ def_pdu_ring_sz =
+ phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
+ phwi_ctrlr = phba->phwi_ctrlr;
+
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ phwi_context->be_eq.max_eqd = 0;
+ phwi_context->be_eq.min_eqd = 0;
+ phwi_context->be_eq.cur_eqd = 64;
+ phwi_context->be_eq.enable_aic = false;
+ be_cmd_fw_initialize(&phba->ctrl);
+ status = beiscsi_create_eq(phba, phwi_context);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
+ goto error;
+ }
+
+ status = mgmt_check_supported_fw(ctrl);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Unsupported fw version \n");
+ goto error;
+ }
+
+ status = mgmt_get_fw_config(ctrl, phba);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Error getting fw config\n");
+ goto error;
+ }
+
+ status = beiscsi_create_cq(phba, phwi_context);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
+ goto error;
+ }
+
+ status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
+ def_pdu_ring_sz);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Default Header not created\n");
+ goto error;
+ }
+
+ status = beiscsi_create_def_data(phba, phwi_context,
+ phwi_ctrlr, def_pdu_ring_sz);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Default Data not created\n");
+ goto error;
+ }
+
+ status = beiscsi_post_pages(phba);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
+ goto error;
+ }
+
+ status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
+ if (status != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "WRB Rings not created\n");
+ goto error;
+ }
+
+ SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
+ return 0;
+
+error:
+	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
+ hwi_cleanup(phba);
+ return -ENOMEM;
+}
+
+
+static int hwi_init_controller(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
+ phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
+ init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
+ SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
+ phwi_ctrlr->phwi_ctxt);
+ } else {
+ shost_printk(KERN_ERR, phba->shost,
+			     "HWI_MEM_ADDN_CONTEXT is more than one element."
+			     " Failing to load\n");
+ return -ENOMEM;
+ }
+
+ iscsi_init_global_templates(phba);
+ beiscsi_init_wrb_handle(phba);
+ hwi_init_async_pdu_ctx(phba);
+ if (hwi_init_port(phba) != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "hwi_init_controller failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void beiscsi_free_mem(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ int i, j;
+
+ mem_descr = phba->init_mem;
+ i = 0;
+ j = 0;
+ for (i = 0; i < SE_MEM_MAX; i++) {
+ for (j = mem_descr->num_elements; j > 0; j--) {
+ pci_free_consistent(phba->pcidev,
+ mem_descr->mem_array[j - 1].size,
+ mem_descr->mem_array[j - 1].virtual_address,
+ mem_descr->mem_array[j - 1].bus_address.
+ u.a64.address);
+ }
+ kfree(mem_descr->mem_array);
+ mem_descr++;
+ }
+ kfree(phba->init_mem);
+ kfree(phba->phwi_ctrlr);
+}
+
+static int beiscsi_init_controller(struct beiscsi_hba *phba)
+{
+ int ret = -ENOMEM;
+
+ ret = beiscsi_get_memory(phba);
+ if (ret < 0) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
+			     " Failed in beiscsi_alloc_memory\n");
+ return ret;
+ }
+
+ ret = hwi_init_controller(phba);
+ if (ret)
+ goto free_init;
+	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
+ return 0;
+
+free_init:
+ beiscsi_free_mem(phba);
+ return -ENOMEM;
+}
+
+static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
+ struct sgl_handle *psgl_handle;
+ struct iscsi_sge *pfrag;
+ unsigned int arr_index, i, idx;
+
+ phba->io_sgl_hndl_avbl = 0;
+ phba->eh_sgl_hndl_avbl = 0;
+ mem_descr_sglh = phba->init_mem;
+ mem_descr_sglh += HWI_MEM_SGLH;
+ if (1 == mem_descr_sglh->num_elements) {
+ phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
+ phba->params.ios_per_ctrl,
+ GFP_KERNEL);
+ if (!phba->io_sgl_hndl_base) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Mem Alloc Failed. Failing to load\n");
+ return -ENOMEM;
+ }
+ phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
+ (phba->params.icds_per_ctrl -
+ phba->params.ios_per_ctrl),
+ GFP_KERNEL);
+ if (!phba->eh_sgl_hndl_base) {
+ kfree(phba->io_sgl_hndl_base);
+ shost_printk(KERN_ERR, phba->shost,
+ "Mem Alloc Failed. Failing to load\n");
+ return -ENOMEM;
+ }
+ } else {
+ shost_printk(KERN_ERR, phba->shost,
+			     "HWI_MEM_SGLH is more than one element."
+			     " Failing to load\n");
+ return -ENOMEM;
+ }
+
+ arr_index = 0;
+ idx = 0;
+ while (idx < mem_descr_sglh->num_elements) {
+ psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
+
+ for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
+ sizeof(struct sgl_handle)); i++) {
+ if (arr_index < phba->params.ios_per_ctrl) {
+ phba->io_sgl_hndl_base[arr_index] = psgl_handle;
+ phba->io_sgl_hndl_avbl++;
+ arr_index++;
+ } else {
+ phba->eh_sgl_hndl_base[arr_index -
+ phba->params.ios_per_ctrl] =
+ psgl_handle;
+ arr_index++;
+ phba->eh_sgl_hndl_avbl++;
+ }
+ psgl_handle++;
+ }
+ idx++;
+ }
+ SE_DEBUG(DBG_LVL_8,
+		 "phba->io_sgl_hndl_avbl=%d "
+		 "phba->eh_sgl_hndl_avbl=%d\n",
+ phba->io_sgl_hndl_avbl,
+ phba->eh_sgl_hndl_avbl);
+ mem_descr_sg = phba->init_mem;
+ mem_descr_sg += HWI_MEM_SGE;
+ SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
+ mem_descr_sg->num_elements);
+ arr_index = 0;
+ idx = 0;
+ while (idx < mem_descr_sg->num_elements) {
+ pfrag = mem_descr_sg->mem_array[idx].virtual_address;
+
+ for (i = 0;
+ i < (mem_descr_sg->mem_array[idx].size) /
+ (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
+ i++) {
+ if (arr_index < phba->params.ios_per_ctrl)
+ psgl_handle = phba->io_sgl_hndl_base[arr_index];
+ else
+ psgl_handle = phba->eh_sgl_hndl_base[arr_index -
+ phba->params.ios_per_ctrl];
+ psgl_handle->pfrag = pfrag;
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
+ pfrag += phba->params.num_sge_per_io;
+ psgl_handle->sgl_index =
+ phba->fw_config.iscsi_cid_start + arr_index++;
+ }
+ idx++;
+ }
+ phba->io_sgl_free_index = 0;
+ phba->io_sgl_alloc_index = 0;
+ phba->eh_sgl_free_index = 0;
+ phba->eh_sgl_alloc_index = 0;
+ return 0;
+}
+
+static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
+{
+ int i, new_cid;
+
+ phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
+ GFP_KERNEL);
+ if (!phba->cid_array) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Failed to allocate memory in "
+ "hba_setup_cid_tbls\n");
+ return -ENOMEM;
+ }
+ phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
+ phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
+ if (!phba->ep_array) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Failed to allocate memory in "
+ "hba_setup_cid_tbls \n");
+ kfree(phba->cid_array);
+ return -ENOMEM;
+ }
+ new_cid = phba->fw_config.iscsi_icd_start;
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ phba->cid_array[i] = new_cid;
+ new_cid += 2;
+ }
+ phba->avlbl_cids = phba->params.cxns_per_ctrl;
+ return 0;
+}
+
+static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_queue_info *eq;
+ u8 __iomem *addr;
+ u32 reg;
+ u32 enabled;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ eq = &phwi_context->be_eq.q;
+ addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
+ reg = ioread32(addr);
+ SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
+
+ enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (!enabled) {
+ reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
+ iowrite32(reg, addr);
+ SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
+
+ hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "In hwi_enable_intr, Not Enabled \n");
+ return true;
+}
+
+static void hwi_disable_intr(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+
+ u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
+ u32 reg = ioread32(addr);
+
+ u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (enabled) {
+ reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ iowrite32(reg, addr);
+ } else
+ shost_printk(KERN_WARNING, phba->shost,
+ "In hwi_disable_intr, Already Disabled \n");
+}
+
+static int beiscsi_init_port(struct beiscsi_hba *phba)
+{
+ int ret;
+
+ ret = beiscsi_init_controller(phba);
+ if (ret < 0) {
+ shost_printk(KERN_ERR, phba->shost,
+			     "beiscsi_dev_probe - Failed in"
+			     " beiscsi_init_controller\n");
+ return ret;
+ }
+ ret = beiscsi_init_sgl_handle(phba);
+ if (ret < 0) {
+ shost_printk(KERN_ERR, phba->shost,
+			     "beiscsi_dev_probe - Failed in"
+			     " beiscsi_init_sgl_handle\n");
+ goto do_cleanup_ctrlr;
+ }
+
+ if (hba_setup_cid_tbls(phba)) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Failed in hba_setup_cid_tbls\n");
+ kfree(phba->io_sgl_hndl_base);
+ kfree(phba->eh_sgl_hndl_base);
+ goto do_cleanup_ctrlr;
+ }
+
+ return ret;
+
+do_cleanup_ctrlr:
+ hwi_cleanup(phba);
+ return ret;
+}
+
+static void hwi_purge_eq(struct beiscsi_hba *phba)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct be_queue_info *eq;
+ struct be_eq_entry *eqe = NULL;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ eq = &phwi_context->be_eq.q;
+ eqe = queue_tail_node(eq);
+
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ }
+}
+
+static void beiscsi_clean_port(struct beiscsi_hba *phba)
+{
+ unsigned char mgmt_status;
+
+ mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
+ if (mgmt_status)
+ shost_printk(KERN_WARNING, phba->shost,
+			     "mgmt_epfw_cleanup FAILED\n");
+ hwi_cleanup(phba);
+ hwi_purge_eq(phba);
+ kfree(phba->io_sgl_hndl_base);
+ kfree(phba->eh_sgl_hndl_base);
+ kfree(phba->cid_array);
+ kfree(phba->ep_array);
+}
+
+void
+beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
+ struct beiscsi_offload_params *params)
+{
+ struct wrb_handle *pwrb_handle;
+ struct iscsi_target_context_update_wrb *pwrb = NULL;
+ struct be_mem_descriptor *mem_descr;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ u32 doorbell = 0;
+
+ /*
+ * We can always use 0 here because it is reserved by libiscsi for
+ * login/startup related tasks.
+ */
+ pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
+ pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
+ memset(pwrb, 0, sizeof(*pwrb));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ first_burst_length,
+ pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
+ 0x7);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ session_state, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
+ pwrb, 1);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
+ pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
+ 0);
+
+ mem_descr = phba->init_mem;
+ mem_descr += ISCSI_MEM_GLOBAL_HEADER;
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_hi, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_lo, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) <<
+ DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+
+ iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+}
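+
+/*
+ * Illustration (not compiled): the TXULP doorbell written above packs
+ * three fields into one 32-bit register write,
+ *
+ *	doorbell = (cid & DB_WRB_POST_CID_MASK)
+ *		 | ((wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
+ *				<< DB_DEF_PDU_WRB_INDEX_SHIFT)
+ *		 | (1 << DB_DEF_PDU_NUM_POSTED_SHIFT);
+ *
+ * i.e. the connection id, the WRB ring index and the number of WRBs
+ * posted. beiscsi_iotask() and beiscsi_mtask() below build the same
+ * value before ringing DB_TXULP0_OFFSET.
+ */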
+
+static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
+ int *index, int *age)
+{
+ *index = be32_to_cpu(itt) >> 16;
+ if (age)
+ *age = conn->session->age;
+}
+
+/**
+ * beiscsi_alloc_pdu - allocates pdu and related resources
+ * @task: libiscsi task
+ * @opcode: opcode of pdu for task
+ *
+ * This is called with the session lock held. It will allocate
+ * the wrb and sgl if needed for the command. And it will prep
+ * the pdu's itt. beiscsi_parse_pdu will later translate
+ * the pdu itt to the libiscsi task itt.
+ */
+static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ itt_t itt;
+ struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
+ dma_addr_t paddr;
+
+ io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
+ GFP_KERNEL, &paddr);
+
+ if (!io_task->cmd_bhs)
+ return -ENOMEM;
+
+ io_task->bhs_pa.u.a64.address = paddr;
+ io_task->pwrb_handle = alloc_wrb_handle(phba,
+ beiscsi_conn->beiscsi_conn_cid,
+ task->itt);
+ io_task->pwrb_handle->pio_handle = task;
+ io_task->conn = beiscsi_conn;
+
+ task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
+ task->hdr_max = sizeof(struct be_cmd_bhs);
+
+ if (task->sc) {
+ spin_lock(&phba->io_sgl_lock);
+ io_task->psgl_handle = alloc_io_sgl_handle(phba);
+ spin_unlock(&phba->io_sgl_lock);
+ if (!io_task->psgl_handle)
+ goto free_hndls;
+
+ } else {
+ io_task->scsi_cmnd = NULL;
+ if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+ if (!beiscsi_conn->login_in_progress) {
+ spin_lock(&phba->mgmt_sgl_lock);
+ io_task->psgl_handle = (struct sgl_handle *)
+ alloc_mgmt_sgl_handle(phba);
+ spin_unlock(&phba->mgmt_sgl_lock);
+ if (!io_task->psgl_handle)
+ goto free_hndls;
+
+ beiscsi_conn->login_in_progress = 1;
+ beiscsi_conn->plogin_sgl_handle =
+ io_task->psgl_handle;
+ } else {
+ io_task->psgl_handle =
+ beiscsi_conn->plogin_sgl_handle;
+ }
+ } else {
+ spin_lock(&phba->mgmt_sgl_lock);
+ io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
+ spin_unlock(&phba->mgmt_sgl_lock);
+ if (!io_task->psgl_handle)
+ goto free_hndls;
+ }
+ }
+ itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) |
+ (unsigned int)(io_task->psgl_handle->sgl_index));
+ io_task->cmd_bhs->iscsi_hdr.itt = itt;
+ return 0;
+
+free_hndls:
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+ free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
+ io_task->pwrb_handle = NULL;
+ pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+ io_task->bhs_pa.u.a64.address);
+	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD failed\n");
+ return -ENOMEM;
+}
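+
+/*
+ * Illustration (not compiled): the ITT put on the wire above packs the
+ * libiscsi task ITT in the upper 16 bits and the SGL index in the
+ * lower 16 bits,
+ *
+ *	itt = cpu_to_be32(((unsigned int)task->itt << 16) | sgl_index);
+ *
+ * so beiscsi_parse_pdu() recovers the task ITT with a single shift:
+ *
+ *	*index = be32_to_cpu(itt) >> 16;
+ */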
+
+static void beiscsi_cleanup_task(struct iscsi_task *task)
+{
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+ if (io_task->pwrb_handle) {
+ free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
+ io_task->pwrb_handle = NULL;
+ }
+
+ if (io_task->cmd_bhs) {
+ pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+ io_task->bhs_pa.u.a64.address);
+ }
+
+ if (task->sc) {
+ if (io_task->psgl_handle) {
+ spin_lock(&phba->io_sgl_lock);
+ free_io_sgl_handle(phba, io_task->psgl_handle);
+ spin_unlock(&phba->io_sgl_lock);
+ io_task->psgl_handle = NULL;
+ }
+ } else {
+ if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
+ return;
+ if (io_task->psgl_handle) {
+ spin_lock(&phba->mgmt_sgl_lock);
+ free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+ spin_unlock(&phba->mgmt_sgl_lock);
+ io_task->psgl_handle = NULL;
+ }
+ }
+}
+
+static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
+ unsigned int num_sg, unsigned int xferlen,
+ unsigned int writedir)
+{
+
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+
+ pwrb = io_task->pwrb_handle->pwrb;
+ io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
+ io_task->bhs_len = sizeof(struct be_cmd_bhs);
+
+ if (writedir) {
+ SE_DEBUG(DBG_LVL_4, " WRITE Command \t");
+ memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
+ AMAP_SET_BITS(struct amap_pdu_data_out, itt,
+ &io_task->cmd_bhs->iscsi_data_pdu,
+ (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
+ AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
+ &io_task->cmd_bhs->iscsi_data_pdu,
+ ISCSI_OPCODE_SCSI_DATA_OUT);
+ AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
+ &io_task->cmd_bhs->iscsi_data_pdu, 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
+ } else {
+ SE_DEBUG(DBG_LVL_4, "READ Command \t");
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
+ }
+ memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
+ dw[offsetof(struct amap_pdu_data_out, lun) / 32],
+ io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
+ cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
+ lun[0]));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+
+ hwi_write_sgl(pwrb, sg, num_sg, io_task);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+
+ iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ return 0;
+}
+
+static int beiscsi_mtask(struct iscsi_task *task)
+{
+ struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+ struct iscsi_task *aborted_task;
+
+ pwrb = io_task->pwrb_handle->pwrb;
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+
+ switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD);
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_TEXT:
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ aborted_task = iscsi_itt_to_task(conn,
+ ((struct iscsi_tm *)task->hdr)->rtt);
+ if (!aborted_task)
+ return 0;
+ aborted_io_task = aborted_task->dd_data;
+ if (!aborted_io_task->scsi_cmnd)
+ return 0;
+
+ mgmt_invalidate_icds(phba,
+ aborted_io_task->psgl_handle->sgl_index,
+ beiscsi_conn->beiscsi_conn_cid);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ hwi_write_buffer(pwrb, task);
+ break;
+ case ISCSI_OP_LOGOUT:
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ HWH_TYPE_LOGOUT);
+ hwi_write_buffer(pwrb, task);
+ break;
+
+ default:
+		SE_DEBUG(DBG_LVL_1, "opcode = %d not supported\n",
+ task->hdr->opcode & ISCSI_OPCODE_MASK);
+ return -EINVAL;
+ }
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
+ be32_to_cpu(task->data_count));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+ iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ return 0;
+}
+
+static int beiscsi_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct scatterlist *sg;
+ int num_sg;
+ unsigned int writedir = 0, xferlen = 0;
+
+	SE_DEBUG(DBG_LVL_4, "cid=%d In beiscsi_task_xmit task=%p conn=%p "
+		 "beiscsi_conn=%p\n", beiscsi_conn->beiscsi_conn_cid,
+ task, conn, beiscsi_conn);
+ if (!sc)
+ return beiscsi_mtask(task);
+
+ io_task->scsi_cmnd = sc;
+ num_sg = scsi_dma_map(sc);
+ if (num_sg < 0) {
+		SE_DEBUG(DBG_LVL_1, "scsi_dma_map failed\n");
+ return num_sg;
+ }
+ SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
+ (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
+ xferlen = scsi_bufflen(sc);
+ sg = scsi_sglist(sc);
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ writedir = 1;
+ SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
+ task->imm_count);
+ } else
+ writedir = 0;
+ return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
+}
+
+static void beiscsi_remove(struct pci_dev *pcidev)
+{
+ struct beiscsi_hba *phba = NULL;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
+ if (!phba) {
+		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
+ return;
+ }
+
+ hwi_disable_intr(phba);
+ if (phba->pcidev->irq)
+ free_irq(phba->pcidev->irq, phba);
+ destroy_workqueue(phba->wq);
+ if (blk_iopoll_enabled)
+ blk_iopoll_disable(&phba->iopoll);
+
+ beiscsi_clean_port(phba);
+ beiscsi_free_mem(phba);
+ beiscsi_unmap_pci_function(phba);
+ pci_free_consistent(phba->pcidev,
+ phba->ctrl.mbox_mem_alloced.size,
+ phba->ctrl.mbox_mem_alloced.va,
+ phba->ctrl.mbox_mem_alloced.dma);
+ iscsi_host_remove(phba->shost);
+ pci_dev_put(phba->pcidev);
+ iscsi_host_free(phba->shost);
+}
+
+static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
+{
+ struct beiscsi_hba *phba = NULL;
+ int ret;
+
+ ret = beiscsi_enable_pci(pcidev);
+ if (ret < 0) {
+		dev_err(&pcidev->dev, "beiscsi_dev_probe - "
+			"Failed to enable pci device\n");
+ return ret;
+ }
+
+ phba = beiscsi_hba_alloc(pcidev);
+ if (!phba) {
+ dev_err(&pcidev->dev, "beiscsi_dev_probe-"
+ " Failed in beiscsi_hba_alloc \n");
+ goto disable_pci;
+ }
+
+ pci_set_drvdata(pcidev, phba);
+ ret = be_ctrl_init(phba, pcidev);
+ if (ret) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
+ "Failed in be_ctrl_init\n");
+ goto hba_free;
+ }
+
+ spin_lock_init(&phba->io_sgl_lock);
+ spin_lock_init(&phba->mgmt_sgl_lock);
+ spin_lock_init(&phba->isr_lock);
+ beiscsi_get_params(phba);
+ ret = beiscsi_init_port(phba);
+ if (ret < 0) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
+ "Failed in beiscsi_init_port\n");
+ goto free_port;
+ }
+
+ snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
+ phba->shost->host_no);
+ phba->wq = create_singlethread_workqueue(phba->wq_name);
+ if (!phba->wq) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
+ "Failed to allocate work queue\n");
+ goto free_twq;
+ }
+
+ INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
+
+ if (blk_iopoll_enabled) {
+ blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll);
+ blk_iopoll_enable(&phba->iopoll);
+ }
+
+ ret = beiscsi_init_irqs(phba);
+ if (ret < 0) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
+ "Failed to beiscsi_init_irqs\n");
+ goto free_blkenbld;
+ }
+ ret = hwi_enable_intr(phba);
+ if (ret < 0) {
+ shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
+ "Failed to hwi_enable_intr\n");
+ goto free_ctrlr;
+ }
+
+ SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
+ return 0;
+
+free_ctrlr:
+ if (phba->pcidev->irq)
+ free_irq(phba->pcidev->irq, phba);
+free_blkenbld:
+ destroy_workqueue(phba->wq);
+ if (blk_iopoll_enabled)
+ blk_iopoll_disable(&phba->iopoll);
+free_twq:
+ beiscsi_clean_port(phba);
+ beiscsi_free_mem(phba);
+free_port:
+ pci_free_consistent(phba->pcidev,
+ phba->ctrl.mbox_mem_alloced.size,
+ phba->ctrl.mbox_mem_alloced.va,
+ phba->ctrl.mbox_mem_alloced.dma);
+ beiscsi_unmap_pci_function(phba);
+hba_free:
+ iscsi_host_remove(phba->shost);
+ pci_dev_put(phba->pcidev);
+ iscsi_host_free(phba->shost);
+disable_pci:
+ pci_disable_device(pcidev);
+ return ret;
+}
+
+struct iscsi_transport beiscsi_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
+ CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
+ .param_mask = ISCSI_MAX_RECV_DLENGTH |
+ ISCSI_MAX_XMIT_DLENGTH |
+ ISCSI_HDRDGST_EN |
+ ISCSI_DATADGST_EN |
+ ISCSI_INITIAL_R2T_EN |
+ ISCSI_MAX_R2T |
+ ISCSI_IMM_DATA_EN |
+ ISCSI_FIRST_BURST |
+ ISCSI_MAX_BURST |
+ ISCSI_PDU_INORDER_EN |
+ ISCSI_DATASEQ_INORDER_EN |
+ ISCSI_ERL |
+ ISCSI_CONN_PORT |
+ ISCSI_CONN_ADDRESS |
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME | ISCSI_TPGT |
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO |
+ ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME,
+ .create_session = beiscsi_session_create,
+ .destroy_session = beiscsi_session_destroy,
+ .create_conn = beiscsi_conn_create,
+ .bind_conn = beiscsi_conn_bind,
+ .destroy_conn = iscsi_conn_teardown,
+ .set_param = beiscsi_set_param,
+ .get_conn_param = beiscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .get_host_param = beiscsi_get_host_param,
+ .start_conn = beiscsi_conn_start,
+ .stop_conn = beiscsi_conn_stop,
+ .send_pdu = iscsi_conn_send_pdu,
+ .xmit_task = beiscsi_task_xmit,
+ .cleanup_task = beiscsi_cleanup_task,
+ .alloc_pdu = beiscsi_alloc_pdu,
+ .parse_pdu_itt = beiscsi_parse_pdu,
+ .get_stats = beiscsi_conn_get_stats,
+ .ep_connect = beiscsi_ep_connect,
+ .ep_poll = beiscsi_ep_poll,
+ .ep_disconnect = beiscsi_ep_disconnect,
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+};
+
+static struct pci_driver beiscsi_pci_driver = {
+ .name = DRV_NAME,
+ .probe = beiscsi_dev_probe,
+ .remove = beiscsi_remove,
+ .id_table = beiscsi_pci_id_table
+};
+
+static int __init beiscsi_module_init(void)
+{
+ int ret;
+
+ beiscsi_scsi_transport =
+ iscsi_register_transport(&beiscsi_iscsi_transport);
+	if (!beiscsi_scsi_transport) {
+		SE_DEBUG(DBG_LVL_1,
+			 "beiscsi_module_init - Unable to register beiscsi "
+			 "transport.\n");
+		return -ENOMEM;
+	}
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
+ &beiscsi_iscsi_transport);
+
+ ret = pci_register_driver(&beiscsi_pci_driver);
+ if (ret) {
+ SE_DEBUG(DBG_LVL_1,
+			 "beiscsi_module_init - Unable to register "
+			 "beiscsi pci driver.\n");
+ goto unregister_iscsi_transport;
+ }
+ return 0;
+
+unregister_iscsi_transport:
+ iscsi_unregister_transport(&beiscsi_iscsi_transport);
+ return ret;
+}
+
+static void __exit beiscsi_module_exit(void)
+{
+ pci_unregister_driver(&beiscsi_pci_driver);
+ iscsi_unregister_transport(&beiscsi_iscsi_transport);
+}
+
+module_init(beiscsi_module_init);
+module_exit(beiscsi_module_exit);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
new file mode 100644
index 000000000000..53c9b70ac7ac
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -0,0 +1,837 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+
+#ifndef _BEISCSI_MAIN_
+#define _BEISCSI_MAIN_
+
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/in.h>
+#include <linux/blk-iopoll.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "be.h"
+
+
+
+#define DRV_NAME "be2iscsi"
+#define BUILD_STR "2.0.527.0"
+
+#define BE_NAME "ServerEngines BladeEngine2 " \
+		"Linux iSCSI Driver version " BUILD_STR
+#define DRV_DESC BE_NAME " " "Driver"
+
+#define BE_VENDOR_ID 0x19A2
+#define BE_DEVICE_ID1 0x212
+#define OC_DEVICE_ID1 0x702
+#define OC_DEVICE_ID2 0x703
+
+#define BE2_MAX_SESSIONS 64
+#define BE2_CMDS_PER_CXN 128
+#define BE2_LOGOUTS BE2_MAX_SESSIONS
+#define BE2_TMFS 16
+#define BE2_NOPOUT_REQ 16
+#define BE2_ASYNCPDUS BE2_MAX_SESSIONS
+#define BE2_MAX_ICDS 2048
+#define BE2_SGE 32
+#define BE2_DEFPDU_HDR_SZ 64
+#define BE2_DEFPDU_DATA_SZ 8192
+#define BE2_IO_DEPTH \
+ (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
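+/*
+ * With the defaults above this works out to
+ * 2048 / 2 - (64 + 16 + 16) = 928 outstanding I/Os.
+ */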
+
+#define BEISCSI_SGLIST_ELEMENTS BE2_SGE
+
+#define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */
+#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
+#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
+
+#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
+#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
+#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
+#define BEISCSI_MAX_FRAGS_INIT 192
+#define BE_NUM_MSIX_ENTRIES 1
+#define MPU_EP_SEMAPHORE 0xac
+
+#define BE_SENSE_INFO_SIZE 258
+#define BE_ISCSI_PDU_HEADER_SIZE 64
+#define BE_MIN_MEM_SIZE 16384
+
+#define IIOC_SCSI_DATA 0x05 /* Write Operation */
+
+#define DBG_LVL 0x00000001
+#define DBG_LVL_1 0x00000001
+#define DBG_LVL_2 0x00000002
+#define DBG_LVL_3 0x00000004
+#define DBG_LVL_4 0x00000008
+#define DBG_LVL_5 0x00000010
+#define DBG_LVL_6 0x00000020
+#define DBG_LVL_7 0x00000040
+#define DBG_LVL_8 0x00000080
+
+#define SE_DEBUG(debug_mask, fmt, args...) \
+do { \
+ if (debug_mask & DBG_LVL) { \
+ printk(KERN_ERR "(%s():%d):", __func__, __LINE__);\
+ printk(fmt, ##args); \
+ } \
+} while (0)
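+
+/*
+ * Example (illustration only): with DBG_LVL left at DBG_LVL_1, only
+ * messages passed with bit 0 set in their mask are printed:
+ *
+ *	SE_DEBUG(DBG_LVL_1, "cid=%d\n", cid);		printed
+ *	SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);	filtered out
+ */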
+
+/**
+ * The hardware needs the async PDU buffers to be posted in multiples
+ * of 8, so have at least 8 of them by default.
+ */
+
+#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx)
+
+/********* Memory BAR register ************/
+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
+/**
+ * Host Interrupt Enable: when set, interrupts are enabled, although
+ * "PCI Interrupt Disable" may still globally block interrupts in
+ * addition to the individual interrupt masks. This gives the device
+ * driver a way to block all interrupts atomically without having to
+ * arbitrate for the PCI Interrupt Disable bit with the OS.
+ */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
+
+/********* ISR0 Register offset **********/
+#define CEV_ISR0_OFFSET 0xC18
+#define CEV_ISR_SIZE 4
+
+/**
+ * Macros for reading/writing a protection domain or CSR registers
+ * in BladeEngine.
+ */
+
+#define DB_TXULP0_OFFSET 0x40
+#define DB_RXULP0_OFFSET 0xA0
+/********* Event Q door bell *************/
+#define DB_EQ_OFFSET DB_CQ_OFFSET
+#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+/* Clear the interrupt for this eq */
+#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
+/* Must be 1 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+/* Number of event entries processed */
+#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
+
+/********* Compl Q door bell *************/
+#define DB_CQ_OFFSET 0x120
+#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+/* Number of event entries processed */
+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
+
+#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
+#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id)
+#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id)
+
+#define PAGES_REQUIRED(x) \
+ ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
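+/*
+ * e.g. with 4 KiB pages: PAGES_REQUIRED(100) == 1,
+ * PAGES_REQUIRED(8192) == 2 and PAGES_REQUIRED(8193) == 3.
+ */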
+
+enum be_mem_enum {
+ HWI_MEM_ADDN_CONTEXT,
+ HWI_MEM_CQ,
+ HWI_MEM_EQ,
+ HWI_MEM_WRB,
+ HWI_MEM_WRBH,
+ HWI_MEM_SGLH, /* 5 */
+ HWI_MEM_SGE,
+ HWI_MEM_ASYNC_HEADER_BUF,
+ HWI_MEM_ASYNC_DATA_BUF,
+ HWI_MEM_ASYNC_HEADER_RING,
+ HWI_MEM_ASYNC_DATA_RING, /* 10 */
+ HWI_MEM_ASYNC_HEADER_HANDLE,
+ HWI_MEM_ASYNC_DATA_HANDLE,
+ HWI_MEM_ASYNC_PDU_CONTEXT,
+ ISCSI_MEM_GLOBAL_HEADER,
+ SE_MEM_MAX /* 15 */
+};
+
+struct be_bus_address32 {
+ unsigned int address_lo;
+ unsigned int address_hi;
+};
+
+struct be_bus_address64 {
+ unsigned long long address;
+};
+
+struct be_bus_address {
+ union {
+ struct be_bus_address32 a32;
+ struct be_bus_address64 a64;
+ } u;
+};
+
+struct mem_array {
+ struct be_bus_address bus_address; /* Bus address of location */
+ void *virtual_address; /* virtual address to the location */
+ unsigned int size; /* Size required by memory block */
+};
+
+struct be_mem_descriptor {
+ unsigned int index; /* Index of this memory parameter */
+ unsigned int category; /* type indicates cached/non-cached */
+ unsigned int num_elements; /* number of elements in this
+ * descriptor
+ */
+ unsigned int alignment_mask; /* Alignment mask for this block */
+ unsigned int size_in_bytes; /* Size required by memory block */
+ struct mem_array *mem_array;
+};
+
+struct sgl_handle {
+ unsigned int sgl_index;
+ struct iscsi_sge *pfrag;
+};
+
+struct hba_parameters {
+ unsigned int ios_per_ctrl;
+ unsigned int cxns_per_ctrl;
+ unsigned int asyncpdus_per_ctrl;
+ unsigned int icds_per_ctrl;
+ unsigned int num_sge_per_io;
+ unsigned int defpdu_hdr_sz;
+ unsigned int defpdu_data_sz;
+ unsigned int num_cq_entries;
+ unsigned int num_eq_entries;
+ unsigned int wrbs_per_cxn;
+ unsigned int crashmode;
+ unsigned int hba_num;
+
+ unsigned int mgmt_ws_sz;
+ unsigned int hwi_ws_sz;
+
+ unsigned int eto;
+ unsigned int ldto;
+
+ unsigned int dbg_flags;
+ unsigned int num_cxn;
+
+ unsigned int eq_timer;
+ /**
+ * These are calculated from other params. They're here
+ * for debug purposes
+ */
+ unsigned int num_mcc_pages;
+ unsigned int num_mcc_cq_pages;
+ unsigned int num_cq_pages;
+ unsigned int num_eq_pages;
+
+ unsigned int num_async_pdu_buf_pages;
+ unsigned int num_async_pdu_buf_sgl_pages;
+ unsigned int num_async_pdu_buf_cq_pages;
+
+ unsigned int num_async_pdu_hdr_pages;
+ unsigned int num_async_pdu_hdr_sgl_pages;
+ unsigned int num_async_pdu_hdr_cq_pages;
+
+ unsigned int num_sge;
+};
+
+struct beiscsi_hba {
+ struct hba_parameters params;
+ struct hwi_controller *phwi_ctrlr;
+ unsigned int mem_req[SE_MEM_MAX];
+ /* PCI BAR mapped addresses */
+ u8 __iomem *csr_va; /* CSR */
+ u8 __iomem *db_va; /* Door Bell */
+ u8 __iomem *pci_va; /* PCI Config */
+ struct be_bus_address csr_pa; /* CSR */
+	struct be_bus_address db_pa;	/* Door Bell */
+	struct be_bus_address pci_pa;	/* PCI Config */
+ /* PCI representation of our HBA */
+ struct pci_dev *pcidev;
+ unsigned int state;
+ unsigned short asic_revision;
+ struct blk_iopoll iopoll;
+ struct be_mem_descriptor *init_mem;
+
+ unsigned short io_sgl_alloc_index;
+ unsigned short io_sgl_free_index;
+ unsigned short io_sgl_hndl_avbl;
+ struct sgl_handle **io_sgl_hndl_base;
+
+ unsigned short eh_sgl_alloc_index;
+ unsigned short eh_sgl_free_index;
+ unsigned short eh_sgl_hndl_avbl;
+ struct sgl_handle **eh_sgl_hndl_base;
+ spinlock_t io_sgl_lock;
+ spinlock_t mgmt_sgl_lock;
+ spinlock_t isr_lock;
+ unsigned int age;
+ unsigned short avlbl_cids;
+ unsigned short cid_alloc;
+ unsigned short cid_free;
+ struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
+ struct list_head hba_queue;
+ unsigned short *cid_array;
+ struct iscsi_endpoint **ep_array;
+ struct Scsi_Host *shost;
+ struct {
+ /**
+ * group together since they are used most frequently
+ * for cid to cri conversion
+ */
+ unsigned int iscsi_cid_start;
+ unsigned int phys_port;
+
+ unsigned int isr_offset;
+ unsigned int iscsi_icd_start;
+ unsigned int iscsi_cid_count;
+ unsigned int iscsi_icd_count;
+ unsigned int pci_function;
+
+ unsigned short cid_alloc;
+ unsigned short cid_free;
+ unsigned short avlbl_cids;
+ spinlock_t cid_lock;
+ } fw_config;
+
+ u8 mac_address[ETH_ALEN];
+ unsigned short todo_cq;
+ unsigned short todo_mcc_cq;
+ char wq_name[20];
+	struct workqueue_struct *wq;	/* The actual work queue */
+ struct work_struct work_cqs; /* The work being queued */
+ struct be_ctrl_info ctrl;
+};
+
+struct beiscsi_session {
+ struct pci_pool *bhs_pool;
+};
+
+/**
+ * struct beiscsi_conn - iscsi connection structure
+ */
+struct beiscsi_conn {
+ struct iscsi_conn *conn;
+ struct beiscsi_hba *phba;
+ u32 exp_statsn;
+ u32 beiscsi_conn_cid;
+ struct beiscsi_endpoint *ep;
+ unsigned short login_in_progress;
+ struct sgl_handle *plogin_sgl_handle;
+ struct beiscsi_session *beiscsi_sess;
+};
+
+/* This structure is used by the chip */
+struct pdu_data_out {
+ u32 dw[12];
+};
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_pdu_data_out {
+ u8 opcode[6]; /* opcode */
+ u8 rsvd0[2]; /* should be 0 */
+ u8 rsvd1[7];
+ u8 final_bit; /* F bit */
+ u8 rsvd2[16];
+ u8 ahs_length[8]; /* no AHS */
+ u8 data_len_hi[8];
+ u8 data_len_lo[16]; /* DataSegmentLength */
+ u8 lun[64];
+ u8 itt[32]; /* ITT; initiator task tag */
+ u8 ttt[32]; /* TTT; valid for R2T or 0xffffffff */
+ u8 rsvd3[32];
+ u8 exp_stat_sn[32];
+ u8 rsvd4[32];
+ u8 data_sn[32];
+ u8 buffer_offset[32];
+ u8 rsvd5[32];
+};
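+
+/*
+ * Illustration (not compiled): because every u8 in an amap stands for
+ * one bit, offsetof() on the amap struct yields a bit offset. Here
+ * offsetof(struct amap_pdu_data_out, final_bit) == 15, i.e. the F bit
+ * lives at bit 15 of DWORD 0. Dividing a field's offset by 32, as the
+ * driver does with params->dw[offsetof(...) / 32], selects the 32-bit
+ * word that holds the field.
+ */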
+
+struct be_cmd_bhs {
+ struct iscsi_cmd iscsi_hdr;
+ unsigned char pad1[16];
+ struct pdu_data_out iscsi_data_pdu;
+ unsigned char pad2[BE_SENSE_INFO_SIZE -
+ sizeof(struct pdu_data_out)];
+};
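+
+/*
+ * With the sizes above, pad2 is BE_SENSE_INFO_SIZE (258) minus
+ * sizeof(struct pdu_data_out) (48), i.e. 210 bytes of padding.
+ */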
+
+struct beiscsi_io_task {
+ struct wrb_handle *pwrb_handle;
+ struct sgl_handle *psgl_handle;
+ struct beiscsi_conn *conn;
+ struct scsi_cmnd *scsi_cmnd;
+ unsigned int cmd_sn;
+ unsigned int flags;
+ unsigned short cid;
+ unsigned short header_len;
+
+ struct be_cmd_bhs *cmd_bhs;
+ struct be_bus_address bhs_pa;
+ unsigned short bhs_len;
+};
+
+struct be_nonio_bhs {
+ struct iscsi_hdr iscsi_hdr;
+ unsigned char pad1[16];
+ struct pdu_data_out iscsi_data_pdu;
+ unsigned char pad2[BE_SENSE_INFO_SIZE -
+ sizeof(struct pdu_data_out)];
+};
+
+struct be_status_bhs {
+ struct iscsi_cmd iscsi_hdr;
+ unsigned char pad1[16];
+ /**
+ * The plus 2 below is to hold the sense info length that gets
+ * DMA'ed by RxULP
+ */
+ unsigned char sense_info[BE_SENSE_INFO_SIZE];
+};
+
+struct iscsi_sge {
+ u32 dw[4];
+};
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_iscsi_sge {
+ u8 addr_hi[32];
+ u8 addr_lo[32];
+ u8 sge_offset[22]; /* DWORD 2 */
+ u8 rsvd0[9]; /* DWORD 2 */
+ u8 last_sge; /* DWORD 2 */
+ u8 len[17]; /* DWORD 3 */
+ u8 rsvd1[15]; /* DWORD 3 */
+};
+
+struct beiscsi_offload_params {
+ u32 dw[5];
+};
+
+#define OFFLD_PARAMS_ERL 0x00000003
+#define OFFLD_PARAMS_DDE 0x00000004
+#define OFFLD_PARAMS_HDE 0x00000008
+#define OFFLD_PARAMS_IR2T 0x00000010
+#define OFFLD_PARAMS_IMD 0x00000020
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_beiscsi_offload_params {
+ u8 max_burst_length[32];
+ u8 max_send_data_segment_length[32];
+ u8 first_burst_length[32];
+ u8 erl[2];
+ u8 dde[1];
+ u8 hde[1];
+ u8 ir2t[1];
+ u8 imd[1];
+ u8 pad[26];
+ u8 exp_statsn[32];
+};
+
+struct async_pdu_handle {
+ struct list_head link;
+ struct be_bus_address pa;
+ void *pbuffer;
+ unsigned int consumed;
+ unsigned char index;
+ unsigned char is_header;
+ unsigned short cri;
+ unsigned long buffer_len;
+};
+
+struct hwi_async_entry {
+ struct {
+ unsigned char hdr_received;
+ unsigned char hdr_len;
+ unsigned short bytes_received;
+ unsigned int bytes_needed;
+ struct list_head list;
+ } wait_queue;
+
+ struct list_head header_busy_list;
+ struct list_head data_busy_list;
+};
+
+#define BE_MIN_ASYNC_ENTRIES 128
+
+struct hwi_async_pdu_context {
+ struct {
+ struct be_bus_address pa_base;
+ void *va_base;
+ void *ring_base;
+ struct async_pdu_handle *handle_base;
+
+ unsigned int host_write_ptr;
+ unsigned int ep_read_ptr;
+ unsigned int writables;
+
+ unsigned int free_entries;
+ unsigned int busy_entries;
+ unsigned int buffer_size;
+ unsigned int num_entries;
+
+ struct list_head free_list;
+ } async_header;
+
+ struct {
+ struct be_bus_address pa_base;
+ void *va_base;
+ void *ring_base;
+ struct async_pdu_handle *handle_base;
+
+ unsigned int host_write_ptr;
+ unsigned int ep_read_ptr;
+ unsigned int writables;
+
+ unsigned int free_entries;
+ unsigned int busy_entries;
+ unsigned int buffer_size;
+ struct list_head free_list;
+ unsigned int num_entries;
+ } async_data;
+
+ /**
+ * This is a varying size list! Do not add anything
+ * after this entry!!
+ */
+ struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES];
+};
+
+#define PDUCQE_CODE_MASK 0x0000003F
+#define PDUCQE_DPL_MASK 0xFFFF0000
+#define PDUCQE_INDEX_MASK 0x0000FFFF
+
+struct i_t_dpdu_cqe {
+ u32 dw[4];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_i_t_dpdu_cqe {
+ u8 db_addr_hi[32];
+ u8 db_addr_lo[32];
+ u8 code[6];
+ u8 cid[10];
+ u8 dpl[16];
+ u8 index[16];
+ u8 num_cons[10];
+ u8 rsvd0[4];
+ u8 final;
+ u8 valid;
+} __packed;
+
+#define CQE_VALID_MASK 0x80000000
+#define CQE_CODE_MASK 0x0000003F
+#define CQE_CID_MASK 0x0000FFC0
+
+#define EQE_VALID_MASK 0x00000001
+#define EQE_MAJORCODE_MASK 0x0000000E
+#define EQE_RESID_MASK 0xFFFF0000
+
+struct be_eq_entry {
+ u32 dw[1];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_eq_entry {
+ u8 valid; /* DWORD 0 */
+ u8 major_code[3]; /* DWORD 0 */
+ u8 minor_code[12]; /* DWORD 0 */
+ u8 resource_id[16]; /* DWORD 0 */
+
+} __packed;
+
+struct cq_db {
+ u32 dw[1];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_cq_db {
+ u8 qid[10];
+ u8 event[1];
+ u8 rsvd0[5];
+ u8 num_popped[13];
+ u8 rearm[1];
+ u8 rsvd1[2];
+} __packed;
+
+void beiscsi_process_eq(struct beiscsi_hba *phba);
+
+
+struct iscsi_wrb {
+ u32 dw[16];
+} __packed;
+
+#define WRB_TYPE_MASK 0xF0000000
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_iscsi_wrb {
+ u8 lun[14]; /* DWORD 0 */
+ u8 lt; /* DWORD 0 */
+ u8 invld; /* DWORD 0 */
+ u8 wrb_idx[8]; /* DWORD 0 */
+ u8 dsp; /* DWORD 0 */
+ u8 dmsg; /* DWORD 0 */
+ u8 undr_run; /* DWORD 0 */
+ u8 over_run; /* DWORD 0 */
+ u8 type[4]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 r2t_exp_dtl[24]; /* DWORD 1 */
+ u8 sgl_icd_idx[12]; /* DWORD 2 */
+ u8 rsvd0[20]; /* DWORD 2 */
+ u8 exp_data_sn[32]; /* DWORD 3 */
+ u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
+ u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
+ u8 cmdsn_itt[32]; /* DWORD 6 */
+ u8 dif_ref_tag[32]; /* DWORD 7 */
+ u8 sge0_addr_hi[32]; /* DWORD 8 */
+ u8 sge0_addr_lo[32]; /* DWORD 9 */
+ u8 sge0_offset[22]; /* DWORD 10 */
+ u8 pbs; /* DWORD 10 */
+ u8 dif_mode[2]; /* DWORD 10 */
+ u8 rsvd1[6]; /* DWORD 10 */
+ u8 sge0_last; /* DWORD 10 */
+ u8 sge0_len[17]; /* DWORD 11 */
+ u8 dif_meta_tag[14]; /* DWORD 11 */
+ u8 sge0_in_ddr; /* DWORD 11 */
+ u8 sge1_addr_hi[32]; /* DWORD 12 */
+ u8 sge1_addr_lo[32]; /* DWORD 13 */
+ u8 sge1_r2t_offset[22]; /* DWORD 14 */
+ u8 rsvd2[9]; /* DWORD 14 */
+ u8 sge1_last; /* DWORD 14 */
+ u8 sge1_len[17]; /* DWORD 15 */
+ u8 ref_sgl_icd_idx[12]; /* DWORD 15 */
+ u8 rsvd3[2]; /* DWORD 15 */
+ u8 sge1_in_ddr; /* DWORD 15 */
+
+} __packed;
+
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
+ int index);
+void
+free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
+
+struct pdu_nop_out {
+ u32 dw[12];
+};
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_pdu_nop_out {
+ u8 opcode[6]; /* opcode 0x00 */
+ u8 i_bit; /* I Bit */
+ u8 x_bit; /* reserved; should be 0 */
+ u8 fp_bit_filler1[7];
+ u8 f_bit; /* always 1 */
+ u8 reserved1[16];
+ u8 ahs_length[8]; /* no AHS */
+ u8 data_len_hi[8];
+ u8 data_len_lo[16]; /* DataSegmentLength */
+ u8 lun[64];
+ u8 itt[32]; /* initiator id for ping or 0xffffffff */
+ u8 ttt[32]; /* target id for ping or 0xffffffff */
+ u8 cmd_sn[32];
+ u8 exp_stat_sn[32];
+ u8 reserved5[128];
+};
+
+#define PDUBASE_OPCODE_MASK 0x0000003F
+#define PDUBASE_DATALENHI_MASK 0x0000FF00
+#define PDUBASE_DATALENLO_MASK 0xFFFF0000
+
+struct pdu_base {
+ u32 dw[16];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_pdu_base {
+ u8 opcode[6];
+ u8 i_bit; /* immediate bit */
+ u8 x_bit; /* reserved, always 0 */
+ u8 reserved1[24]; /* opcode-specific fields */
+ u8 ahs_length[8]; /* length units is 4 byte words */
+ u8 data_len_hi[8];
+ u8 data_len_lo[16]; /* DatasegmentLength */
+ u8 lun[64]; /* lun or opcode-specific fields */
+ u8 itt[32]; /* initiator task tag */
+ u8 reserved4[224];
+};
+
+struct iscsi_target_context_update_wrb {
+ u32 dw[16];
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_iscsi_target_context_update_wrb {
+ u8 lun[14]; /* DWORD 0 */
+ u8 lt; /* DWORD 0 */
+ u8 invld; /* DWORD 0 */
+ u8 wrb_idx[8]; /* DWORD 0 */
+ u8 dsp; /* DWORD 0 */
+ u8 dmsg; /* DWORD 0 */
+ u8 undr_run; /* DWORD 0 */
+ u8 over_run; /* DWORD 0 */
+ u8 type[4]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 max_burst_length[19]; /* DWORD 1 */
+ u8 rsvd0[5]; /* DWORD 1 */
+ u8 rsvd1[15]; /* DWORD 2 */
+ u8 max_send_data_segment_length[17]; /* DWORD 2 */
+ u8 first_burst_length[14]; /* DWORD 3 */
+ u8 rsvd2[2]; /* DWORD 3 */
+ u8 tx_wrbindex_drv_msg[8]; /* DWORD 3 */
+ u8 rsvd3[5]; /* DWORD 3 */
+ u8 session_state[3]; /* DWORD 3 */
+ u8 rsvd4[16]; /* DWORD 4 */
+ u8 tx_jumbo; /* DWORD 4 */
+ u8 hde; /* DWORD 4 */
+ u8 dde; /* DWORD 4 */
+ u8 erl[2]; /* DWORD 4 */
+ u8 domain_id[5]; /* DWORD 4 */
+ u8 mode; /* DWORD 4 */
+ u8 imd; /* DWORD 4 */
+ u8 ir2t; /* DWORD 4 */
+ u8 notpredblq[2]; /* DWORD 4 */
+ u8 compltonack; /* DWORD 4 */
+ u8 stat_sn[32]; /* DWORD 5 */
+ u8 pad_buffer_addr_hi[32]; /* DWORD 6 */
+ u8 pad_buffer_addr_lo[32]; /* DWORD 7 */
+ u8 pad_addr_hi[32]; /* DWORD 8 */
+ u8 pad_addr_lo[32]; /* DWORD 9 */
+ u8 rsvd5[32]; /* DWORD 10 */
+ u8 rsvd6[32]; /* DWORD 11 */
+ u8 rsvd7[32]; /* DWORD 12 */
+ u8 rsvd8[32]; /* DWORD 13 */
+ u8 rsvd9[32]; /* DWORD 14 */
+ u8 rsvd10[32]; /* DWORD 15 */
+
+} __packed;
+
+struct be_ring {
+ u32 pages; /* queue size in pages */
+ u32 id; /* queue id assigned by beklib */
+ u32 num; /* number of elements in queue */
+ u32 cidx; /* consumer index */
+ u32 pidx; /* producer index -- not used by most rings */
+ u32 item_size; /* size in bytes of one object */
+
+ void *va; /* The virtual address of the ring. This
+ * should be last to allow 32 & 64 bit debugger
+ * extensions to work.
+ */
+};
+
+struct hwi_wrb_context {
+ struct list_head wrb_handle_list;
+ struct list_head wrb_handle_drvr_list;
+ struct wrb_handle **pwrb_handle_base;
+ struct wrb_handle **pwrb_handle_basestd;
+ struct iscsi_wrb *plast_wrb;
+ unsigned short alloc_index;
+ unsigned short free_index;
+ unsigned short wrb_handles_available;
+ unsigned short cid;
+};
+
+struct hwi_controller {
+ struct list_head io_sgl_list;
+ struct list_head eh_sgl_list;
+ struct sgl_handle *psgl_handle_base;
+ unsigned int wrb_mem_index;
+
+ struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
+ struct mcc_wrb *pmcc_wrb_base;
+ struct be_ring default_pdu_hdr;
+ struct be_ring default_pdu_data;
+ struct hwi_context_memory *phwi_ctxt;
+ unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
+};
+
+enum hwh_type_enum {
+ HWH_TYPE_IO = 1,
+ HWH_TYPE_LOGOUT = 2,
+ HWH_TYPE_TMF = 3,
+ HWH_TYPE_NOP = 4,
+ HWH_TYPE_IO_RD = 5,
+ HWH_TYPE_LOGIN = 11,
+ HWH_TYPE_INVALID = 0xFFFFFFFF
+};
+
+struct wrb_handle {
+ enum hwh_type_enum type;
+ unsigned short wrb_index;
+ unsigned short nxt_wrb_index;
+
+ struct iscsi_task *pio_handle;
+ struct iscsi_wrb *pwrb;
+};
+
+struct hwi_context_memory {
+ struct be_eq_obj be_eq;
+ struct be_queue_info be_cq;
+ struct be_queue_info be_mcc_cq;
+ struct be_queue_info be_mcc;
+
+ struct be_queue_info be_def_hdrq;
+ struct be_queue_info be_def_dataq;
+
+ struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
+ struct be_mcc_wrb_context *pbe_mcc_context;
+
+ struct hwi_async_pdu_context *pasync_ctx;
+};
+
+#endif
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
new file mode 100644
index 000000000000..12e644fc746e
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -0,0 +1,321 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+
+#include "be_mgmt.h"
+#include "be_iscsi.h"
+
+unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_fw_cfg *req = embedded_payload(wrb);
+ int status = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct be_fw_cfg *pfw_cfg;
+ pfw_cfg = req;
+ phba->fw_config.phys_port = pfw_cfg->phys_port;
+ phba->fw_config.iscsi_icd_start =
+ pfw_cfg->ulp[0].icd_base;
+ phba->fw_config.iscsi_icd_count =
+ pfw_cfg->ulp[0].icd_count;
+ phba->fw_config.iscsi_cid_start =
+ pfw_cfg->ulp[0].sq_base;
+ phba->fw_config.iscsi_cid_count =
+ pfw_cfg->ulp[0].sq_count;
+ } else {
+ shost_printk(KERN_WARNING, phba->shost,
+			     "Failed in mgmt_get_fw_config\n");
+ }
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
+{
+ struct be_dma_mem nonemb_cmd;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_mgmt_controller_attributes *req;
+ struct be_sge *sge = nonembedded_sgl(wrb);
+ int status = 0;
+
+ nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+ sizeof(struct be_mgmt_controller_attributes),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ SE_DEBUG(DBG_LVL_1,
+ "Failed to allocate memory for mgmt_check_supported_fw"
+ "\n");
+ return -1;
+ }
+ nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
+ req = nonemb_cmd.va;
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd.size);
+
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
+ SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
+ resp->params.hba_attribs.flashrom_version_string);
+ SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
+ resp->params.hba_attribs.firmware_version_string);
+ SE_DEBUG(DBG_LVL_8,
+ "Developer Build, not performing version check...\n");
+
+	} else
+		SE_DEBUG(DBG_LVL_1, "Failed in mgmt_check_supported_fw\n");
+	spin_unlock(&ctrl->mbox_lock);
+	if (nonemb_cmd.va)
+		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
+ return status;
+}
+
+unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct iscsi_cleanup_req *req = embedded_payload(wrb);
+ int status = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
+
+ req->chute = chute;
+ req->hdr_ring_id = 0;
+ req->data_ring_id = 0;
+
+ status = be_mbox_notify(ctrl);
+ if (status)
+ shost_printk(KERN_WARNING, phba->shost,
+			     "mgmt_epfw_cleanup FAILED\n");
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
+ unsigned int icd, unsigned int cid)
+{
+ struct be_dma_mem nonemb_cmd;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_sge *sge = nonembedded_sgl(wrb);
+ struct invalidate_commands_params_in *req;
+ int status = 0;
+
+ nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+ sizeof(struct invalidate_commands_params_in),
+ &nonemb_cmd.dma);
+ if (nonemb_cmd.va == NULL) {
+ SE_DEBUG(DBG_LVL_1,
+			 "Failed to allocate memory for "
+			 "mgmt_invalidate_icds\n");
+ return -1;
+ }
+ nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
+ req = nonemb_cmd.va;
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
+ sizeof(*req));
+ req->ref_handle = 0;
+ req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
+ req->icd_count = 0;
+ req->table[req->icd_count].icd = icd;
+ req->table[req->icd_count].cid = cid;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd.size);
+
+ status = be_mbox_notify(ctrl);
+ if (status)
+ SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
+ spin_unlock(&ctrl->mbox_lock);
+ if (nonemb_cmd.va)
+ pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+ nonemb_cmd.va, nonemb_cmd.dma);
+ return status;
+}
+
+unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep,
+ unsigned short cid,
+ unsigned short issue_reset,
+ unsigned short savecfg_flag)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct iscsi_invalidate_connection_params_in *req =
+ embedded_payload(wrb);
+ int status = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
+ sizeof(*req));
+ req->session_handle = beiscsi_ep->fw_handle;
+ req->cid = cid;
+ if (issue_reset)
+ req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST;
+ else
+ req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
+ req->save_cfg = savecfg_flag;
+ status = be_mbox_notify(ctrl);
+ if (status)
+ SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
+
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
+ unsigned short cid, unsigned int upload_flag)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct tcp_upload_params_in *req = embedded_payload(wrb);
+ int status = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
+ OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
+ req->id = (unsigned short)cid;
+ req->upload_type = (unsigned char)upload_flag;
+ status = be_mbox_notify(ctrl);
+ if (status)
+ SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int mgmt_open_connection(struct beiscsi_hba *phba,
+ struct sockaddr *dst_addr,
+ struct beiscsi_endpoint *beiscsi_ep)
+{
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+ struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
+ struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
+ unsigned short def_hdr_id;
+ unsigned short def_data_id;
+ struct phys_addr template_address = { 0, 0 };
+ struct phys_addr *ptemplate_address;
+ int status = 0;
+ unsigned short cid = beiscsi_ep->ep_cid;
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+ def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
+ def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
+
+ ptemplate_address = &template_address;
+ ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
+ sizeof(*req));
+ if (dst_addr->sa_family == PF_INET) {
+ __be32 s_addr = daddr_in->sin_addr.s_addr;
+ req->ip_address.ip_type = BE2_IPV4;
+ req->ip_address.ip_address[0] = s_addr & 0x000000ff;
+ req->ip_address.ip_address[1] = (s_addr & 0x0000ff00) >> 8;
+ req->ip_address.ip_address[2] = (s_addr & 0x00ff0000) >> 16;
+ req->ip_address.ip_address[3] = (s_addr & 0xff000000) >> 24;
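+		/*
+		 * On a little-endian host this splits, e.g., 10.1.2.3
+		 * into ip_address[] = { 10, 1, 2, 3 }, i.e. the octets
+		 * land in network byte order.
+		 */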
+ req->tcp_port = ntohs(daddr_in->sin_port);
+ beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
+ beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
+ beiscsi_ep->ip_type = BE2_IPV4;
+ } else if (dst_addr->sa_family == PF_INET6) {
+ req->ip_address.ip_type = BE2_IPV6;
+ memcpy(&req->ip_address.ip_address,
+ &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
+ req->tcp_port = ntohs(daddr_in6->sin6_port);
+ beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
+ memcpy(&beiscsi_ep->dst6_addr,
+ &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
+ beiscsi_ep->ip_type = BE2_IPV6;
+	} else {
+ shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
+ dst_addr->sa_family);
+ spin_unlock(&ctrl->mbox_lock);
+ return -EINVAL;
+
+ }
+ req->cid = cid;
+ req->cq_id = phwi_context->be_cq.id;
+ req->defq_id = def_hdr_id;
+ req->hdr_ring_id = def_hdr_id;
+ req->data_ring_id = def_data_id;
+ req->do_offload = 1;
+ req->dataout_template_pa.lo = ptemplate_address->lo;
+ req->dataout_template_pa.hi = ptemplate_address->hi;
+ status = be_mbox_notify(ctrl);
+ if (!status) {
+ struct iscsi_endpoint *ep;
+ struct tcp_connect_and_offload_out *ptcpcnct_out =
+ embedded_payload(wrb);
+
+ ep = phba->ep_array[ptcpcnct_out->cid];
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->fw_handle = 0;
+ beiscsi_ep->cid_vld = 1;
+ SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
+ } else
+ SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
new file mode 100644
index 000000000000..00e816ee8070
--- /dev/null
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -0,0 +1,249 @@
+/**
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ *
+ */
+
+#ifndef _BEISCSI_MGMT_
+#define _BEISCSI_MGMT_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include "be_iscsi.h"
+#include "be_main.h"
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_sge {
+ u8 pa_lo[32]; /* dword 0 */
+ u8 pa_hi[32]; /* dword 1 */
+ u8 length[32]; /* DWORD 2 */
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_wrb_payload {
+ union {
+ struct amap_mcc_sge sgl[19];
+ u8 embedded[59 * 32]; /* DWORDS 57 to 115 */
+ } u;
+} __packed;
+
+/**
+ * Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field
+ */
+struct amap_mcc_wrb {
+ u8 embedded; /* DWORD 0 */
+ u8 rsvd0[2]; /* DWORD 0 */
+ u8 sge_count[5]; /* DWORD 0 */
+ u8 rsvd1[16]; /* DWORD 0 */
+ u8 special[8]; /* DWORD 0 */
+ u8 payload_length[32];
+ u8 tag[64]; /* DWORD 2 */
+ u8 rsvd2[32]; /* DWORD 4 */
+ struct amap_mcc_wrb_payload payload;
+};
+
+struct mcc_sge {
+ u32 pa_lo; /* dword 0 */
+ u32 pa_hi; /* dword 1 */
+ u32 length; /* DWORD 2 */
+} __packed;
+
+struct mcc_wrb_payload {
+ union {
+ struct mcc_sge sgl[19];
+ u32 embedded[59]; /* DWORDS 57 to 115 */
+ } u;
+} __packed;
+
+#define MCC_WRB_EMBEDDED_MASK 0x00000001
+
+struct mcc_wrb {
+ u32 dw[0]; /* DWORD 0 */
+ u32 payload_length;
+ u32 tag[2]; /* DWORD 2 */
+ u32 rsvd2[1]; /* DWORD 4 */
+ struct mcc_wrb_payload payload;
+};
+
+unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
+int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr,
+ struct beiscsi_endpoint *beiscsi_ep);
+
+unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
+ unsigned short cid,
+ unsigned int upload_flag);
+unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
+ unsigned int icd, unsigned int cid);
+
+struct iscsi_invalidate_connection_params_in {
+ struct be_cmd_req_hdr hdr;
+ unsigned int session_handle;
+ unsigned short cid;
+ unsigned short unused;
+ unsigned short cleanup_type;
+ unsigned short save_cfg;
+} __packed;
+
+struct iscsi_invalidate_connection_params_out {
+ unsigned int session_handle;
+ unsigned short cid;
+ unsigned short unused;
+} __packed;
+
+union iscsi_invalidate_connection_params {
+ struct iscsi_invalidate_connection_params_in request;
+ struct iscsi_invalidate_connection_params_out response;
+} __packed;
+
+struct invalidate_command_table {
+ unsigned short icd;
+ unsigned short cid;
+} __packed;
+
+struct invalidate_commands_params_in {
+ struct be_cmd_req_hdr hdr;
+ unsigned int ref_handle;
+ unsigned int icd_count;
+ struct invalidate_command_table table[128];
+ unsigned short cleanup_type;
+ unsigned short unused;
+} __packed;
+
+struct invalidate_commands_params_out {
+ unsigned int ref_handle;
+ unsigned int icd_count;
+ unsigned int icd_status[128];
+} __packed;
+
+union invalidate_commands_params {
+ struct invalidate_commands_params_in request;
+ struct invalidate_commands_params_out response;
+} __packed;
+
+struct mgmt_hba_attributes {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u8 seeprom_version_lo;
+ u8 seeprom_version_hi;
+ u8 rsvd0[2];
+ u32 fw_cmd_data_struct_version;
+ u32 ep_fw_data_struct_version;
+ u32 future_reserved[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 future_u32[4];
+} __packed;
+
+struct mgmt_controller_attributes {
+ struct mgmt_hba_attributes hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u8 netfilters;
+ u8 rsvd0[3];
+ u8 future_u32[4];
+} __packed;
+
+struct be_mgmt_controller_attributes {
+ struct be_cmd_req_hdr hdr;
+ struct mgmt_controller_attributes params;
+} __packed;
+
+struct be_mgmt_controller_attributes_resp {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_controller_attributes params;
+} __packed;
+
+/* configuration management */
+
+#define GET_MGMT_CONTROLLER_WS(phba) (phba->pmgmt_ws)
+
+/* MGMT CMD flags */
+
+#define MGMT_CMDH_FREE (1<<0)
+
+/* --- MGMT_ERROR_CODES --- */
+/* Error Codes returned in the status field of the CMD response header */
+#define MGMT_STATUS_SUCCESS 0 /* The CMD completed without errors */
+#define MGMT_STATUS_FAILED 1 /* Error status in the Status field of */
+ /* the CMD_RESPONSE_HEADER */
+
+#define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
+	pa->lo = (pc)->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+	bus_address.u.a32.address_lo; \
+	pa->hi = (pc)->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+	bus_address.u.a32.address_hi; \
+}
+
+struct beiscsi_endpoint {
+ struct beiscsi_hba *phba;
+ struct beiscsi_sess *sess;
+ struct beiscsi_conn *conn;
+ unsigned short ip_type;
+ char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
+ unsigned long dst_addr;
+ unsigned short ep_cid;
+ unsigned int fw_handle;
+ u16 dst_tcpport;
+ u16 cid_vld;
+};
+
+unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba);
+
+unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep,
+ unsigned short cid,
+ unsigned short issue_reset,
+ unsigned short savecfg_flag);
+#endif
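
The "pseudo amap" comments above describe the trick these definitions rely on: every bit of the real hardware structure is modeled as one byte, so offsetof() gives a field's bit offset and sizeof() gives its width in bits. A minimal standalone sketch of the resulting offset/shift/mask arithmetic (illustration only, not part of this patch; the driver's real set/get helpers live elsewhere in be2iscsi):

#include <stddef.h>
#include <stdio.h>

/* Toy amap: one byte per bit, mirroring DWORD 0 of amap_mcc_wrb. */
struct amap_example {
    unsigned char embedded;     /* bit 0 */
    unsigned char rsvd0[2];     /* bits 1-2 */
    unsigned char sge_count[5]; /* bits 3-7 */
};

int main(void)
{
    /* offsetof() in the byte-per-bit struct is the field's bit offset */
    size_t shift = offsetof(struct amap_example, sge_count);
    /* sizeof() the member is the field's width in bits */
    unsigned int mask =
        (1u << sizeof(((struct amap_example *)0)->sge_count)) - 1;
    unsigned int dword0 = 0;

    dword0 |= (3u & mask) << shift; /* set sge_count = 3 */
    printf("shift=%zu mask=0x%x dword0=0x%x\n", shift, mask, dword0);
    return 0;
}
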
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
new file mode 100644
index 000000000000..1d6009490d1c
--- /dev/null
+++ b/drivers/scsi/bfa/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
+
+bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
+
+bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o
+bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
+bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
+bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
+bfa-y += bfa_csdebug.o bfa_sm.o plog.o
+
+bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
+bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
+bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
+
+ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna
diff --git a/drivers/scsi/bfa/bfa_callback_priv.h b/drivers/scsi/bfa/bfa_callback_priv.h
new file mode 100644
index 000000000000..1e3265c9f7d4
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_callback_priv.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_CALLBACK_PRIV_H__
+#define __BFA_CALLBACK_PRIV_H__
+
+#include <cs/bfa_q.h>
+
+typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+
+/**
+ * Generic BFA callback element.
+ */
+struct bfa_cb_qe_s {
+ struct list_head qe;
+ bfa_cb_cbfn_t cbfn;
+ bfa_boolean_t once;
+ u32 rsvd;
+ void *cbarg;
+};
+
+#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
+ (__hcb_qe)->cbfn = (__cbfn); \
+ (__hcb_qe)->cbarg = (__cbarg); \
+ list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
+} while (0)
+
+#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
+
+#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
+ (__hcb_qe)->cbfn = (__cbfn); \
+ (__hcb_qe)->cbarg = (__cbarg); \
+ if (!(__hcb_qe)->once) { \
+		list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
+ (__hcb_qe)->once = BFA_TRUE; \
+ } \
+} while (0)
+
+#define bfa_cb_queue_done(__hcb_qe) do { \
+ (__hcb_qe)->once = BFA_FALSE; \
+} while (0)
+
+#endif /* __BFA_CALLBACK_PRIV_H__ */
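
The bfa_cb_queue()/comp_q machinery above defers completion callbacks: elements are queued on the owner's comp_q and their cbfn is invoked later with a "complete" flag (BFA_TRUE when processed normally, BFA_FALSE when flushed). A self-contained userspace sketch of the pattern, with minimal stand-ins for the kernel types (illustration only, not part of this patch):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
typedef int bfa_boolean_t;
enum { BFA_FALSE = 0, BFA_TRUE = 1 };
typedef void (*bfa_cb_cbfn_t)(void *cbarg, bfa_boolean_t complete);

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

struct bfa_cb_qe_s {            /* mirrors the element above; qe first */
    struct list_head qe;
    bfa_cb_cbfn_t cbfn;
    bfa_boolean_t once;
    void *cbarg;
};

struct bfa_s { struct list_head comp_q; }; /* only the field used here */

static void my_done(void *cbarg, bfa_boolean_t complete)
{
    printf("%s: complete=%d\n", (char *)cbarg, complete);
}

int main(void)
{
    struct bfa_s bfa;
    struct bfa_cb_qe_s qe = { .once = BFA_FALSE };
    struct list_head *p;

    list_init(&bfa.comp_q);
    /* what bfa_cb_queue() expands to */
    qe.cbfn = my_done;
    qe.cbarg = "io #1";
    list_add_tail(&qe.qe, &bfa.comp_q);

    /* drain the queue the way bfa_comp_process() (bfa_core.c) does */
    for (p = bfa.comp_q.next; p != &bfa.comp_q; p = p->next) {
        struct bfa_cb_qe_s *h = (struct bfa_cb_qe_s *)p;
        h->cbfn(h->cbarg, BFA_TRUE);
    }
    return 0;
}
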
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
new file mode 100644
index 000000000000..0050c838c358
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/**
+ * bfa_cb_ioim_macros.h BFA IOIM driver interface macros.
+ */
+
+#ifndef __BFA_HCB_IOIM_MACROS_H__
+#define __BFA_HCB_IOIM_MACROS_H__
+
+#include <bfa_os_inc.h>
+/*
+ * #include <linux/dma-mapping.h>
+ *
+ * #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include
+ * <scsi/scsi_device.h> #include <scsi/scsi_host.h>
+ */
+#include "bfad_im_compat.h"
+
+/*
+ * task attribute values in FCP-2 FCP_CMND IU
+ */
+#define SIMPLE_Q 0
+#define HEAD_OF_Q 1
+#define ORDERED_Q 2
+#define ACA_Q 4
+#define UNTAGGED 5
+
+static inline lun_t
+bfad_int_to_lun(u32 luno)
+{
+ union {
+ u16 scsi_lun[4];
+ lun_t bfa_lun;
+ } lun;
+
+ lun.bfa_lun = 0;
+ lun.scsi_lun[0] = bfa_os_htons(luno);
+
+ return (lun.bfa_lun);
+}
+
+/**
+ * Get LUN for the I/O request
+ */
+#define bfa_cb_ioim_get_lun(__dio) \
+ bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
+
+/**
+ * Get CDB for the I/O request
+ */
+static inline u8 *
+bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+
+ return ((u8 *) cmnd->cmnd);
+}
+
+/**
+ * Get I/O direction (read/write) for the I/O request
+ */
+static inline enum fcp_iodir
+bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ enum dma_data_direction dmadir;
+
+ dmadir = cmnd->sc_data_direction;
+ if (dmadir == DMA_TO_DEVICE)
+ return FCP_IODIR_WRITE;
+ else if (dmadir == DMA_FROM_DEVICE)
+ return FCP_IODIR_READ;
+ else
+ return FCP_IODIR_NONE;
+}
+
+/**
+ * Get IO size in bytes for the I/O request
+ */
+static inline u32
+bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+
+ return (scsi_bufflen(cmnd));
+}
+
+/**
+ * Get timeout for the I/O request
+ */
+static inline u8
+bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ /*
+ * TBD: need a timeout for scsi passthru
+ */
+ if (cmnd->device->host == NULL)
+ return 4;
+
+ return 0;
+}
+
+/**
+ * Get SG element for the I/O request given the SG element index
+ */
+static inline union bfi_addr_u
+bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ struct scatterlist *sge;
+ u64 addr;
+
+ sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
+ addr = (u64) sg_dma_address(sge);
+
+ return (*(union bfi_addr_u *) &addr);
+}
+
+static inline u32
+bfa_cb_ioim_get_sglen(struct bfad_ioim_s *dio, int sgeid)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ struct scatterlist *sge;
+ u32 len;
+
+ sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
+ len = sg_dma_len(sge);
+
+ return len;
+}
+
+/**
+ * Get Command Reference Number for the I/O request. 0 if none.
+ */
+static inline u8
+bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
+{
+ return 0;
+}
+
+/**
+ * Get SAM-3 priority for the I/O request. 0 is default.
+ */
+static inline u8
+bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
+{
+ return 0;
+}
+
+/**
+ * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
+ */
+static inline u8
+bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+ u8 task_attr = UNTAGGED;
+
+ if (cmnd->device->tagged_supported) {
+ switch (cmnd->tag) {
+ case HEAD_OF_QUEUE_TAG:
+ task_attr = HEAD_OF_Q;
+ break;
+ case ORDERED_QUEUE_TAG:
+ task_attr = ORDERED_Q;
+ break;
+ default:
+ task_attr = SIMPLE_Q;
+ break;
+ }
+ }
+
+ return task_attr;
+}
+
+/**
+ * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
+ */
+static inline u8
+bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+
+ return (cmnd->cmd_len);
+}
+
+
+
+#endif /* __BFA_HCB_IOIM_MACROS_H__ */
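
bfad_int_to_lun() above builds the 8-byte FCP LUN by placing the (16-bit) LUN number big-endian in the first addressing level and zeroing the rest. A standalone sketch of the same conversion, with htons() standing in for bfa_os_htons() (illustration only, not part of this patch):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>  /* htons() */

typedef uint64_t lun_t;

static lun_t int_to_lun(uint32_t luno)
{
    union {
        uint16_t scsi_lun[4];
        lun_t bfa_lun;
    } lun;

    lun.bfa_lun = 0;
    lun.scsi_lun[0] = htons((uint16_t)luno); /* level 0, big-endian */
    return lun.bfa_lun;
}

int main(void)
{
    printf("LUN 5 -> 0x%016llx\n", (unsigned long long)int_to_lun(5));
    return 0;
}
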
diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c
new file mode 100644
index 000000000000..7a959c34e789
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cee.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <defs/bfa_defs_cee.h>
+#include <cs/bfa_trc.h>
+#include <cs/bfa_log.h>
+#include <cs/bfa_debug.h>
+#include <cee/bfa_cee.h>
+#include <bfi/bfi_cee.h>
+#include <bfi/bfi.h>
+#include <bfa_ioc.h>
+#include <cna/bfa_cna_trcmod.h>
+
+BFA_TRC_FILE(CNA, CEE);
+
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
+
+static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg);
+static void bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s
+ *dcbcx_stats);
+static void bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s
+ *lldp_stats);
+static void bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats);
+static void bfa_cee_format_cee_cfg(void *buffer);
+static void bfa_cee_format_cee_stats(void *buffer);
+
+static void
+bfa_cee_format_cee_stats(void *buffer)
+{
+ struct bfa_cee_stats_s *cee_stats = buffer;
+ bfa_cee_format_dcbcx_stats(&cee_stats->dcbx_stats);
+ bfa_cee_format_lldp_stats(&cee_stats->lldp_stats);
+ bfa_cee_format_cfg_stats(&cee_stats->cfg_stats);
+}
+
+static void
+bfa_cee_format_cee_cfg(void *buffer)
+{
+ struct bfa_cee_attr_s *cee_cfg = buffer;
+ bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
+}
+
+static void
+bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s *dcbcx_stats)
+{
+ dcbcx_stats->subtlvs_unrecognized =
+ bfa_os_ntohl(dcbcx_stats->subtlvs_unrecognized);
+ dcbcx_stats->negotiation_failed =
+ bfa_os_ntohl(dcbcx_stats->negotiation_failed);
+ dcbcx_stats->remote_cfg_changed =
+ bfa_os_ntohl(dcbcx_stats->remote_cfg_changed);
+ dcbcx_stats->tlvs_received = bfa_os_ntohl(dcbcx_stats->tlvs_received);
+ dcbcx_stats->tlvs_invalid = bfa_os_ntohl(dcbcx_stats->tlvs_invalid);
+ dcbcx_stats->seqno = bfa_os_ntohl(dcbcx_stats->seqno);
+ dcbcx_stats->ackno = bfa_os_ntohl(dcbcx_stats->ackno);
+ dcbcx_stats->recvd_seqno = bfa_os_ntohl(dcbcx_stats->recvd_seqno);
+ dcbcx_stats->recvd_ackno = bfa_os_ntohl(dcbcx_stats->recvd_ackno);
+}
+
+static void
+bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s *lldp_stats)
+{
+ lldp_stats->frames_transmitted =
+ bfa_os_ntohl(lldp_stats->frames_transmitted);
+ lldp_stats->frames_aged_out = bfa_os_ntohl(lldp_stats->frames_aged_out);
+ lldp_stats->frames_discarded =
+ bfa_os_ntohl(lldp_stats->frames_discarded);
+ lldp_stats->frames_in_error = bfa_os_ntohl(lldp_stats->frames_in_error);
+ lldp_stats->frames_rcvd = bfa_os_ntohl(lldp_stats->frames_rcvd);
+ lldp_stats->tlvs_discarded = bfa_os_ntohl(lldp_stats->tlvs_discarded);
+ lldp_stats->tlvs_unrecognized =
+ bfa_os_ntohl(lldp_stats->tlvs_unrecognized);
+}
+
+static void
+bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats)
+{
+ cfg_stats->cee_status_down = bfa_os_ntohl(cfg_stats->cee_status_down);
+ cfg_stats->cee_status_up = bfa_os_ntohl(cfg_stats->cee_status_up);
+ cfg_stats->cee_hw_cfg_changed =
+ bfa_os_ntohl(cfg_stats->cee_hw_cfg_changed);
+ cfg_stats->recvd_invalid_cfg =
+ bfa_os_ntohl(cfg_stats->recvd_invalid_cfg);
+}
+
+static void
+bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg)
+{
+ lldp_cfg->time_to_interval = bfa_os_ntohs(lldp_cfg->time_to_interval);
+ lldp_cfg->enabled_system_cap =
+ bfa_os_ntohs(lldp_cfg->enabled_system_cap);
+}
+
+/**
+ * bfa_cee_attr_meminfo()
+ *
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_attr_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_stats_meminfo()
+ *
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_stats_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_get_attr_isr()
+ *
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ cee->get_attr_status = status;
+ bfa_trc(cee, 0);
+ if (status == BFA_STATUS_OK) {
+ bfa_trc(cee, 0);
+		/*
+		 * The requested data has been copied to the DMA area; process
+		 * it.
+		 */
+ memcpy(cee->attr, cee->attr_dma.kva,
+ sizeof(struct bfa_cee_attr_s));
+ bfa_cee_format_cee_cfg(cee->attr);
+ }
+ cee->get_attr_pending = BFA_FALSE;
+ if (cee->cbfn.get_attr_cbfn) {
+ bfa_trc(cee, 0);
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+ }
+ bfa_trc(cee, 0);
+}
+
+/**
+ * bfa_cee_get_stats_isr()
+ *
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ cee->get_stats_status = status;
+ bfa_trc(cee, 0);
+ if (status == BFA_STATUS_OK) {
+ bfa_trc(cee, 0);
+ /*
+ * The requested data has been copied to the DMA area, process
+ * it.
+ */
+ memcpy(cee->stats, cee->stats_dma.kva,
+ sizeof(struct bfa_cee_stats_s));
+ bfa_cee_format_cee_stats(cee->stats);
+ }
+ cee->get_stats_pending = BFA_FALSE;
+ bfa_trc(cee, 0);
+ if (cee->cbfn.get_stats_cbfn) {
+ bfa_trc(cee, 0);
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+ }
+ bfa_trc(cee, 0);
+}
+
+/**
+ * bfa_cee_reset_stats_isr()
+ *
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ cee->reset_stats_status = status;
+ cee->reset_stats_pending = BFA_FALSE;
+ if (cee->cbfn.reset_stats_cbfn)
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/**
+ * bfa_cee_meminfo()
+ *
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_cee_meminfo(void)
+{
+ return (bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo());
+}
+
+/**
+ * bfa_cee_mem_claim()
+ *
+ *
+ * @param[in] cee CEE module pointer
+ * dma_kva Kernel Virtual Address of CEE DMA Memory
+ * dma_pa Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
+{
+ cee->attr_dma.kva = dma_kva;
+ cee->attr_dma.pa = dma_pa;
+ cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
+ cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
+ cee->attr = (struct bfa_cee_attr_s *)dma_kva;
+ cee->stats =
+ (struct bfa_cee_stats_s *)(dma_kva + bfa_cee_attr_meminfo());
+}
+
+/**
+ * bfa_cee_get_attr()
+ *
+ * Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req_s *cmd;
+
+ bfa_assert((cee != NULL) && (cee->ioc != NULL));
+ bfa_trc(cee, 0);
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->get_attr_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->get_attr_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_get_req_s *)cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+ bfa_trc(cee, 0);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * bfa_cee_get_stats()
+ *
+ * Send the request to the f/w to fetch CEE statistics.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
+ bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req_s *cmd;
+
+ bfa_assert((cee != NULL) && (cee->ioc != NULL));
+
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->get_stats_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->get_stats_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_get_req_s *)cee->get_stats_mb.msg;
+ cee->stats = stats;
+ cee->cbfn.get_stats_cbfn = cbfn;
+ cee->cbfn.get_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+ bfa_trc(cee, 0);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * bfa_cee_reset_stats()
+ *
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn,
+ void *cbarg)
+{
+ struct bfi_cee_reset_stats_s *cmd;
+
+ bfa_assert((cee != NULL) && (cee->ioc != NULL));
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->reset_stats_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->reset_stats_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_reset_stats_s *)cee->reset_stats_mb.msg;
+ cee->cbfn.reset_stats_cbfn = cbfn;
+ cee->cbfn.reset_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+ bfa_ioc_portid(cee->ioc));
+ bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+ bfa_trc(cee, 0);
+ return BFA_STATUS_OK;
+}
+
+/**
+ * bfa_cee_isr()
+ *
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+ union bfi_cee_i2h_msg_u *msg;
+ struct bfi_cee_get_rsp_s *get_rsp;
+ struct bfa_cee_s *cee = (struct bfa_cee_s *)cbarg;
+ msg = (union bfi_cee_i2h_msg_u *)m;
+ get_rsp = (struct bfi_cee_get_rsp_s *)m;
+ bfa_trc(cee, msg->mh.msg_id);
+ switch (msg->mh.msg_id) {
+ case BFI_CEE_I2H_GET_CFG_RSP:
+ bfa_trc(cee, get_rsp->cmd_status);
+ bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_GET_STATS_RSP:
+ bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_RESET_STATS_RSP:
+ bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * bfa_cee_hbfail()
+ *
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_hbfail(void *arg)
+{
+ struct bfa_cee_s *cee;
+ cee = (struct bfa_cee_s *)arg;
+
+ if (cee->get_attr_pending == BFA_TRUE) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = BFA_FALSE;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->get_stats_pending == BFA_TRUE) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = BFA_FALSE;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->reset_stats_pending == BFA_TRUE) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = BFA_FALSE;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+}
+
+/**
+ * bfa_cee_attach()
+ *
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ * trcmod -
+ * logmod -
+ *
+ * @return void
+ */
+void
+bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
+{
+ bfa_assert(cee != NULL);
+ cee->dev = dev;
+ cee->trcmod = trcmod;
+ cee->logmod = logmod;
+ cee->ioc = ioc;
+
+ bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
+ bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+ bfa_trc(cee, 0);
+}
+
+/**
+ * bfa_cee_detach()
+ *
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ *
+ * @return void
+ */
+void
+bfa_cee_detach(struct bfa_cee_s *cee)
+{
+ /*
+ * For now, just check if there is some ioctl pending and mark that as
+ * failed?
+ */
+ /* bfa_cee_hbfail(cee); */
+}
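
A sketch of how a caller might drive the bfa_cee_get_attr() flow implemented above: the request is posted to the IOC mailbox, the firmware writes into the DMA region claimed in bfa_cee_mem_claim(), and the callback fires from bfa_cee_get_attr_isr() once the response has been copied out. Kernel-context sketch, illustration only, not part of this patch; the my_* names are hypothetical:

static struct bfa_cee_attr_s my_attr;

static void my_cee_attr_cb(void *cbarg, bfa_status_t status)
{
    /* runs from bfa_cee_get_attr_isr() after the firmware response
     * has been copied out of the DMA area into my_attr */
    if (status == BFA_STATUS_OK)
        printk(KERN_INFO "remote LLDP TTL: %u\n",
               my_attr.lldp_remote.time_to_interval);
}

static void my_query_cee(struct bfa_cee_s *cee)
{
    bfa_status_t rc;

    rc = bfa_cee_get_attr(cee, &my_attr, my_cee_attr_cb, NULL);
    if (rc == BFA_STATUS_DEVBUSY)
        printk(KERN_INFO "CEE attr query already pending\n");
}
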
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
new file mode 100644
index 000000000000..44e2d1155c51
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <defs/bfa_defs_pci.h>
+#include <cs/bfa_debug.h>
+#include <bfa_iocfc.h>
+
+#define DEF_CFG_NUM_FABRICS 1
+#define DEF_CFG_NUM_LPORTS 256
+#define DEF_CFG_NUM_CQS 4
+#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
+#define DEF_CFG_NUM_TSKIM_REQS 128
+#define DEF_CFG_NUM_FCXP_REQS 64
+#define DEF_CFG_NUM_UF_BUFS 64
+#define DEF_CFG_NUM_RPORTS 1024
+#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
+#define DEF_CFG_NUM_TINS 256
+
+#define DEF_CFG_NUM_SGPGS 2048
+#define DEF_CFG_NUM_REQQ_ELEMS 256
+#define DEF_CFG_NUM_RSPQ_ELEMS 64
+#define DEF_CFG_NUM_SBOOT_TGTS 16
+#define DEF_CFG_NUM_SBOOT_LUNS 16
+
+/**
+ * Use this function to query the memory requirement of the BFA library.
+ * This function needs to be called before bfa_attach() to get the
+ * memory required by the BFA layer for a given driver configuration.
+ *
+ * This call will fail if the cap is out of range compared to the
+ * pre-defined values within the BFA library.
+ *
+ * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
+ * its configuration in this structure.
+ * The default values for struct bfa_iocfc_cfg_s can be
+ * fetched using bfa_cfg_get_default() API.
+ *
+ * If cap's boundary check fails, the library will use
+ * the default bfa_cap_t values (and log a warning msg).
+ *
+ * @param[out] meminfo - pointer to bfa_meminfo_t. This content
+ * indicates the memory type (see bfa_mem_type_t) and
+ * amount of memory required.
+ *
+ * Driver should allocate the memory, populate the
+ * starting address for each block and provide the same
+ * structure as input parameter to bfa_attach() call.
+ *
+ * @return void
+ *
+ * Special Considerations: @note
+ */
+void
+bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
+{
+ int i;
+ u32 km_len = 0, dm_len = 0;
+
+ bfa_assert((cfg != NULL) && (meminfo != NULL));
+
+ bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
+ meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
+ BFA_MEM_TYPE_KVA;
+ meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
+ BFA_MEM_TYPE_DMA;
+
+ bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
+
+
+ meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
+ meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
+}
+
+/**
+ * Use this function to attach the driver instance to the BFA
+ * library. This function will not trigger any HW initialization
+ * process (that is done in the bfa_init() call).
+ *
+ * This call will fail if the cap is out of range compared to the
+ * pre-defined values within the BFA library.
+ *
+ * @param[out] bfa Pointer to bfa_t.
+ * @param[in] bfad Opaque handle back to the driver's IOC structure
+ * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
+ * that was used in bfa_cfg_get_meminfo().
+ * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
+ * use the bfa_cfg_get_meminfo() call to
+ * find the memory blocks required, allocate the
+ * required memory and provide the starting addresses.
+ * @param[in] pcidev pointer to struct bfa_pcidev_s
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ *
+ * @note
+ *
+ */
+void
+bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+ int i;
+ struct bfa_mem_elem_s *melem;
+
+ bfa->fcs = BFA_FALSE;
+
+ bfa_assert((cfg != NULL) && (meminfo != NULL));
+
+ /**
+ * initialize all memory pointers for iterative allocation
+ */
+ for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
+ melem = meminfo->meminfo + i;
+ melem->kva_curp = melem->kva;
+ melem->dma_curp = melem->dma;
+ }
+
+ bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
+
+}
+
+/**
+ * Use this function to delete a BFA IOC. IOC should be stopped (by
+ * calling bfa_stop()) before this function call.
+ *
+ * @param[in] bfa - pointer to bfa_t.
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ *
+ * @note
+ */
+void
+bfa_detach(struct bfa_s *bfa)
+{
+ int i;
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->detach(bfa);
+
+ bfa_iocfc_detach(bfa);
+}
+
+
+void
+bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
+{
+ bfa->trcmod = trcmod;
+}
+
+
+void
+bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod)
+{
+ bfa->logm = logmod;
+}
+
+
+void
+bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen)
+{
+ bfa->aen = aen;
+}
+
+void
+bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
+{
+ bfa->plog = plog;
+}
+
+/**
+ * Initialize IOC.
+ *
+ * This function will return immediately. When the IOC initialization
+ * completes, bfa_cb_init() will be called.
+ *
+ * @param[in] bfa instance
+ *
+ * @return void
+ *
+ * Special Considerations:
+ *
+ * @note
+ * When this function returns, the driver should register the interrupt service
+ * routine(s) and enable the device interrupts. If this is not done,
+ * bfa_cb_init() will never get called
+ */
+void
+bfa_init(struct bfa_s *bfa)
+{
+ bfa_iocfc_init(bfa);
+}
+
+/**
+ * Use this function to initiate the IOC configuration setup. This function
+ * will return immediately.
+ *
+ * @param[in] bfa instance
+ *
+ * @return None
+ */
+void
+bfa_start(struct bfa_s *bfa)
+{
+ bfa_iocfc_start(bfa);
+}
+
+/**
+ * Use this function to quiesce the IOC. This function will return
+ * immediately; when the IOC is actually stopped, bfa_cb_stop() will be
+ * called.
+ *
+ * @param[in] bfa - pointer to bfa_t.
+ *
+ * @return None
+ *
+ * Special Considerations:
+ * bfa_cb_stop() could be called before or after bfa_stop() returns.
+ *
+ * @note
+ * In case of any failure, we could handle it automatically by doing a
+ * reset and then succeed the bfa_stop() call.
+ */
+void
+bfa_stop(struct bfa_s *bfa)
+{
+ bfa_iocfc_stop(bfa);
+}
+
+void
+bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
+{
+ INIT_LIST_HEAD(comp_q);
+ list_splice_tail_init(&bfa->comp_q, comp_q);
+}
+
+void
+bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
+{
+ struct list_head *qe;
+ struct list_head *qen;
+ struct bfa_cb_qe_s *hcb_qe;
+
+ list_for_each_safe(qe, qen, comp_q) {
+ hcb_qe = (struct bfa_cb_qe_s *) qe;
+ hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+ }
+}
+
+void
+bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
+{
+ struct list_head *qe;
+ struct bfa_cb_qe_s *hcb_qe;
+
+ while (!list_empty(comp_q)) {
+ bfa_q_deq(comp_q, &qe);
+ hcb_qe = (struct bfa_cb_qe_s *) qe;
+ hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
+ }
+}
+
+void
+bfa_attach_fcs(struct bfa_s *bfa)
+{
+ bfa->fcs = BFA_TRUE;
+}
+
+/**
+ * Periodic timer heart beat from driver
+ */
+void
+bfa_timer_tick(struct bfa_s *bfa)
+{
+ bfa_timer_beat(&bfa->timer_mod);
+}
+
+#ifndef BFA_BIOS_BUILD
+/**
+ * Return the list of PCI vendor/device id lists supported by this
+ * BFA instance.
+ */
+void
+bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
+{
+ static struct bfa_pciid_s __pciids[] = {
+ {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
+ {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
+ {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
+ };
+
+ *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
+ *pciids = __pciids;
+}
+
+/**
+ * Use this function to query the default struct bfa_iocfc_cfg_s value (compiled
+ * into BFA layer). The OS driver can then turn back and overwrite entries that
+ * have been configured by the user.
+ *
+ * @param[in] cfg - pointer to bfa_ioc_cfg_t
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ * @note
+ */
+void
+bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
+{
+ cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
+ cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
+ cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
+ cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
+ cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
+ cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
+ cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
+ cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
+
+ cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
+ cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
+ cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
+ cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
+ cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
+ cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
+ cfg->drvcfg.ioc_recover = BFA_FALSE;
+ cfg->drvcfg.delay_comp = BFA_FALSE;
+
+}
+
+void
+bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
+{
+ bfa_cfg_get_default(cfg);
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+ cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
+ cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
+ cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
+ cfg->fwcfg.num_rports = BFA_RPORT_MIN;
+
+ cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+ cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
+ cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
+ cfg->drvcfg.min_cfg = BFA_TRUE;
+}
+
+void
+bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
+{
+ bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
+}
+
+/**
+ * Retrieve firmware trace information on IOC failure.
+ */
+bfa_status_t
+bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
+{
+ return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
+}
+
+/**
+ * Fetch firmware trace data.
+ *
+ * @param[in] bfa BFA instance
+ * @param[out] trcdata Firmware trace buffer
+ * @param[in,out] trclen Firmware trace buffer len
+ *
+ * @retval BFA_STATUS_OK Firmware trace is fetched.
+ * @retval BFA_STATUS_INPROGRESS Firmware trace fetch is in progress.
+ */
+bfa_status_t
+bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
+{
+ return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
+}
+#endif
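
Taken together, the comments above prescribe the bring-up order: fetch defaults, size the memory blocks, allocate them, attach, then init, after which the driver must hook up interrupts so bfa_cb_init() can fire. A kernel-context sketch of that sequence (illustration only, not part of this patch; allocation and error handling are driver-specific and elided):

static int my_bfa_setup(struct bfa_s *bfa, void *bfad,
                        struct bfa_pcidev_s *pcidev)
{
    struct bfa_iocfc_cfg_s cfg;
    struct bfa_meminfo_s meminfo;
    int i;

    bfa_cfg_get_default(&cfg);           /* start from defaults */
    bfa_cfg_get_meminfo(&cfg, &meminfo); /* per-type sizes */

    for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
        /* allocate meminfo.meminfo[i].mem_len bytes and record the
         * starting KVA (and DMA address for the DMA block) in
         * meminfo before calling bfa_attach() */
    }

    bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
    bfa_init(bfa); /* completion is reported via bfa_cb_init() */
    /* register ISRs and enable device interrupts here, or
     * bfa_cb_init() will never be called (see the note above) */
    return 0;
}
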
diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c
new file mode 100644
index 000000000000..1b71d349451a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_csdebug.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <cs/bfa_debug.h>
+#include <bfa_os_inc.h>
+#include <cs/bfa_q.h>
+#include <log/bfa_log_hal.h>
+
+/**
+ * cs_debug_api
+ */
+
+
+void
+bfa_panic(int line, char *file, char *panicstr)
+{
+ bfa_log(NULL, BFA_LOG_HAL_ASSERT, file, line, panicstr);
+ bfa_os_panic();
+}
+
+void
+bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event)
+{
+ bfa_log(logm, BFA_LOG_HAL_SM_ASSERT, file, line, event);
+ bfa_os_panic();
+}
+
+int
+bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
+{
+ struct list_head *tqe;
+
+ tqe = bfa_q_next(q);
+ while (tqe != q) {
+ if (tqe == qe)
+ return (1);
+ tqe = bfa_q_next(tqe);
+ if (tqe == NULL)
+ break;
+ }
+ return (0);
+}
+
+
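
bfa_q_is_on_q_func() above tests list membership by walking the circular queue forward until it either finds the element or arrives back at the head. A standalone sketch of the same walk (illustration only, not part of this patch):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int is_on_q(struct list_head *q, struct list_head *qe)
{
    struct list_head *tqe;

    /* follow next pointers; stop when we loop back to the head */
    for (tqe = q->next; tqe != NULL && tqe != q; tqe = tqe->next)
        if (tqe == qe)
            return 1;
    return 0;
}

int main(void)
{
    struct list_head q, a, lone;

    q.next = &a; a.next = &q; /* head -> a -> head */
    q.prev = &a; a.prev = &q;
    lone.next = lone.prev = &lone;

    printf("a on q: %d\n", is_on_q(&q, &a));       /* 1 */
    printf("lone on q: %d\n", is_on_q(&q, &lone)); /* 0 */
    return 0;
}
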
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
new file mode 100644
index 000000000000..401babe3494e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <log/bfa_log_hal.h>
+
+BFA_TRC_FILE(HAL, FCPIM);
+BFA_MODULE(fcpim);
+
+/**
+ * hal_fcpim_mod BFA FCP Initiator Mode module
+ */
+
+/**
+ * Compute and return memory needed by FCP(im) module.
+ */
+static void
+bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+ u32 *dm_len)
+{
+ bfa_itnim_meminfo(cfg, km_len, dm_len);
+
+ /**
+ * IO memory
+ */
+ if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+ else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+
+ *km_len += cfg->fwcfg.num_ioim_reqs *
+ (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
+
+ *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
+
+ /**
+ * task management command memory
+ */
+ if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
+ cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
+ *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
+}
+
+
+static void
+bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ bfa_trc(bfa, cfg->drvcfg.path_tov);
+ bfa_trc(bfa, cfg->fwcfg.num_rports);
+ bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
+ bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
+
+ fcpim->bfa = bfa;
+ fcpim->num_itnims = cfg->fwcfg.num_rports;
+ fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+ fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
+ fcpim->path_tov = cfg->drvcfg.path_tov;
+ fcpim->delay_comp = cfg->drvcfg.delay_comp;
+
+ bfa_itnim_attach(fcpim, meminfo);
+ bfa_tskim_attach(fcpim, meminfo);
+ bfa_ioim_attach(fcpim, meminfo);
+}
+
+static void
+bfa_fcpim_initdone(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcpim_detach(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ bfa_ioim_detach(fcpim);
+ bfa_tskim_detach(fcpim);
+}
+
+static void
+bfa_fcpim_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcpim_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcpim_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_itnim_s *itnim;
+ struct list_head *qe, *qen;
+
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_iocdisable(itnim);
+ }
+}
+
+void
+bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ fcpim->path_tov = path_tov * 1000;
+ if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
+ fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
+}
+
+u16
+bfa_fcpim_path_tov_get(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ return (fcpim->path_tov / 1000);
+}
+
+bfa_status_t
+bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_fcpim_stats_s *modstats)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ *modstats = fcpim->stats;
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_clr_modstats(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ memset(&fcpim->stats, 0, sizeof(struct bfa_fcpim_stats_s));
+
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);
+
+ fcpim->q_depth = q_depth;
+}
+
+u16
+bfa_fcpim_qdepth_get(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+ return (fcpim->q_depth);
+}
+
+
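
bfa_fcpim_path_tov_set()/_get() above convert between seconds at the API boundary and clamped milliseconds stored in the module. A standalone sketch of that arithmetic (illustration only, not part of this patch):

#include <stdio.h>

#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* milliseconds, as above */

static unsigned int set_path_tov(unsigned short secs)
{
    unsigned int ms = secs * 1000; /* the API speaks seconds */

    return ms > BFA_FCPIM_PATHTOV_MAX ? BFA_FCPIM_PATHTOV_MAX : ms;
}

int main(void)
{
    printf("30s  -> %u ms\n", set_path_tov(30));            /* 30000 */
    printf("120s -> %u ms (clamped)\n", set_path_tov(120)); /* 90000 */
    return 0;
}
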
diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h
new file mode 100644
index 000000000000..153206cfb37a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim_priv.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCPIM_PRIV_H__
+#define __BFA_FCPIM_PRIV_H__
+
+#include <bfa_fcpim.h>
+#include <defs/bfa_defs_fcpim.h>
+#include <cs/bfa_wc.h>
+#include "bfa_sgpg_priv.h"
+
+#define BFA_ITNIM_MIN 32
+#define BFA_ITNIM_MAX 1024
+
+#define BFA_IOIM_MIN 8
+#define BFA_IOIM_MAX 2000
+
+#define BFA_TSKIM_MIN 4
+#define BFA_TSKIM_MAX 512
+#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
+#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
+
+#define bfa_fcpim_stats(__fcpim, __stats) \
+	(__fcpim)->stats.__stats++
+
+struct bfa_fcpim_mod_s {
+ struct bfa_s *bfa;
+ struct bfa_itnim_s *itnim_arr;
+ struct bfa_ioim_s *ioim_arr;
+ struct bfa_ioim_sp_s *ioim_sp_arr;
+ struct bfa_tskim_s *tskim_arr;
+ struct bfa_dma_s snsbase;
+ int num_itnims;
+ int num_ioim_reqs;
+ int num_tskim_reqs;
+ u32 path_tov;
+ u16 q_depth;
+ u16 rsvd;
+ struct list_head itnim_q; /* queue of active itnim */
+ struct list_head ioim_free_q; /* free IO resources */
+ struct list_head ioim_resfree_q; /* IOs waiting for f/w */
+ struct list_head ioim_comp_q; /* IO global comp Q */
+ struct list_head tskim_free_q;
+ u32 ios_active; /* current active IOs */
+ u32 delay_comp;
+ struct bfa_fcpim_stats_s stats;
+};
+
+struct bfa_ioim_s;
+struct bfa_tskim_s;
+
+/**
+ * BFA IO (initiator mode)
+ */
+struct bfa_ioim_s {
+	struct list_head qe;		/* queue element */
+ bfa_sm_t sm; /* BFA ioim state machine */
+ struct bfa_s *bfa; /* BFA module */
+ struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
+ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
+ struct bfad_ioim_s *dio; /* driver IO handle */
+ u16 iotag; /* FWI IO tag */
+	u16		abort_tag;	/* unique abort request tag */
+ u16 nsges; /* number of SG elements */
+ u16 nsgpgs; /* number of SG pages */
+ struct bfa_sgpg_s *sgpg; /* first SG page */
+ struct list_head sgpg_q; /* allocated SG pages */
+ struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
+ bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
+ struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
+};
+
+struct bfa_ioim_sp_s {
+ struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
+ u8 *snsinfo; /* sense info for this IO */
+ struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ bfa_boolean_t abort_explicit; /* aborted by OS */
+ struct bfa_tskim_s *tskim; /* Relevant TM cmd */
+};
+
+/**
+ * BFA Task management command (initiator mode)
+ */
+struct bfa_tskim_s {
+ struct list_head qe;
+ bfa_sm_t sm;
+ struct bfa_s *bfa; /* BFA module */
+ struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
+ struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
+ struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
+ bfa_boolean_t notify; /* notify itnim on TM comp */
+ lun_t lun; /* lun if applicable */
+ enum fcp_tm_cmnd tm_cmnd; /* task management command */
+ u16 tsk_tag; /* FWI IO tag */
+ u8 tsecs; /* timeout in seconds */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ struct list_head io_q; /* queue of affected IOs */
+ struct bfa_wc_s wc; /* waiting counter */
+ struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
+ enum bfi_tskim_status tsk_status; /* TM status */
+};
+
+/**
+ * BFA i-t-n (initiator mode)
+ */
+struct bfa_itnim_s {
+ struct list_head qe; /* queue element */
+ bfa_sm_t sm; /* i-t-n im BFA state machine */
+ struct bfa_s *bfa; /* bfa instance */
+ struct bfa_rport_s *rport; /* bfa rport */
+ void *ditn; /* driver i-t-n structure */
+ struct bfi_mhdr_s mhdr; /* pre-built mhdr */
+ u8 msg_no; /* itnim/rport firmware handle */
+ u8 reqq; /* CQ for requests */
+ struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
+ struct list_head pending_q; /* queue of pending IO requests*/
+ struct list_head io_q; /* queue of active IO requests */
+ struct list_head io_cleanup_q; /* IO being cleaned up */
+ struct list_head tsk_q; /* queue of active TM commands */
+ struct list_head delay_comp_q;/* queue of failed inflight cmds */
+ bfa_boolean_t seq_rec; /* SQER supported */
+ bfa_boolean_t is_online; /* itnim is ONLINE for IO */
+ bfa_boolean_t iotov_active; /* IO TOV timer is active */
+ struct bfa_wc_s wc; /* waiting counter */
+ struct bfa_timer_s timer; /* pending IO TOV */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
+ struct bfa_itnim_hal_stats_s stats;
+};
+
+#define bfa_itnim_is_online(_itnim) (_itnim)->is_online
+#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
+	(&(_fcpim)->ioim_arr[_iotag])
+#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)	\
+	(&(_fcpim)->tskim_arr[(_tmtag) & ((_fcpim)->num_tskim_reqs - 1)])
+
+/*
+ * function prototypes
+ */
+void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
+ struct bfa_meminfo_s *minfo);
+void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
+void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
+ struct bfi_msg_s *msg);
+void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
+void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
+ struct bfa_tskim_s *tskim);
+void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
+void bfa_ioim_tov(struct bfa_ioim_s *ioim);
+
+void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
+ struct bfa_meminfo_s *minfo);
+void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
+void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
+void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
+void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
+
+void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+ u32 *dm_len);
+void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
+ struct bfa_meminfo_s *minfo);
+void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
+void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
+void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
+void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
+bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
+
+#endif /* __BFA_FCPIM_PRIV_H__ */
+
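
BFA_TSKIM_FROM_TAG() above maps a firmware tag back to its array slot by masking with num_tskim_reqs - 1, which assumes the pool size is a power of two (true for the BFA_TSKIM_MIN/MAX bounds defined above). A standalone sketch of the lookup (illustration only, not part of this patch):

#include <stdio.h>

struct tskim { unsigned short tsk_tag; };

int main(void)
{
    struct tskim arr[128];       /* num_tskim_reqs = 128 */
    unsigned short tag = 0x2083; /* tag echoed by firmware */
    struct tskim *t = &arr[tag & (128 - 1)]; /* slot 3 */

    t->tsk_tag = tag;
    printf("tag 0x%x -> slot %u\n", tag, (unsigned)(t - arr));
    return 0;
}
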
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
new file mode 100644
index 000000000000..992435987deb
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -0,0 +1,1671 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <bfa_svc.h>
+#include <bfi/bfi_pport.h>
+#include <cs/bfa_debug.h>
+#include <aen/bfa_aen.h>
+#include <cs/bfa_plog.h>
+#include <aen/bfa_aen_port.h>
+
+BFA_TRC_FILE(HAL, PPORT);
+BFA_MODULE(pport);
+
+#define bfa_pport_callback(__pport, __event) do { \
+ if ((__pport)->bfa->fcs) { \
+ (__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
+ } else { \
+ (__pport)->hcb_event = (__event); \
+ bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe, \
+ __bfa_cb_port_event, (__pport)); \
+ } \
+} while (0)
+
+/*
+ * The port is considered disabled if corresponding physical port or IOC are
+ * disabled explicitly
+ */
+#define BFA_PORT_IS_DISABLED(bfa) \
+ ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
+ (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
+
+/*
+ * forward declarations
+ */
+static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
+static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
+static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
+static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
+static void bfa_pport_set_wwns(struct bfa_pport_s *port);
+static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
+static void bfa_port_stats_timeout(void *cbarg);
+static void bfa_port_stats_clr_timeout(void *cbarg);
+
+/**
+ * bfa_pport_private
+ */
+
+/**
+ * BFA port state machine events
+ */
+enum bfa_pport_sm_event {
+ BFA_PPORT_SM_START = 1, /* start port state machine */
+ BFA_PPORT_SM_STOP = 2, /* stop port state machine */
+ BFA_PPORT_SM_ENABLE = 3, /* enable port */
+ BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */
+ BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
+ BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */
+	BFA_PPORT_SM_LINKDOWN = 7,	/* firmware linkdown event */
+ BFA_PPORT_SM_QRESUME = 8, /* CQ space available */
+ BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */
+};
+
+static void bfa_pport_sm_uninit(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_enabling(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_linkup(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_disabling(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_disabled(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_stopped(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event);
+
+static struct bfa_sm_table_s hal_pport_sm_table[] = {
+ {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
+ {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
+ {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
+ {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
+ {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
+ {BFA_SM(bfa_pport_sm_disabling_qwait),
+ BFA_PPORT_ST_DISABLING_QWAIT},
+ {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
+ {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
+ {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
+ {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
+ {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
+};
+
+static void
+bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
+{
+ union bfa_aen_data_u aen_data;
+ struct bfa_log_mod_s *logmod = pport->bfa->logm;
+ wwn_t pwwn = pport->pwwn;
+ char pwwn_ptr[BFA_STRING_32];
+ struct bfa_ioc_attr_s ioc_attr;
+
+ wwn2str(pwwn_ptr, pwwn);
+ switch (event) {
+ case BFA_PORT_AEN_ONLINE:
+ bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
+ break;
+ case BFA_PORT_AEN_OFFLINE:
+ bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
+ break;
+ case BFA_PORT_AEN_ENABLE:
+ bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
+ break;
+ case BFA_PORT_AEN_DISABLE:
+ bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
+ break;
+ case BFA_PORT_AEN_DISCONNECT:
+ bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
+ break;
+ case BFA_PORT_AEN_QOS_NEG:
+ bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
+ break;
+ default:
+ break;
+ }
+
+ bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
+ aen_data.port.ioc_type = ioc_attr.ioc_type;
+ aen_data.port.pwwn = pwwn;
+}
+
+static void
+bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_START:
+ /**
+ * Start event after IOC is configured and BFA is started.
+ */
+ if (bfa_pport_send_enable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ break;
+
+ case BFA_PPORT_SM_ENABLE:
+ /**
+ * Port is persistently configured to be in enabled state. Do
+ * not change state. Port enabling is done when START event is
+ * received.
+ */
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ /**
+ * If a port is persistently configured to be disabled, the
+		 * first event will be a port disable request.
+ */
+ bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_QRESUME:
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ bfa_pport_send_enable(pport);
+ break;
+
+ case BFA_PPORT_SM_STOP:
+ bfa_reqq_wcancel(&pport->reqq_wait);
+ bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ break;
+
+ case BFA_PPORT_SM_ENABLE:
+ /**
+ * Already enable is in progress.
+ */
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ /**
+ * Just send disable request to firmware when room becomes
+ * available in request queue.
+ */
+ bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+ bfa_reqq_wcancel(&pport->reqq_wait);
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_PPORT_SM_LINKUP:
+ case BFA_PPORT_SM_LINKDOWN:
+ /**
+ * Possible to get link events when doing back-to-back
+ * enable/disables.
+ */
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_reqq_wcancel(&pport->reqq_wait);
+ bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_FWRSP:
+ case BFA_PPORT_SM_LINKDOWN:
+ bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
+ break;
+
+ case BFA_PPORT_SM_LINKUP:
+ bfa_pport_update_linkinfo(pport);
+ bfa_sm_set_state(pport, bfa_pport_sm_linkup);
+
+ bfa_assert(pport->event_cbfn);
+ bfa_pport_callback(pport, BFA_PPORT_LINKUP);
+ break;
+
+ case BFA_PPORT_SM_ENABLE:
+ /**
+ * Already being enabled.
+ */
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ if (bfa_pport_send_disable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_PPORT_SM_STOP:
+ bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_LINKUP:
+ bfa_pport_update_linkinfo(pport);
+ bfa_sm_set_state(pport, bfa_pport_sm_linkup);
+ bfa_assert(pport->event_cbfn);
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
+ bfa_pport_callback(pport, BFA_PPORT_LINKUP);
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
+ /**
+		 * If QoS is enabled and it is not yet online,
+		 * send a separate event.
+ */
+ if ((pport->cfg.qos_enabled)
+ && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);
+
+ break;
+
+ case BFA_PPORT_SM_LINKDOWN:
+ /**
+ * Possible to get link down event.
+ */
+ break;
+
+ case BFA_PPORT_SM_ENABLE:
+ /**
+ * Already enabled.
+ */
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ if (bfa_pport_send_disable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_PPORT_SM_STOP:
+ bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_ENABLE:
+ /**
+ * Already enabled.
+ */
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ if (bfa_pport_send_disable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+
+ bfa_pport_reset_linkinfo(pport);
+ bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ break;
+
+ case BFA_PPORT_SM_LINKDOWN:
+ bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
+ bfa_pport_reset_linkinfo(pport);
+ bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
+ if (BFA_PORT_IS_DISABLED(pport->bfa)) {
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+ } else {
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+ }
+ break;
+
+ case BFA_PPORT_SM_STOP:
+ bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ bfa_pport_reset_linkinfo(pport);
+ if (BFA_PORT_IS_DISABLED(pport->bfa)) {
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+ } else {
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+ }
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ bfa_pport_reset_linkinfo(pport);
+ bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
+ if (BFA_PORT_IS_DISABLED(pport->bfa)) {
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+ } else {
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
+ enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_QRESUME:
+ bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+ bfa_pport_send_disable(pport);
+ break;
+
+ case BFA_PPORT_SM_STOP:
+ bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ bfa_reqq_wcancel(&pport->reqq_wait);
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ /**
+ * Already being disabled.
+ */
+ break;
+
+ case BFA_PPORT_SM_LINKUP:
+ case BFA_PPORT_SM_LINKDOWN:
+ /**
+ * Possible to get link events when doing back-to-back
+ * enable/disables.
+ */
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
+ bfa_reqq_wcancel(&pport->reqq_wait);
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_FWRSP:
+ bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ /**
+ * Already being disabled.
+ */
+ break;
+
+ case BFA_PPORT_SM_ENABLE:
+ if (bfa_pport_send_enable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
+ break;
+
+ case BFA_PPORT_SM_STOP:
+ bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ break;
+
+ case BFA_PPORT_SM_LINKUP:
+ case BFA_PPORT_SM_LINKDOWN:
+ /**
+ * Possible to get link events when doing back-to-back
+ * enable/disables.
+ */
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_START:
+ /**
+ * Ignore start event for a port that is disabled.
+ */
+ break;
+
+ case BFA_PPORT_SM_STOP:
+ bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ break;
+
+ case BFA_PPORT_SM_ENABLE:
+ if (bfa_pport_send_enable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+
+ bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+ bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
+ break;
+
+ case BFA_PPORT_SM_DISABLE:
+ /**
+ * Already disabled.
+ */
+ break;
+
+ case BFA_PPORT_SM_HWFAIL:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
+ break;
+
+ default:
+ bfa_sm_fault(pport->bfa, event);
+ }
+}
+
+static void
+bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_START:
+ if (bfa_pport_send_enable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ break;
+
+ default:
+ /**
+ * Ignore all other events.
+ */
+ ;
+ }
+}
+
+/**
+ * Port is enabled. IOC is down/failed.
+ */
+static void
+bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_START:
+ if (bfa_pport_send_enable(pport))
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ else
+ bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ break;
+
+ default:
+ /**
+ * Ignore all events.
+ */
+ ;
+ }
+}
+
+/**
+ * Port is disabled. IOC is down/failed.
+ */
+static void
+bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+{
+ bfa_trc(pport->bfa, event);
+
+ switch (event) {
+ case BFA_PPORT_SM_START:
+ bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+ break;
+
+ case BFA_PPORT_SM_ENABLE:
+ bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ break;
+
+ default:
+ /**
+ * Ignore all events.
+ */
+ ;
+ }
+}
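+
+/*
+ * Note on the state machine pattern above: each state is a plain function
+ * and the current state is held as a function pointer on the port, so
+ * bfa_sm_set_state() installs a handler and bfa_sm_send_event() invokes
+ * whichever handler is current.  A sketch of the likely helper
+ * definitions (an assumption, not verified against bfa_sm.h):
+ *
+ *	#define bfa_sm_set_state(_sm, _state) \
+ *		((_sm)->sm = (bfa_sm_t)(_state))
+ *	#define bfa_sm_send_event(_sm, _event) \
+ *		((_sm)->sm((_sm), (_event)))
+ */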
+
+
+
+/**
+ * bfa_pport_private
+ */
+
+static void
+__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_pport_s *pport = cbarg;
+
+ if (complete)
+ pport->event_cbfn(pport->event_cbarg, pport->hcb_event);
+}
+
+#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
+ BFA_CACHELINE_SZ))
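+
+/*
+ * BFA_ROUNDUP() pads the stats block to a whole number of cache lines so
+ * the DMA region does not share a line with unrelated data.  Assuming the
+ * usual round-up idiom (((_l) + (_sz) - 1) & ~((_sz) - 1)) and, say, a
+ * 64-byte cache line (both assumptions), a 1100-byte stats union would be
+ * padded to ((1100 + 63) & ~63) = 1152 bytes.
+ */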
+
+static void
+bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+ u32 *dm_len)
+{
+ *dm_len += PPORT_STATS_DMA_SZ;
+}
+
+static void
+bfa_pport_qresume(void *cbarg)
+{
+ struct bfa_pport_s *port = cbarg;
+
+ bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
+}
+
+static void
+bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
+{
+ u8 *dm_kva;
+ u64 dm_pa;
+
+ dm_kva = bfa_meminfo_dma_virt(meminfo);
+ dm_pa = bfa_meminfo_dma_phys(meminfo);
+
+ pport->stats_kva = dm_kva;
+ pport->stats_pa = dm_pa;
+ pport->stats = (union bfa_pport_stats_u *)dm_kva;
+
+ dm_kva += PPORT_STATS_DMA_SZ;
+ dm_pa += PPORT_STATS_DMA_SZ;
+
+ bfa_meminfo_dma_virt(meminfo) = dm_kva;
+ bfa_meminfo_dma_phys(meminfo) = dm_pa;
+}
+
+/**
+ * Memory initialization.
+ */
+static void
+bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_pport_cfg_s *port_cfg = &pport->cfg;
+
+ bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
+ pport->bfa = bfa;
+
+ bfa_pport_mem_claim(pport, meminfo);
+
+ bfa_sm_set_state(pport, bfa_pport_sm_uninit);
+
+ /**
+ * initialize and set default configuration
+ */
+ port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
+ port_cfg->speed = BFA_PPORT_SPEED_AUTO;
+ port_cfg->trunked = BFA_FALSE;
+ port_cfg->maxfrsize = 0;
+
+ port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
+
+ bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
+}
+
+static void
+bfa_pport_initdone(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ /**
+ * Initialize port attributes from IOC hardware data.
+ */
+ bfa_pport_set_wwns(pport);
+ if (pport->cfg.maxfrsize == 0)
+ pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
+ pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
+ pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
+
+ bfa_assert(pport->cfg.maxfrsize);
+ bfa_assert(pport->cfg.rx_bbcredit);
+ bfa_assert(pport->speed_sup);
+}
+
+static void
+bfa_pport_detach(struct bfa_s *bfa)
+{
+}
+
+/**
+ * Called when IOC is ready.
+ */
+static void
+bfa_pport_start(struct bfa_s *bfa)
+{
+ bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
+}
+
+/**
+ * Called before IOC is stopped.
+ */
+static void
+bfa_pport_stop(struct bfa_s *bfa)
+{
+ bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
+}
+
+/**
+ * Called when IOC failure is detected.
+ */
+static void
+bfa_pport_iocdisable(struct bfa_s *bfa)
+{
+ bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
+}
+
+static void
+bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
+{
+ struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;
+
+ pport->speed = pevent->link_state.speed;
+ pport->topology = pevent->link_state.topology;
+
+ if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
+ pport->myalpa = pevent->link_state.tl.loop_info.myalpa;
+
+ /*
+ * QoS Details
+ */
+ bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
+ bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);
+
+ bfa_trc(pport->bfa, pport->speed);
+ bfa_trc(pport->bfa, pport->topology);
+}
+
+static void
+bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
+{
+ pport->speed = BFA_PPORT_SPEED_UNKNOWN;
+ pport->topology = BFA_PPORT_TOPOLOGY_NONE;
+}
+
+/**
+ * Send port enable message to firmware.
+ */
+static bfa_boolean_t
+bfa_pport_send_enable(struct bfa_pport_s *port)
+{
+ struct bfi_pport_enable_req_s *m;
+
+ /**
+ * Increment message tag before queue check, so that responses to old
+ * requests are discarded.
+ */
+ port->msgtag++;
+
+ /**
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+ if (!m) {
+ bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
+ bfa_lpuid(port->bfa));
+ m->nwwn = port->nwwn;
+ m->pwwn = port->pwwn;
+ m->port_cfg = port->cfg;
+ m->msgtag = port->msgtag;
+ m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
+ bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
+ bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
+ bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);
+
+ /**
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+ return BFA_TRUE;
+}
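+
+/*
+ * Queue-full handling above: when bfa_reqq_next() returns NULL, the port
+ * parks itself on reqq_wait (initialized in bfa_pport_attach() to invoke
+ * bfa_pport_qresume()).  When the firmware frees a request queue entry,
+ * the wait callback posts BFA_PPORT_SM_QRESUME and the *_qwait state
+ * retries the send.  Incrementing msgtag before the queue check ensures
+ * that a response carrying a stale tag is ignored in bfa_pport_isr().
+ */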
+
+/**
+ * Send port disable message to firmware.
+ */
+static bfa_boolean_t
+bfa_pport_send_disable(struct bfa_pport_s *port)
+{
+ bfi_pport_disable_req_t *m;
+
+ /**
+ * Increment message tag before queue check, so that responses to old
+ * requests are discarded.
+ */
+ port->msgtag++;
+
+ /**
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+ if (!m) {
+ bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
+ return BFA_FALSE;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
+ bfa_lpuid(port->bfa));
+ m->msgtag = port->msgtag;
+
+ /**
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+
+ return BFA_TRUE;
+}
+
+static void
+bfa_pport_set_wwns(struct bfa_pport_s *port)
+{
+ port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
+ port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);
+
+ bfa_trc(port->bfa, port->pwwn);
+ bfa_trc(port->bfa, port->nwwn);
+}
+
+static void
+bfa_port_send_txcredit(void *port_cbarg)
+{
+ struct bfa_pport_s *port = port_cbarg;
+ struct bfi_pport_set_svc_params_req_s *m;
+
+ /**
+ * check for room in queue to send request now
+ */
+ m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+ if (!m) {
+ bfa_trc(port->bfa, port->cfg.tx_bbcredit);
+ return;
+ }
+
+ bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
+ bfa_lpuid(port->bfa));
+ m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);
+
+ /**
+ * queue I/O message to firmware
+ */
+ bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+}
+
+
+
+/**
+ * bfa_pport_public
+ */
+
+/**
+ * Firmware message handler.
+ */
+void
+bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ union bfi_pport_i2h_msg_u i2hmsg;
+
+ i2hmsg.msg = msg;
+ pport->event_arg.i2hmsg = i2hmsg;
+
+ switch (msg->mhdr.msg_id) {
+ case BFI_PPORT_I2H_ENABLE_RSP:
+ if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
+ bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
+ break;
+
+ case BFI_PPORT_I2H_DISABLE_RSP:
+ if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
+ bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
+ break;
+
+ case BFI_PPORT_I2H_EVENT:
+ switch (i2hmsg.event->link_state.linkstate) {
+ case BFA_PPORT_LINKUP:
+ bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
+ break;
+ case BFA_PPORT_LINKDOWN:
+ bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
+ break;
+ case BFA_PPORT_TRUNK_LINKDOWN:
+ /** todo: event notification */
+ break;
+ }
+ break;
+
+ case BFI_PPORT_I2H_GET_STATS_RSP:
+ case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
+ /*
+ * check for timer pop before processing the rsp
+ */
+ if (pport->stats_busy == BFA_FALSE
+ || pport->stats_status == BFA_STATUS_ETIMER)
+ break;
+
+ bfa_timer_stop(&pport->timer);
+ pport->stats_status = i2hmsg.getstats_rsp->status;
+ bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
+ pport);
+ break;
+ case BFI_PPORT_I2H_CLEAR_STATS_RSP:
+ case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
+ /*
+ * check for timer pop before processing the rsp
+ */
+ if (pport->stats_busy == BFA_FALSE
+ || pport->stats_status == BFA_STATUS_ETIMER)
+ break;
+
+ bfa_timer_stop(&pport->timer);
+ pport->stats_status = BFA_STATUS_OK;
+ bfa_cb_queue(pport->bfa, &pport->hcb_qe,
+ __bfa_cb_port_stats_clr, pport);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+
+
+/**
+ * bfa_pport_api
+ */
+
+/**
+ * Registered callback for port events.
+ */
+void
+bfa_pport_event_register(struct bfa_s *bfa,
+ void (*cbfn) (void *cbarg, bfa_pport_event_t event),
+ void *cbarg)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ pport->event_cbfn = cbfn;
+ pport->event_cbarg = cbarg;
+}
+
+bfa_status_t
+bfa_pport_enable(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ if (pport->diag_busy)
+ return (BFA_STATUS_DIAG_BUSY);
+	else if (bfa_sm_cmp_state(BFA_PORT_MOD(bfa),
+				  bfa_pport_sm_disabling_qwait))
+ return (BFA_STATUS_DEVBUSY);
+
+ bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_pport_disable(struct bfa_s *bfa)
+{
+ bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Configure port speed.
+ */
+bfa_status_t
+bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, speed);
+
+ if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
+ bfa_trc(bfa, pport->speed_sup);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+
+ pport->cfg.speed = speed;
+
+ return (BFA_STATUS_OK);
+}
+
+/**
+ * Get current speed.
+ */
+enum bfa_pport_speed
+bfa_pport_get_speed(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ return port->speed;
+}
+
+/**
+ * Configure port topology.
+ */
+bfa_status_t
+bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, topology);
+ bfa_trc(bfa, pport->cfg.topology);
+
+ switch (topology) {
+ case BFA_PPORT_TOPOLOGY_P2P:
+ case BFA_PPORT_TOPOLOGY_LOOP:
+ case BFA_PPORT_TOPOLOGY_AUTO:
+ break;
+
+ default:
+ return BFA_STATUS_EINVAL;
+ }
+
+ pport->cfg.topology = topology;
+ return (BFA_STATUS_OK);
+}
+
+/**
+ * Get current topology.
+ */
+enum bfa_pport_topology
+bfa_pport_get_topology(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ return port->topology;
+}
+
+bfa_status_t
+bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, alpa);
+ bfa_trc(bfa, pport->cfg.cfg_hardalpa);
+ bfa_trc(bfa, pport->cfg.hardalpa);
+
+ pport->cfg.cfg_hardalpa = BFA_TRUE;
+ pport->cfg.hardalpa = alpa;
+
+ return (BFA_STATUS_OK);
+}
+
+bfa_status_t
+bfa_pport_clr_hardalpa(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, pport->cfg.cfg_hardalpa);
+ bfa_trc(bfa, pport->cfg.hardalpa);
+
+ pport->cfg.cfg_hardalpa = BFA_FALSE;
+ return (BFA_STATUS_OK);
+}
+
+bfa_boolean_t
+bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ *alpa = port->cfg.hardalpa;
+ return port->cfg.cfg_hardalpa;
+}
+
+u8
+bfa_pport_get_myalpa(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ return port->myalpa;
+}
+
+bfa_status_t
+bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, maxfrsize);
+ bfa_trc(bfa, pport->cfg.maxfrsize);
+
+ /*
+	 * Make sure the requested size is within the valid range.
+ */
+ if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
+ return (BFA_STATUS_INVLD_DFSZ);
+
+ /*
+	 * Must be a power of 2, unless it is the maximum frame size of 2112.
+ */
+ if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
+ return (BFA_STATUS_INVLD_DFSZ);
+
+ pport->cfg.maxfrsize = maxfrsize;
+ return (BFA_STATUS_OK);
+}
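+
+/*
+ * The (maxfrsize & (maxfrsize - 1)) test above is the standard
+ * power-of-two check: the expression is zero only for powers of two.
+ * FC_MAX_PDUSZ (2112) must be exempted explicitly because it is a legal
+ * frame size but not a power of two:
+ *
+ *	2112 = 0x840, 2111 = 0x83f, 0x840 & 0x83f = 0x800 != 0
+ */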
+
+u16
+bfa_pport_get_maxfrsize(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ return port->cfg.maxfrsize;
+}
+
+u32
+bfa_pport_mypid(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ return port->mypid;
+}
+
+u8
+bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ return port->cfg.rx_bbcredit;
+}
+
+void
+bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ port->cfg.tx_bbcredit = (u8) tx_bbcredit;
+ bfa_port_send_txcredit(port);
+}
+
+/**
+ * Get port attributes.
+ */
+
+wwn_t
+bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ if (node)
+ return pport->nwwn;
+ else
+ return pport->pwwn;
+}
+
+void
+bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
+
+ attr->nwwn = pport->nwwn;
+ attr->pwwn = pport->pwwn;
+
+ bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
+ sizeof(struct bfa_pport_cfg_s));
+ /*
+ * speed attributes
+ */
+ attr->pport_cfg.speed = pport->cfg.speed;
+ attr->speed_supported = pport->speed_sup;
+ attr->speed = pport->speed;
+ attr->cos_supported = FC_CLASS_3;
+
+ /*
+ * topology attributes
+ */
+ attr->pport_cfg.topology = pport->cfg.topology;
+ attr->topology = pport->topology;
+
+ /*
+ * beacon attributes
+ */
+ attr->beacon = pport->beacon;
+ attr->link_e2e_beacon = pport->link_e2e_beacon;
+ attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);
+
+ attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
+ attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
+ attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
+ if (bfa_ioc_is_disabled(&pport->bfa->ioc))
+ attr->port_state = BFA_PPORT_ST_IOCDIS;
+ else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
+ attr->port_state = BFA_PPORT_ST_FWMISMATCH;
+}
+
+static void
+bfa_port_stats_query(void *cbarg)
+{
+ struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
+ bfi_pport_get_stats_req_t *msg;
+
+ msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+
+ if (!msg) {
+ port->stats_qfull = BFA_TRUE;
+ bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
+ port);
+ bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
+ return;
+ }
+ port->stats_qfull = BFA_FALSE;
+
+ bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
+ bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
+ bfa_lpuid(port->bfa));
+ bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+
+ return;
+}
+
+static void
+bfa_port_stats_clear(void *cbarg)
+{
+ struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
+ bfi_pport_clear_stats_req_t *msg;
+
+ msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+
+ if (!msg) {
+ port->stats_qfull = BFA_TRUE;
+ bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
+ port);
+ bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
+ return;
+ }
+ port->stats_qfull = BFA_FALSE;
+
+ bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
+ bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
+ bfa_lpuid(port->bfa));
+ bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+ return;
+}
+
+static void
+bfa_port_qos_stats_clear(void *cbarg)
+{
+ struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
+ bfi_pport_clear_qos_stats_req_t *msg;
+
+ msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+
+ if (!msg) {
+ port->stats_qfull = BFA_TRUE;
+ bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
+ port);
+ bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
+ return;
+ }
+ port->stats_qfull = BFA_FALSE;
+
+ bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
+ bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
+ bfa_lpuid(port->bfa));
+ bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+ return;
+}
+
+static void
+bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
+{
+ u32 *dip = (u32 *) d;
+ u32 *sip = (u32 *) s;
+ int i;
+
+ /*
+ * Do 64 bit fields swap first
+ */
+	for (i = 0;
+	     i < ((sizeof(union bfa_pport_stats_u) -
+		   sizeof(struct bfa_qos_stats_s)) / sizeof(u32));
+	     i += 2) {
+#ifdef __BIGENDIAN
+ dip[i] = bfa_os_ntohl(sip[i]);
+ dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
+#else
+ dip[i] = bfa_os_ntohl(sip[i + 1]);
+ dip[i + 1] = bfa_os_ntohl(sip[i]);
+#endif
+ }
+
+ /*
+ * Now swap the 32 bit fields
+ */
+ for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
+ dip[i] = bfa_os_ntohl(sip[i]);
+}
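+
+/*
+ * Worked example for the swap above, assuming big-endian firmware stats
+ * and a little-endian host: a 64-bit counter 0x0000000100000002 arrives
+ * as the byte stream 00 00 00 01 00 00 00 02, i.e. sip[0] = 0x01000000
+ * and sip[1] = 0x02000000 when read as host words.  Exchanging the word
+ * pair while byte-swapping each word yields dip[0] = 0x00000002 and
+ * dip[1] = 0x00000001, which reads back as 0x0000000100000002 on the
+ * little-endian host.  The trailing 32-bit QoS counters only need the
+ * per-word byte swap.
+ */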
+
+static void
+__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_pport_s *port = cbarg;
+
+ if (complete) {
+ port->stats_cbfn(port->stats_cbarg, port->stats_status);
+ } else {
+ port->stats_busy = BFA_FALSE;
+ port->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_port_stats_clr_timeout(void *cbarg)
+{
+ struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
+
+ bfa_trc(port->bfa, port->stats_qfull);
+
+ if (port->stats_qfull) {
+ bfa_reqq_wcancel(&port->stats_reqq_wait);
+ port->stats_qfull = BFA_FALSE;
+ }
+
+ port->stats_status = BFA_STATUS_ETIMER;
+ bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
+}
+
+static void
+__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_pport_s *port = cbarg;
+
+ if (complete) {
+ if (port->stats_status == BFA_STATUS_OK)
+ bfa_pport_stats_swap(port->stats_ret, port->stats);
+ port->stats_cbfn(port->stats_cbarg, port->stats_status);
+ } else {
+ port->stats_busy = BFA_FALSE;
+ port->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_port_stats_timeout(void *cbarg)
+{
+ struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
+
+ bfa_trc(port->bfa, port->stats_qfull);
+
+ if (port->stats_qfull) {
+ bfa_reqq_wcancel(&port->stats_reqq_wait);
+ port->stats_qfull = BFA_FALSE;
+ }
+
+ port->stats_status = BFA_STATUS_ETIMER;
+ bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
+}
+
+#define BFA_PORT_STATS_TOV 1000
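+
+/*
+ * The TOV above (presumably in milliseconds) races against the firmware
+ * response: if the timer pops first, the timeout handler sets
+ * stats_status to BFA_STATUS_ETIMER and queues the completion, and
+ * bfa_pport_isr() then discards any late GET/CLEAR stats response via
+ * its stats_status == BFA_STATUS_ETIMER check.
+ */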
+
+/**
+ * Fetch port statistics.
+ */
+bfa_status_t
+bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
+ bfa_cb_pport_t cbfn, void *cbarg)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ if (port->stats_busy) {
+ bfa_trc(bfa, port->stats_busy);
+ return (BFA_STATUS_DEVBUSY);
+ }
+
+ port->stats_busy = BFA_TRUE;
+ port->stats_ret = stats;
+ port->stats_cbfn = cbfn;
+ port->stats_cbarg = cbarg;
+
+ bfa_port_stats_query(port);
+
+ bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
+ BFA_PORT_STATS_TOV);
+ return (BFA_STATUS_OK);
+}
+
+bfa_status_t
+bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ if (port->stats_busy) {
+ bfa_trc(bfa, port->stats_busy);
+ return (BFA_STATUS_DEVBUSY);
+ }
+
+ port->stats_busy = BFA_TRUE;
+ port->stats_cbfn = cbfn;
+ port->stats_cbarg = cbarg;
+
+ bfa_port_stats_clear(port);
+
+ bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
+ BFA_PORT_STATS_TOV);
+ return (BFA_STATUS_OK);
+}
+
+bfa_status_t
+bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, bitmap);
+ bfa_trc(bfa, pport->cfg.trunked);
+ bfa_trc(bfa, pport->cfg.trunk_ports);
+
+ if (!bitmap || (bitmap & (bitmap - 1)))
+ return BFA_STATUS_EINVAL;
+
+ pport->cfg.trunked = BFA_TRUE;
+ pport->cfg.trunk_ports = bitmap;
+
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
+ qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
+}
+
+void
+bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
+ struct bfa_qos_vc_attr_s *qos_vc_attr)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
+ u32 i = 0;
+
+ qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
+ qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
+ qos_vc_attr->elp_opmode_flags =
+ bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
+
+ /*
+ * Individual VC info
+ */
+ while (i < qos_vc_attr->total_vc_count) {
+ qos_vc_attr->vc_info[i].vc_credit =
+ bfa_vc_attr->vc_info[i].vc_credit;
+ qos_vc_attr->vc_info[i].borrow_credit =
+ bfa_vc_attr->vc_info[i].borrow_credit;
+ qos_vc_attr->vc_info[i].priority =
+ bfa_vc_attr->vc_info[i].priority;
+ ++i;
+ }
+}
+
+/**
+ * Fetch QoS Stats.
+ */
+bfa_status_t
+bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
+ bfa_cb_pport_t cbfn, void *cbarg)
+{
+ /*
+	 * QoS stats are embedded in the port stats.
+ */
+ return (bfa_pport_get_stats(bfa, stats, cbfn, cbarg));
+}
+
+bfa_status_t
+bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ if (port->stats_busy) {
+ bfa_trc(bfa, port->stats_busy);
+ return (BFA_STATUS_DEVBUSY);
+ }
+
+ port->stats_busy = BFA_TRUE;
+ port->stats_cbfn = cbfn;
+ port->stats_cbarg = cbarg;
+
+ bfa_port_qos_stats_clear(port);
+
+ bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
+ BFA_PORT_STATS_TOV);
+ return (BFA_STATUS_OK);
+}
+
+/**
+ * Disable trunking.
+ */
+bfa_status_t
+bfa_pport_trunk_disable(struct bfa_s *bfa)
+{
+ return (BFA_STATUS_OK);
+}
+
+bfa_boolean_t
+bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ *bitmap = port->cfg.trunk_ports;
+ return port->cfg.trunked;
+}
+
+bfa_boolean_t
+bfa_pport_is_disabled(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+
+ return (bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
+ BFA_PPORT_ST_DISABLED);
+}
+
+bfa_boolean_t
+bfa_pport_is_ratelim(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+	return (pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE);
+}
+
+void
+bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, on_off);
+ bfa_trc(bfa, pport->cfg.qos_enabled);
+
+ pport->cfg.qos_enabled = on_off;
+}
+
+void
+bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, on_off);
+ bfa_trc(bfa, pport->cfg.ratelimit);
+
+ pport->cfg.ratelimit = on_off;
+ if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
+ pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
+}
+
+/**
+ * Configure default minimum ratelim speed
+ */
+bfa_status_t
+bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, speed);
+
+ /*
+ * Auto and speeds greater than the supported speed, are invalid
+ */
+ if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
+ bfa_trc(bfa, pport->speed_sup);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+
+ pport->cfg.trl_def_speed = speed;
+
+ return (BFA_STATUS_OK);
+}
+
+/**
+ * Get default minimum ratelim speed
+ */
+enum bfa_pport_speed
+bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, pport->cfg.trl_def_speed);
+ return (pport->cfg.trl_def_speed);
+}
+
+void
+bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, status);
+ bfa_trc(bfa, pport->diag_busy);
+
+ pport->diag_busy = status;
+}
+
+void
+bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon)
+{
+ struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+
+ bfa_trc(bfa, beacon);
+ bfa_trc(bfa, link_e2e_beacon);
+ bfa_trc(bfa, pport->beacon);
+ bfa_trc(bfa, pport->link_e2e_beacon);
+
+ pport->beacon = beacon;
+ pport->link_e2e_beacon = link_e2e_beacon;
+}
+
+bfa_boolean_t
+bfa_pport_is_linkup(struct bfa_s *bfa)
+{
+ return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);
+}
+
+
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
new file mode 100644
index 000000000000..7cb39a306ea9
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/**
+ * bfa_fcs.c BFA FCS main
+ */
+
+#include <fcs/bfa_fcs.h>
+#include "fcs_port.h"
+#include "fcs_uf.h"
+#include "fcs_vport.h"
+#include "fcs_rport.h"
+#include "fcs_fabric.h"
+#include "fcs_fcpim.h"
+#include "fcs_fcptm.h"
+#include "fcbuild.h"
+#include "fcs.h"
+#include "bfad_drv.h"
+#include <fcb/bfa_fcb.h>
+
+/**
+ * FCS sub-modules
+ */
+struct bfa_fcs_mod_s {
+ void (*modinit) (struct bfa_fcs_s *fcs);
+ void (*modexit) (struct bfa_fcs_s *fcs);
+};
+
+#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
+
+static struct bfa_fcs_mod_s fcs_modules[] = {
+ BFA_FCS_MODULE(bfa_fcs_pport),
+ BFA_FCS_MODULE(bfa_fcs_uf),
+ BFA_FCS_MODULE(bfa_fcs_fabric),
+ BFA_FCS_MODULE(bfa_fcs_vport),
+ BFA_FCS_MODULE(bfa_fcs_rport),
+ BFA_FCS_MODULE(bfa_fcs_fcpim),
+};
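+
+/*
+ * BFA_FCS_MODULE() pastes the module prefix onto _modinit/_modexit, so
+ * for example BFA_FCS_MODULE(bfa_fcs_pport) expands to
+ *
+ *	{ bfa_fcs_pport_modinit, bfa_fcs_pport_modexit }
+ *
+ * matching the handlers defined in bfa_fcs_port.c.
+ */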
+
+static void
+bfa_fcs_exit_comp(void *fcs_cbarg)
+{
+ struct bfa_fcs_s *fcs = fcs_cbarg;
+ struct bfad_s *bfad = fcs->bfad;
+
+ complete(&bfad->comp);
+}
+
+
+
+/**
+ * fcs_api BFA FCS API
+ */
+
+/**
+ * FCS instance initialization.
+ *
+ * param[in] fcs FCS instance
+ * param[in] bfa BFA instance
+ * param[in] bfad BFA driver instance
+ *
+ * return None
+ */
+void
+bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+ bfa_boolean_t min_cfg)
+{
+ int i;
+ struct bfa_fcs_mod_s *mod;
+
+ fcs->bfa = bfa;
+ fcs->bfad = bfad;
+ fcs->min_cfg = min_cfg;
+
+ bfa_attach_fcs(bfa);
+ fcbuild_init();
+
+ for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
+ mod = &fcs_modules[i];
+ mod->modinit(fcs);
+ }
+}
+
+/**
+ * Start FCS operations.
+ */
+void
+bfa_fcs_start(struct bfa_fcs_s *fcs)
+{
+ bfa_fcs_fabric_modstart(fcs);
+}
+
+/**
+ * FCS driver details initialization.
+ *
+ * param[in] fcs FCS instance
+ * param[in] driver_info Driver Details
+ *
+ * return None
+ */
+void
+bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
+ struct bfa_fcs_driver_info_s *driver_info)
+{
+ fcs->driver_info = *driver_info;
+
+ bfa_fcs_fabric_psymb_init(&fcs->fabric);
+}
+
+/**
+ * FCS instance cleanup and exit.
+ *
+ * param[in] fcs FCS instance
+ * return None
+ */
+void
+bfa_fcs_exit(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_mod_s *mod;
+ int nmods, i;
+
+ bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+
+ nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
+
+ for (i = 0; i < nmods; i++) {
+ bfa_wc_up(&fcs->wc);
+
+ mod = &fcs_modules[i];
+ mod->modexit(fcs);
+ }
+
+ bfa_wc_wait(&fcs->wc);
+}
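+
+/*
+ * Teardown above uses a wait counter: bfa_wc_up() is called once per
+ * module before its modexit, each module acknowledges (possibly
+ * asynchronously) through bfa_fcs_modexit_comp() -> bfa_wc_down(), and
+ * once the count drains to zero bfa_fcs_exit_comp() completes
+ * bfad->comp, presumably releasing a caller blocked on that completion.
+ */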
+
+
+void
+bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
+{
+ fcs->trcmod = trcmod;
+}
+
+
+void
+bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod)
+{
+ fcs->logm = logmod;
+}
+
+
+void
+bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen)
+{
+ fcs->aen = aen;
+}
+
+void
+bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
+{
+ bfa_wc_down(&fcs->wc);
+}
+
+
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
new file mode 100644
index 000000000000..8975ed041dc0
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -0,0 +1,940 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/**
+ * bfa_fcs_port.c BFA FCS port
+ */
+
+#include <fcs/bfa_fcs.h>
+#include <fcs/bfa_fcs_lport.h>
+#include <fcs/bfa_fcs_rport.h>
+#include <fcb/bfa_fcb_port.h>
+#include <bfa_svc.h>
+#include <log/bfa_log_fcs.h>
+#include "fcs.h"
+#include "fcs_lport.h"
+#include "fcs_vport.h"
+#include "fcs_rport.h"
+#include "fcs_fcxp.h"
+#include "fcs_trcmod.h"
+#include "lport_priv.h"
+#include <aen/bfa_aen_lport.h>
+
+BFA_TRC_FILE(FCS, PORT);
+
+/**
+ * Forward declarations
+ */
+
+static void bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
+ enum bfa_lport_aen_event event);
+static void bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port,
+ struct fchs_s *rx_fchs, u8 reason_code,
+ u8 reason_code_expl);
+static void bfa_fcs_port_plogi(struct bfa_fcs_port_s *port,
+ struct fchs_s *rx_fchs,
+ struct fc_logi_s *plogi);
+static void bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port);
+static void bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port);
+static void bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port);
+static void bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port);
+static void bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port);
+static void bfa_fcs_port_deleted(struct bfa_fcs_port_s *port);
+static void bfa_fcs_port_echo(struct bfa_fcs_port_s *port,
+ struct fchs_s *rx_fchs,
+ struct fc_echo_s *echo, u16 len);
+static void bfa_fcs_port_rnid(struct bfa_fcs_port_s *port,
+ struct fchs_s *rx_fchs,
+ struct fc_rnid_cmd_s *rnid, u16 len);
+static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port,
+ struct fc_rnid_general_topology_data_s *gen_topo_data);
+
+static struct {
+ void (*init) (struct bfa_fcs_port_s *port);
+ void (*online) (struct bfa_fcs_port_s *port);
+ void (*offline) (struct bfa_fcs_port_s *port);
+} __port_action[] = {
+	{ bfa_fcs_port_unknown_init, bfa_fcs_port_unknown_online,
+	  bfa_fcs_port_unknown_offline },
+	{ bfa_fcs_port_fab_init, bfa_fcs_port_fab_online,
+	  bfa_fcs_port_fab_offline },
+	{ bfa_fcs_port_loop_init, bfa_fcs_port_loop_online,
+	  bfa_fcs_port_loop_offline },
+	{ bfa_fcs_port_n2n_init, bfa_fcs_port_n2n_online,
+	  bfa_fcs_port_n2n_offline },
+};
+
+/**
+ * fcs_port_sm FCS logical port state machine
+ */
+
+enum bfa_fcs_port_event {
+ BFA_FCS_PORT_SM_CREATE = 1,
+ BFA_FCS_PORT_SM_ONLINE = 2,
+ BFA_FCS_PORT_SM_OFFLINE = 3,
+ BFA_FCS_PORT_SM_DELETE = 4,
+ BFA_FCS_PORT_SM_DELRPORT = 5,
+};
+
+static void bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event);
+static void bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event);
+static void bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event);
+static void bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event);
+static void bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event);
+
+static void
+bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_CREATE:
+ bfa_sm_set_state(port, bfa_fcs_port_sm_init);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+static void
+bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_ONLINE:
+ bfa_sm_set_state(port, bfa_fcs_port_sm_online);
+ bfa_fcs_port_online_actions(port);
+ break;
+
+ case BFA_FCS_PORT_SM_DELETE:
+ bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
+ bfa_fcs_port_deleted(port);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+static void
+bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe, *qen;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_OFFLINE:
+ bfa_sm_set_state(port, bfa_fcs_port_sm_offline);
+ bfa_fcs_port_offline_actions(port);
+ break;
+
+ case BFA_FCS_PORT_SM_DELETE:
+
+ __port_action[port->fabric->fab_type].offline(port);
+
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
+ bfa_fcs_port_deleted(port);
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_port_sm_deleting);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *)qe;
+ bfa_fcs_rport_delete(rport);
+ }
+ }
+ break;
+
+ case BFA_FCS_PORT_SM_DELRPORT:
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+static void
+bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe, *qen;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_ONLINE:
+ bfa_sm_set_state(port, bfa_fcs_port_sm_online);
+ bfa_fcs_port_online_actions(port);
+ break;
+
+ case BFA_FCS_PORT_SM_DELETE:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
+ bfa_fcs_port_deleted(port);
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_port_sm_deleting);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *)qe;
+ bfa_fcs_rport_delete(rport);
+ }
+ }
+ break;
+
+ case BFA_FCS_PORT_SM_DELRPORT:
+ case BFA_FCS_PORT_SM_OFFLINE:
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+static void
+bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
+ enum bfa_fcs_port_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_DELRPORT:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_port_sm_uninit);
+ bfa_fcs_port_deleted(port);
+ }
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+
+
+/**
+ * fcs_port_pvt
+ */
+
+/**
+ * Send AEN notification
+ */
+static void
+bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
+ enum bfa_lport_aen_event event)
+{
+ union bfa_aen_data_u aen_data;
+ struct bfa_log_mod_s *logmod = port->fcs->logm;
+ enum bfa_port_role role = port->port_cfg.roles;
+ wwn_t lpwwn = bfa_fcs_port_get_pwwn(port);
+ char lpwwn_ptr[BFA_STRING_32];
+ char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] =
+ { "Initiator", "Target", "IPFC" };
+
+ wwn2str(lpwwn_ptr, lpwwn);
+
+ bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
+
+ switch (event) {
+ case BFA_LPORT_AEN_ONLINE:
+ bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr,
+ role_str[role / 2]);
+ break;
+ case BFA_LPORT_AEN_OFFLINE:
+ bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr,
+ role_str[role / 2]);
+ break;
+ case BFA_LPORT_AEN_NEW:
+ bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr,
+ role_str[role / 2]);
+ break;
+ case BFA_LPORT_AEN_DELETE:
+ bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr,
+ role_str[role / 2]);
+ break;
+ case BFA_LPORT_AEN_DISCONNECT:
+ bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr,
+ role_str[role / 2]);
+ break;
+ default:
+ break;
+ }
+
+ aen_data.lport.vf_id = port->fabric->vf_id;
+ aen_data.lport.roles = role;
+ aen_data.lport.ppwwn =
+ bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs));
+ aen_data.lport.lpwwn = lpwwn;
+}
+
+/*
+ * Send a LS reject
+ */
+static void
+bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
+ u8 reason_code, u8 reason_code_expl)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ int len;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ if (!fcxp)
+ return;
+
+ len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
+ bfa_fcs_port_get_fcid(port), rx_fchs->ox_id,
+ reason_code, reason_code_expl);
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/**
+ * Process incoming plogi from a remote port.
+ */
+static void
+bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
+ struct fc_logi_s *plogi)
+{
+ struct bfa_fcs_rport_s *rport;
+
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ /*
+ * If min cfg mode is enabled, drop any incoming PLOGIs
+ */
+ if (__fcs_min_cfg(port->fcs)) {
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ return;
+ }
+
+ if (fc_plogi_parse(rx_fchs) != FC_PARSE_OK) {
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ /*
+ * send a LS reject
+ */
+ bfa_fcs_port_send_ls_rjt(port, rx_fchs,
+ FC_LS_RJT_RSN_PROTOCOL_ERROR,
+ FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS);
+ return;
+ }
+
+	/**
+	 * Direct Attach P2P mode: verify the address assigned by the rport.
+	 */
+	if (!bfa_fcs_fabric_is_switched(port->fabric) &&
+	    (memcmp((void *)&bfa_fcs_port_get_pwwn(port),
+		    (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
+ if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) {
+ /*
+ * Address assigned to us cannot be a WKA
+ */
+ bfa_fcs_port_send_ls_rjt(port, rx_fchs,
+ FC_LS_RJT_RSN_PROTOCOL_ERROR,
+ FC_LS_RJT_EXP_INVALID_NPORT_ID);
+ return;
+ }
+ port->pid = rx_fchs->d_id;
+ }
+
+ /**
+ * First, check if we know the device by pwwn.
+ */
+ rport = bfa_fcs_port_get_rport_by_pwwn(port, plogi->port_name);
+ if (rport) {
+ /**
+ * Direct Attach P2P mode: handle address assigned by the rport.
+ */
+		if (!bfa_fcs_fabric_is_switched(port->fabric) &&
+		    (memcmp((void *)&bfa_fcs_port_get_pwwn(port),
+			    (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
+ port->pid = rx_fchs->d_id;
+ rport->pid = rx_fchs->s_id;
+ }
+ bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
+ return;
+ }
+
+ /**
+ * Next, lookup rport by PID.
+ */
+ rport = bfa_fcs_port_get_rport_by_pid(port, rx_fchs->s_id);
+ if (!rport) {
+ /**
+ * Inbound PLOGI from a new device.
+ */
+ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
+ return;
+ }
+
+ /**
+ * Rport is known only by PID.
+ */
+ if (rport->pwwn) {
+ /**
+ * This is a different device with the same pid. Old device
+ * disappeared. Send implicit LOGO to old device.
+ */
+ bfa_assert(rport->pwwn != plogi->port_name);
+ bfa_fcs_rport_logo_imp(rport);
+
+ /**
+ * Inbound PLOGI from a new device (with old PID).
+ */
+ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
+ return;
+ }
+
+ /**
+ * PLOGI crossing each other.
+ */
+ bfa_assert(rport->pwwn == WWN_NULL);
+ bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
+}
+
+/*
+ * Process incoming ECHO.
+ * Since it does not require a login, it is processed here.
+ */
+static void
+bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
+ struct fc_echo_s *echo, u16 rx_len)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ int len, pyld_len;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_len);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ if (!fcxp)
+ return;
+
+ len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
+ bfa_fcs_port_get_fcid(port), rx_fchs->ox_id);
+
+ /*
+ * Copy the payload (if any) from the echo frame
+ */
+ pyld_len = rx_len - sizeof(struct fchs_s);
+ bfa_trc(port->fcs, pyld_len);
+
+ if (pyld_len > len)
+ memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) +
+ sizeof(struct fc_echo_s), (echo + 1),
+ (pyld_len - sizeof(struct fc_echo_s)));
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Process incoming RNID.
+ * Since it does not require a login, it is processed here.
+ */
+static void
+bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs,
+ struct fc_rnid_cmd_s *rnid, u16 rx_len)
+{
+ struct fc_rnid_common_id_data_s common_id_data;
+ struct fc_rnid_general_topology_data_s gen_topo_data;
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ u16 len;
+ u32 data_format;
+
+ bfa_trc(port->fcs, rx_fchs->s_id);
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_len);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ if (!fcxp)
+ return;
+
+ /*
+	 * Check the Node Identification Data Format.
+ * We only support General Topology Discovery Format.
+ * For any other requested Data Formats, we return Common Node Id Data
+ * only, as per FC-LS.
+ */
+ bfa_trc(port->fcs, rnid->node_id_data_format);
+ if (rnid->node_id_data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
+ data_format = RNID_NODEID_DATA_FORMAT_DISCOVERY;
+ /*
+ * Get General topology data for this port
+ */
+ bfa_fs_port_get_gen_topo_data(port, &gen_topo_data);
+ } else {
+ data_format = RNID_NODEID_DATA_FORMAT_COMMON;
+ }
+
+ /*
+ * Copy the Node Id Info
+ */
+ common_id_data.port_name = bfa_fcs_port_get_pwwn(port);
+ common_id_data.node_name = bfa_fcs_port_get_nwwn(port);
+
+ len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
+ bfa_fcs_port_get_fcid(port), rx_fchs->ox_id,
+ data_format, &common_id_data, &gen_topo_data);
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+
+ return;
+}
+
+/*
+ * Fill out General Topology Discovery Data for RNID ELS.
+ */
+static void
+bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port,
+ struct fc_rnid_general_topology_data_s *gen_topo_data)
+{
+ bfa_os_memset(gen_topo_data, 0,
+ sizeof(struct fc_rnid_general_topology_data_s));
+
+ gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST);
+ gen_topo_data->phy_port_num = 0; /* @todo */
+ gen_topo_data->num_attached_nodes = bfa_os_htonl(1);
+}
+
+static void
+bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port)
+{
+ bfa_trc(port->fcs, port->fabric->oper_type);
+
+ __port_action[port->fabric->fab_type].init(port);
+ __port_action[port->fabric->fab_type].online(port);
+
+ bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_ONLINE);
+ bfa_fcb_port_online(port->fcs->bfad, port->port_cfg.roles,
+ port->fabric->vf_drv, (port->vport == NULL) ?
+ NULL : port->vport->vport_drv);
+}
+
+static void
+bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port)
+{
+ struct list_head *qe, *qen;
+ struct bfa_fcs_rport_s *rport;
+
+ bfa_trc(port->fcs, port->fabric->oper_type);
+
+ __port_action[port->fabric->fab_type].offline(port);
+
+ if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) {
+ bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
+ } else {
+ bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE);
+ }
+ bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles,
+ port->fabric->vf_drv,
+ (port->vport == NULL) ? NULL : port->vport->vport_drv);
+
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *)qe;
+ bfa_fcs_rport_offline(rport);
+ }
+}
+
+static void
+bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port)
+{
+ bfa_assert(0);
+}
+
+static void
+bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port)
+{
+ bfa_assert(0);
+}
+
+static void
+bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port)
+{
+ bfa_assert(0);
+}
+
+static void
+bfa_fcs_port_deleted(struct bfa_fcs_port_s *port)
+{
+ bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DELETE);
+
+ /*
+ * Base port will be deleted by the OS driver
+ */
+ if (port->vport) {
+ bfa_fcb_port_delete(port->fcs->bfad, port->port_cfg.roles,
+ port->fabric->vf_drv,
+ port->vport ? port->vport->vport_drv : NULL);
+ bfa_fcs_vport_delete_comp(port->vport);
+ } else {
+ bfa_fcs_fabric_port_delete_comp(port->fabric);
+ }
+}
+
+
+
+/**
+ * fcs_lport_api BFA FCS port API
+ */
+/**
+ * Module initialization
+ */
+void
+bfa_fcs_port_modinit(struct bfa_fcs_s *fcs)
+{
+
+}
+
+/**
+ * Module cleanup
+ */
+void
+bfa_fcs_port_modexit(struct bfa_fcs_s *fcs)
+{
+ bfa_fcs_modexit_comp(fcs);
+}
+
+/**
+ * Unsolicited frame receive handling.
+ */
+void
+bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
+ u16 len)
+{
+ u32 pid = fchs->s_id;
+ struct bfa_fcs_rport_s *rport = NULL;
+ struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+ bfa_stats(lport, uf_recvs);
+
+ if (!bfa_fcs_port_is_online(lport)) {
+ bfa_stats(lport, uf_recv_drops);
+ return;
+ }
+
+ /**
+	 * First, handle ELSs that do not require a login.
+ */
+ /*
+ * Handle PLOGI first
+ */
+ if ((fchs->type == FC_TYPE_ELS) &&
+ (els_cmd->els_code == FC_ELS_PLOGI)) {
+ bfa_fcs_port_plogi(lport, fchs, (struct fc_logi_s *) els_cmd);
+ return;
+ }
+
+ /*
+ * Handle ECHO separately.
+ */
+ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) {
+ bfa_fcs_port_echo(lport, fchs,
+ (struct fc_echo_s *) els_cmd, len);
+ return;
+ }
+
+ /*
+ * Handle RNID separately.
+ */
+ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) {
+ bfa_fcs_port_rnid(lport, fchs,
+ (struct fc_rnid_cmd_s *) els_cmd, len);
+ return;
+ }
+
+ /**
+ * look for a matching remote port ID
+ */
+ rport = bfa_fcs_port_get_rport_by_pid(lport, pid);
+ if (rport) {
+ bfa_trc(rport->fcs, fchs->s_id);
+ bfa_trc(rport->fcs, fchs->d_id);
+ bfa_trc(rport->fcs, fchs->type);
+
+ bfa_fcs_rport_uf_recv(rport, fchs, len);
+ return;
+ }
+
+ /**
+ * Only handles ELS frames for now.
+ */
+ if (fchs->type != FC_TYPE_ELS) {
+ bfa_trc(lport->fcs, fchs->type);
+ bfa_assert(0);
+ return;
+ }
+
+ bfa_trc(lport->fcs, els_cmd->els_code);
+ if (els_cmd->els_code == FC_ELS_RSCN) {
+ bfa_fcs_port_scn_process_rscn(lport, fchs, len);
+ return;
+ }
+
+ if (els_cmd->els_code == FC_ELS_LOGO) {
+ /**
+ * @todo Handle LOGO frames received.
+ */
+ bfa_trc(lport->fcs, els_cmd->els_code);
+ return;
+ }
+
+ if (els_cmd->els_code == FC_ELS_PRLI) {
+ /**
+ * @todo Handle PRLI frames received.
+ */
+ bfa_trc(lport->fcs, els_cmd->els_code);
+ return;
+ }
+
+ /**
+ * Unhandled ELS frames. Send a LS_RJT.
+ */
+ bfa_fcs_port_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
+ FC_LS_RJT_EXP_NO_ADDL_INFO);
+}
+
+/**
+ * PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *)qe;
+ if (rport->pid == pid)
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pid);
+ return NULL;
+}
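+
+/*
+ * The direct cast from the list_head to bfa_fcs_rport_s here (and in the
+ * lookups below) assumes the qe link is the first member of
+ * struct bfa_fcs_rport_s so the two pointers coincide; the equivalent
+ * position-independent idiom would be
+ * list_entry(qe, struct bfa_fcs_rport_s, qe).
+ */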
+
+/**
+ * PWWN based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *)qe;
+ if (wwn_is_equal(rport->pwwn, pwwn))
+ return rport;
+ }
+
+ bfa_trc(port->fcs, pwwn);
+ return (NULL);
+}
+
+/**
+ * NWWN based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn)
+{
+ struct bfa_fcs_rport_s *rport;
+ struct list_head *qe;
+
+ list_for_each(qe, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *)qe;
+ if (wwn_is_equal(rport->nwwn, nwwn))
+ return rport;
+ }
+
+ bfa_trc(port->fcs, nwwn);
+ return (NULL);
+}
+
+/**
+ * Called by rport module when new rports are discovered.
+ */
+void
+bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port,
+ struct bfa_fcs_rport_s *rport)
+{
+ list_add_tail(&rport->qe, &port->rport_q);
+ port->num_rports++;
+}
+
+/**
+ * Called by rport module when rports are deleted.
+ */
+void
+bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port,
+ struct bfa_fcs_rport_s *rport)
+{
+ bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
+ list_del(&rport->qe);
+ port->num_rports--;
+
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
+}
+
+/**
+ * Called by fabric for base port when fabric login is complete.
+ * Called by vport for virtual ports when FDISC is complete.
+ */
+void
+bfa_fcs_port_online(struct bfa_fcs_port_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
+}
+
+/**
+ * Called by fabric for base port when fabric goes offline.
+ * Called by vport for virtual ports when virtual port becomes offline.
+ */
+void
+bfa_fcs_port_offline(struct bfa_fcs_port_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
+}
+
+/**
+ * Called by fabric to delete base lport and associated resources.
+ *
+ * Called by vport to delete lport and associated resources. Should call
+ * bfa_fcs_vport_delete_comp() for vports on completion.
+ */
+void
+bfa_fcs_port_delete(struct bfa_fcs_port_s *port)
+{
+ bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
+}
+
+/**
+ * Called by fabric in private loop topology to process LIP event.
+ */
+void
+bfa_fcs_port_lip(struct bfa_fcs_port_s *port)
+{
+}
+
+/**
+ * Return TRUE if port is online, else return FALSE
+ */
+bfa_boolean_t
+bfa_fcs_port_is_online(struct bfa_fcs_port_s *port)
+{
+ return (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online));
+}
+
+/**
+ * Logical port initialization of base or virtual port.
+ * Called by fabric for base port or by vport for virtual ports.
+ */
+void
+bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
+ u16 vf_id, struct bfa_port_cfg_s *port_cfg,
+ struct bfa_fcs_vport_s *vport)
+{
+ lport->fcs = fcs;
+ lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
+ bfa_os_assign(lport->port_cfg, *port_cfg);
+ lport->vport = vport;
+ lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
+ bfa_lps_get_tag(lport->fabric->lps);
+
+ INIT_LIST_HEAD(&lport->rport_q);
+ lport->num_rports = 0;
+
+ lport->bfad_port =
+ bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles,
+ lport->fabric->vf_drv,
+ vport ? vport->vport_drv : NULL);
+ bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW);
+
+ bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit);
+ bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
+}
+
+
+
+/**
+ * fcs_lport_api
+ */
+
+void
+bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
+ struct bfa_port_attr_s *port_attr)
+{
+ if (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online))
+ port_attr->pid = port->pid;
+ else
+ port_attr->pid = 0;
+
+ port_attr->port_cfg = port->port_cfg;
+
+ if (port->fabric) {
+ port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
+ port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
+ port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port);
+ memcpy(port_attr->fabric_ip_addr,
+ bfa_fcs_port_get_fabric_ipaddr(port),
+ BFA_FCS_FABRIC_IPADDR_SZ);
+
+ if (port->vport != NULL)
+ port_attr->port_type = BFA_PPORT_TYPE_VPORT;
+
+ } else {
+ port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN;
+ port_attr->state = BFA_PORT_UNINIT;
+ }
+}
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
new file mode 100644
index 000000000000..9c4b24e62de1
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_port.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/**
+ * bfa_fcs_port.c BFA FCS PPORT (physical port)
+ */
+
+#include <fcs/bfa_fcs.h>
+#include <bfa_svc.h>
+#include <fcs/bfa_fcs_fabric.h>
+#include "fcs_trcmod.h"
+#include "fcs.h"
+#include "fcs_fabric.h"
+#include "fcs_port.h"
+
+BFA_TRC_FILE(FCS, PPORT);
+
+static void
+bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
+{
+ struct bfa_fcs_s *fcs = cbarg;
+
+ bfa_trc(fcs, event);
+
+ switch (event) {
+ case BFA_PPORT_LINKUP:
+ bfa_fcs_fabric_link_up(&fcs->fabric);
+ break;
+
+ case BFA_PPORT_LINKDOWN:
+ bfa_fcs_fabric_link_down(&fcs->fabric);
+ break;
+
+ case BFA_PPORT_TRUNK_LINKDOWN:
+ bfa_assert(0);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+void
+bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs)
+{
+ bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler,
+ fcs);
+}
+
+void
+bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs)
+{
+ bfa_fcs_modexit_comp(fcs);
+}
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
new file mode 100644
index 000000000000..ad01db6444b2
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_uf.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/**
+ * bfa_fcs_uf.c BFA FCS UF (Unsolicited Frames)
+ */
+
+#include <fcs/bfa_fcs.h>
+#include <bfa_svc.h>
+#include <fcs/bfa_fcs_fabric.h>
+#include "fcs.h"
+#include "fcs_trcmod.h"
+#include "fcs_fabric.h"
+#include "fcs_uf.h"
+
+BFA_TRC_FILE(FCS, UF);
+
+/**
+ * BFA callback for unsolicited frame receive handler.
+ *
+ * @param[in] cbarg callback arg for receive handler
+ * @param[in] uf unsolicited frame descriptor
+ *
+ * @return None
+ */
+static void
+bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
+{
+ struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
+ struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
+ u16 len = bfa_uf_get_frmlen(uf);
+ struct fc_vft_s *vft;
+ struct bfa_fcs_fabric_s *fabric;
+
+ /**
+ * check for VFT header
+ */
+ if (fchs->routing == FC_RTG_EXT_HDR &&
+ fchs->cat_info == FC_CAT_VFT_HDR) {
+ bfa_stats(fcs, uf.tagged);
+ vft = bfa_uf_get_frmbuf(uf);
+ if (fcs->port_vfid == vft->vf_id)
+ fabric = &fcs->fabric;
+ else
+ fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
+
+ /**
+ * drop frame if vfid is unknown
+ */
+ if (!fabric) {
+ bfa_assert(0);
+ bfa_stats(fcs, uf.vfid_unknown);
+ bfa_uf_free(uf);
+ return;
+ }
+
+ /**
+ * skip vft header
+ */
+ fchs = (struct fchs_s *) (vft + 1);
+ len -= sizeof(struct fc_vft_s);
+
+ bfa_trc(fcs, vft->vf_id);
+ } else {
+ bfa_stats(fcs, uf.untagged);
+ fabric = &fcs->fabric;
+ }
+
+ bfa_trc(fcs, ((u32 *) fchs)[0]);
+ bfa_trc(fcs, ((u32 *) fchs)[1]);
+ bfa_trc(fcs, ((u32 *) fchs)[2]);
+ bfa_trc(fcs, ((u32 *) fchs)[3]);
+ bfa_trc(fcs, ((u32 *) fchs)[4]);
+ bfa_trc(fcs, ((u32 *) fchs)[5]);
+ bfa_trc(fcs, len);
+
+ bfa_fcs_fabric_uf_recv(fabric, fchs, len);
+ bfa_uf_free(uf);
+}
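+
+/*
+ * For reference, the tagged frame layout handled above (a sketch):
+ *
+ *	[ VFT extended header ][ FC header ][ payload ... ]
+ *	  struct fc_vft_s        struct fchs_s
+ *
+ * Stripping the tag is therefore fchs = (struct fchs_s *)(vft + 1),
+ * with the frame length reduced by sizeof(struct fc_vft_s).
+ */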
+
+void
+bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs)
+{
+ bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
+}
+
+void
+bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs)
+{
+ bfa_fcs_modexit_comp(fcs);
+}
diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c
new file mode 100644
index 000000000000..4754a0e9006a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcxp.c
@@ -0,0 +1,782 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <bfi/bfi_uf.h>
+#include <cs/bfa_debug.h>
+
+BFA_TRC_FILE(HAL, FCXP);
+BFA_MODULE(fcxp);
+
+/**
+ * forward declarations
+ */
+static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
+static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+ struct bfi_fcxp_send_rsp_s *fcxp_rsp);
+static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
+ struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
+static void bfa_fcxp_qresume(void *cbarg);
+static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
+ struct bfi_fcxp_send_req_s *send_req);
+
+/**
+ * fcxp_pvt BFA FCXP private functions
+ */
+
+static void
+claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
+{
+ u8 *dm_kva = NULL;
+ u64 dm_pa;
+ u32 buf_pool_sz;
+
+ dm_kva = bfa_meminfo_dma_virt(mi);
+ dm_pa = bfa_meminfo_dma_phys(mi);
+
+ buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
+
+ /*
+ * Initialize the fcxp req payload list
+ */
+ mod->req_pld_list_kva = dm_kva;
+ mod->req_pld_list_pa = dm_pa;
+ dm_kva += buf_pool_sz;
+ dm_pa += buf_pool_sz;
+ bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
+
+ /*
+ * Initialize the fcxp rsp payload list
+ */
+ buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
+ mod->rsp_pld_list_kva = dm_kva;
+ mod->rsp_pld_list_pa = dm_pa;
+ dm_kva += buf_pool_sz;
+ dm_pa += buf_pool_sz;
+ bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
+
+ bfa_meminfo_dma_virt(mi) = dm_kva;
+ bfa_meminfo_dma_phys(mi) = dm_pa;
+}
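+
+/*
+ * Resulting DMA memory layout (a sketch): two contiguous per-tag arrays,
+ * so the request payload of the FCXP with tag t lives at
+ * req_pld_list_kva + t * req_pld_sz (likewise for responses). The
+ * BFA_FCXP_REQ_PLD_PA()/BFA_FCXP_RSP_PLD_PA() macros in bfa_fcxp_priv.h
+ * apply the same arithmetic to the physical base addresses.
+ *
+ *	req_pld_list_kva: [req pld 0][req pld 1] ... [req pld N-1]
+ *	rsp_pld_list_kva: [rsp pld 0][rsp pld 1] ... [rsp pld N-1]
+ */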
+
+static void
+claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
+{
+ u16 i;
+ struct bfa_fcxp_s *fcxp;
+
+ fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
+ bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
+
+ INIT_LIST_HEAD(&mod->fcxp_free_q);
+ INIT_LIST_HEAD(&mod->fcxp_active_q);
+
+ mod->fcxp_list = fcxp;
+
+ for (i = 0; i < mod->num_fcxps; i++) {
+ fcxp->fcxp_mod = mod;
+ fcxp->fcxp_tag = i;
+
+ list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+ bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
+ fcxp->reqq_waiting = BFA_FALSE;
+
+		fcxp++;
+ }
+
+ bfa_meminfo_kva(mi) = (void *)fcxp;
+}
+
+static void
+bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+ u32 *dm_len)
+{
+ u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
+
+ if (num_fcxp_reqs == 0)
+ return;
+
+ /*
+ * Account for req/rsp payload
+ */
+ *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
+ if (cfg->drvcfg.min_cfg)
+ *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
+ else
+ *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
+
+ /*
+ * Account for fcxp structs
+ */
+ *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
+}
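+
+/*
+ * Worked example of the accounting above, assuming num_fcxp_reqs = 64
+ * and a full (non-min) configuration, with BFA_FCXP_MAX_IBUF_SZ = 2304
+ * and BFA_FCXP_MAX_LBUF_SZ = 4352 bytes (see bfa_fcxp_priv.h):
+ *
+ *	dm_len  += 64 * 2304 = 147456	request payloads
+ *	dm_len  += 64 * 4352 = 278528	response payloads
+ *	ndm_len += 64 * sizeof(struct bfa_fcxp_s)
+ */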
+
+static void
+bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
+ mod->bfa = bfa;
+ mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+
+ /**
+ * Initialize FCXP request and response payload sizes.
+ */
+ mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
+ if (!cfg->drvcfg.min_cfg)
+ mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
+
+ INIT_LIST_HEAD(&mod->wait_q);
+
+ claim_fcxp_req_rsp_mem(mod, meminfo);
+ claim_fcxps_mem(mod, meminfo);
+}
+
+static void
+bfa_fcxp_initdone(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+ struct bfa_fcxp_s *fcxp;
+ struct list_head *qe, *qen;
+
+ list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
+ fcxp = (struct bfa_fcxp_s *) qe;
+ if (fcxp->caller == NULL) {
+ fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+ BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
+ bfa_fcxp_free(fcxp);
+ } else {
+ fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
+ bfa_cb_queue(bfa, &fcxp->hcb_qe,
+ __bfa_fcxp_send_cbfn, fcxp);
+ }
+ }
+}
+
+static struct bfa_fcxp_s *
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
+{
+ struct bfa_fcxp_s *fcxp;
+
+ bfa_q_deq(&fm->fcxp_free_q, &fcxp);
+
+ if (fcxp)
+ list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
+
+	return fcxp;
+}
+
+static void
+bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+ struct bfa_fcxp_wqe_s *wqe;
+
+ bfa_q_deq(&mod->wait_q, &wqe);
+ if (wqe) {
+ bfa_trc(mod->bfa, fcxp->fcxp_tag);
+ wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
+ return;
+ }
+
+ bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
+ list_del(&fcxp->qe);
+ list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+}
+
+static void
+bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len,
+ u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	/* discarded fcxp completion */
+}
+
+static void
+__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_fcxp_s *fcxp = cbarg;
+
+ if (complete) {
+ fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+ fcxp->rsp_status, fcxp->rsp_len,
+ fcxp->residue_len, &fcxp->rsp_fchs);
+ } else {
+ bfa_fcxp_free(fcxp);
+ }
+}
+
+static void
+hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+ struct bfa_fcxp_s *fcxp;
+ u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
+
+ bfa_trc(bfa, fcxp_tag);
+
+ fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
+
+ /**
+ * @todo f/w should not set residue to non-0 when everything
+ * is received.
+ */
+ if (fcxp_rsp->req_status == BFA_STATUS_OK)
+ fcxp_rsp->residue_len = 0;
+ else
+ fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
+
+ fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
+
+ bfa_assert(fcxp->send_cbfn != NULL);
+
+ hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
+
+ if (fcxp->send_cbfn != NULL) {
+ if (fcxp->caller == NULL) {
+ bfa_trc(mod->bfa, fcxp->fcxp_tag);
+
+ fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+ fcxp_rsp->req_status, fcxp_rsp->rsp_len,
+ fcxp_rsp->residue_len, &fcxp_rsp->fchs);
+ /*
+ * fcxp automatically freed on return from the callback
+ */
+ bfa_fcxp_free(fcxp);
+ } else {
+ bfa_trc(mod->bfa, fcxp->fcxp_tag);
+ fcxp->rsp_status = fcxp_rsp->req_status;
+ fcxp->rsp_len = fcxp_rsp->rsp_len;
+ fcxp->residue_len = fcxp_rsp->residue_len;
+ fcxp->rsp_fchs = fcxp_rsp->fchs;
+
+ bfa_cb_queue(bfa, &fcxp->hcb_qe,
+ __bfa_fcxp_send_cbfn, fcxp);
+ }
+ } else {
+ bfa_trc(bfa, fcxp_tag);
+ }
+}
+
+static void
+hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
+{
+ union bfi_addr_u sga_zero = { {0} };
+
+ sge->sg_len = reqlen;
+ sge->flags = BFI_SGE_DATA_LAST;
+ bfa_dma_addr_set(sge[0].sga, req_pa);
+ bfa_sge_to_be(sge);
+ sge++;
+
+ sge->sga = sga_zero;
+ sge->sg_len = reqlen;
+ sge->flags = BFI_SGE_PGDLEN;
+ bfa_sge_to_be(sge);
+}
+
+static void
+hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
+ struct fchs_s *fchs)
+{
+ /*
+ * TODO: TX ox_id
+ */
+ if (reqlen > 0) {
+ if (fcxp->use_ireqbuf) {
+ u32 pld_w0 =
+ *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
+
+ bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_TX,
+ reqlen + sizeof(struct fchs_s), fchs, pld_w0);
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s),
+ fchs);
+ }
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
+ reqlen + sizeof(struct fchs_s), fchs);
+ }
+}
+
+static void
+hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+ struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+ if (fcxp_rsp->rsp_len > 0) {
+ if (fcxp->use_irspbuf) {
+ u32 pld_w0 =
+ *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
+
+ bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_RX,
+ (u16) fcxp_rsp->rsp_len,
+ &fcxp_rsp->fchs, pld_w0);
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+ BFA_PL_EID_RX,
+ (u16) fcxp_rsp->rsp_len,
+ &fcxp_rsp->fchs);
+ }
+ } else {
+ bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
+ (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
+ }
+}
+
+/**
+ * Handler to resume sending fcxp when space is available in CPE queue.
+ */
+static void
+bfa_fcxp_qresume(void *cbarg)
+{
+ struct bfa_fcxp_s *fcxp = cbarg;
+ struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
+ struct bfi_fcxp_send_req_s *send_req;
+
+ fcxp->reqq_waiting = BFA_FALSE;
+ send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+ bfa_fcxp_queue(fcxp, send_req);
+}
+
+/**
+ * Queue fcxp send request to firmware.
+ */
+static void
+bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
+{
+ struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
+ struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
+ struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
+ struct bfa_rport_s *rport = reqi->bfa_rport;
+
+ bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
+ bfa_lpuid(bfa));
+
+ send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
+ if (rport) {
+ send_req->rport_fw_hndl = rport->fw_handle;
+ send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
+ if (send_req->max_frmsz == 0)
+ send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+ } else {
+ send_req->rport_fw_hndl = 0;
+ send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+ }
+
+ send_req->vf_id = bfa_os_htons(reqi->vf_id);
+ send_req->lp_tag = reqi->lp_tag;
+ send_req->class = reqi->class;
+ send_req->rsp_timeout = rspi->rsp_timeout;
+ send_req->cts = reqi->cts;
+ send_req->fchs = reqi->fchs;
+
+ send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
+ send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
+
+ /*
+ * setup req sgles
+ */
+ if (fcxp->use_ireqbuf == 1) {
+ hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
+ BFA_FCXP_REQ_PLD_PA(fcxp));
+ } else {
+ if (fcxp->nreq_sgles > 0) {
+ bfa_assert(fcxp->nreq_sgles == 1);
+ hal_fcxp_set_local_sges(send_req->req_sge,
+ reqi->req_tot_len,
+ fcxp->req_sga_cbfn(fcxp->caller,
+ 0));
+ } else {
+ bfa_assert(reqi->req_tot_len == 0);
+			hal_fcxp_set_local_sges(send_req->req_sge, 0, 0);
+ }
+ }
+
+ /*
+ * setup rsp sgles
+ */
+ if (fcxp->use_irspbuf == 1) {
+ bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
+
+ hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
+ BFA_FCXP_RSP_PLD_PA(fcxp));
+
+ } else {
+ if (fcxp->nrsp_sgles > 0) {
+ bfa_assert(fcxp->nrsp_sgles == 1);
+ hal_fcxp_set_local_sges(send_req->rsp_sge,
+ rspi->rsp_maxlen,
+ fcxp->rsp_sga_cbfn(fcxp->caller,
+ 0));
+ } else {
+ bfa_assert(rspi->rsp_maxlen == 0);
+ hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+ }
+ }
+
+ hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
+
+ bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
+
+ bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
+ bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
+}
+
+
+/**
+ * hal_fcxp_api BFA FCXP API
+ */
+
+/**
+ * Allocate an FCXP instance to send a response or to send a request
+ * that has a response. Request/response buffers are allocated by caller.
+ *
+ * @param[in]	caller		caller context, passed back to the SG
+ *			callbacks and the send completion callback
+ * @param[in]	bfa		BFA instance
+ * @param[in]	nreq_sgles	Number of SG elements required for request
+ *			buffer. 0, if fcxp internal buffers are used.
+ *			Use bfa_fcxp_get_reqbuf() to get the
+ *			internal req buffer.
+ * @param[in]	nrsp_sgles	Number of SG elements required for response
+ *			buffer. 0, if fcxp internal buffers are used.
+ * @param[in]	req_sga_cbfn	function ptr to be called to get a request SG
+ *			address (given the sge index).
+ * @param[in]	req_sglen_cbfn	function ptr to be called to get a request SG
+ *			len (given the sge index).
+ * @param[in]	rsp_sga_cbfn	function ptr to be called to get a response SG
+ *			address (given the sge index).
+ * @param[in]	rsp_sglen_cbfn	function ptr to be called to get a response SG
+ *			len (given the sge index).
+ *
+ * @return FCXP instance. NULL on failure.
+ */
+struct bfa_fcxp_s *
+bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+ int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+ bfa_fcxp_get_sglen_t req_sglen_cbfn,
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+{
+ struct bfa_fcxp_s *fcxp = NULL;
+ u32 nreq_sgpg, nrsp_sgpg;
+
+ bfa_assert(bfa != NULL);
+
+ fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
+ if (fcxp == NULL)
+		return NULL;
+
+ bfa_trc(bfa, fcxp->fcxp_tag);
+
+ fcxp->caller = caller;
+
+ if (nreq_sgles == 0) {
+ fcxp->use_ireqbuf = 1;
+ } else {
+ bfa_assert(req_sga_cbfn != NULL);
+ bfa_assert(req_sglen_cbfn != NULL);
+
+ fcxp->use_ireqbuf = 0;
+ fcxp->req_sga_cbfn = req_sga_cbfn;
+ fcxp->req_sglen_cbfn = req_sglen_cbfn;
+
+ fcxp->nreq_sgles = nreq_sgles;
+
+ /*
+ * alloc required sgpgs
+ */
+ if (nreq_sgles > BFI_SGE_INLINE) {
+ nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
+
+ if (bfa_sgpg_malloc
+ (bfa, &fcxp->req_sgpg_q, nreq_sgpg)
+ != BFA_STATUS_OK) {
+ /* bfa_sgpg_wait(bfa, &fcxp->req_sgpg_wqe,
+ nreq_sgpg); */
+ /*
+ * TODO
+ */
+ }
+ }
+ }
+
+ if (nrsp_sgles == 0) {
+ fcxp->use_irspbuf = 1;
+ } else {
+ bfa_assert(rsp_sga_cbfn != NULL);
+ bfa_assert(rsp_sglen_cbfn != NULL);
+
+ fcxp->use_irspbuf = 0;
+ fcxp->rsp_sga_cbfn = rsp_sga_cbfn;
+ fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn;
+
+ fcxp->nrsp_sgles = nrsp_sgles;
+ /*
+ * alloc required sgpgs
+ */
+ if (nrsp_sgles > BFI_SGE_INLINE) {
+			nrsp_sgpg = BFA_SGPG_NPAGE(nrsp_sgles);
+
+ if (bfa_sgpg_malloc
+ (bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg)
+ != BFA_STATUS_OK) {
+ /* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe,
+ nrsp_sgpg); */
+ /*
+ * TODO
+ */
+ }
+ }
+ }
+
+	return fcxp;
+}
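+
+/*
+ * Allocation sketch (hypothetical caller): with internal buffers, all
+ * SG counts and callbacks are zero/NULL and the request is built in the
+ * buffer returned by bfa_fcxp_get_reqbuf(). "my_ctx" and "pld" are
+ * assumed caller-side names.
+ *
+ *	fcxp = bfa_fcxp_alloc(my_ctx, bfa, 0, 0, NULL, NULL, NULL, NULL);
+ *	if (fcxp == NULL)
+ *		return;
+ *	pld = bfa_fcxp_get_reqbuf(fcxp);
+ */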
+
+/**
+ * Get the internal request buffer pointer
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return pointer to the internal request buffer
+ */
+void *
+bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+ void *reqbuf;
+
+ bfa_assert(fcxp->use_ireqbuf == 1);
+ reqbuf = ((u8 *)mod->req_pld_list_kva) +
+ fcxp->fcxp_tag * mod->req_pld_sz;
+ return reqbuf;
+}
+
+u32
+bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+ return mod->req_pld_sz;
+}
+
+/**
+ * Get the internal response buffer pointer
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return pointer to the internal response buffer
+ */
+void *
+bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+ void *rspbuf;
+
+ bfa_assert(fcxp->use_irspbuf == 1);
+
+ rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
+ fcxp->fcxp_tag * mod->rsp_pld_sz;
+ return rspbuf;
+}
+
+/**
+ * Free the BFA FCXP
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return void
+ */
+void
+bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
+{
+ struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+ bfa_assert(fcxp != NULL);
+ bfa_trc(mod->bfa, fcxp->fcxp_tag);
+ bfa_fcxp_put(fcxp);
+}
+
+/**
+ * Send a FCXP request
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
+ * @param[in] vf_id virtual Fabric ID
+ * @param[in] lp_tag lport tag
+ * @param[in]	cts	use continuous sequence
+ * @param[in]	cos	FC class of service
+ * @param[in] reqlen request length, does not include FCHS length
+ * @param[in] fchs fc Header Pointer. The header content will be copied
+ * in by BFA.
+ *
+ * @param[in] cbfn call back function to be called on receiving
+ * the response
+ * @param[in] cbarg arg for cbfn
+ * @param[in]	rsp_timeout	response timeout in seconds
+ *
+ * @return void
+ */
+void
+bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
+ u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
+ u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
+ void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
+{
+ struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
+ struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
+ struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
+ struct bfi_fcxp_send_req_s *send_req;
+
+ bfa_trc(bfa, fcxp->fcxp_tag);
+
+ /**
+ * setup request/response info
+ */
+ reqi->bfa_rport = rport;
+ reqi->vf_id = vf_id;
+ reqi->lp_tag = lp_tag;
+ reqi->class = cos;
+ rspi->rsp_timeout = rsp_timeout;
+ reqi->cts = cts;
+ reqi->fchs = *fchs;
+ reqi->req_tot_len = reqlen;
+ rspi->rsp_maxlen = rsp_maxlen;
+ fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
+ fcxp->send_cbarg = cbarg;
+
+ /**
+	 * If no room in CPE queue, wait for space to become available.
+ */
+ send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+ if (!send_req) {
+ bfa_trc(bfa, fcxp->fcxp_tag);
+ fcxp->reqq_waiting = BFA_TRUE;
+ bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
+ return;
+ }
+
+ bfa_fcxp_queue(fcxp, send_req);
+}
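+
+/*
+ * End-to-end sketch (hypothetical caller): send a request built in the
+ * internal buffer and consume the response in the completion callback.
+ * "my_cbfn", "my_arg", "reqlen", "fchs", process() and FC_CLASS_3 are
+ * assumed caller-side names; the callback signature follows
+ * bfa_cb_fcxp_send_t, and 30 is an assumed response timeout in seconds.
+ *
+ *	static void
+ *	my_cbfn(void *caller, struct bfa_fcxp_s *fcxp, void *cbarg,
+ *		bfa_status_t sts, u32 rsp_len, u32 resid_len,
+ *		struct fchs_s *rsp_fchs)
+ *	{
+ *		if (sts == BFA_STATUS_OK)
+ *			process(bfa_fcxp_get_rspbuf(fcxp), rsp_len);
+ *	}
+ *
+ *	bfa_fcxp_send(fcxp, rport, vf_id, lp_tag, BFA_FALSE, FC_CLASS_3,
+ *		      reqlen, &fchs, my_cbfn, my_arg,
+ *		      bfa_fcxp_get_maxrsp(bfa), 30);
+ */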
+
+/**
+ * Abort a BFA FCXP
+ *
+ * @param[in] fcxp BFA fcxp pointer
+ *
+ * @return bfa_status_t
+ */
+bfa_status_t
+bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
+{
+ bfa_assert(0);
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+ bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ bfa_assert(list_empty(&mod->fcxp_free_q));
+
+ wqe->alloc_cbfn = alloc_cbfn;
+ wqe->alloc_cbarg = alloc_cbarg;
+ list_add_tail(&wqe->qe, &mod->wait_q);
+}
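+
+/*
+ * Typical wait pattern (a sketch): when bfa_fcxp_alloc() returns NULL,
+ * park a wait element here; alloc_cbfn is invoked with a ready fcxp as
+ * soon as one is returned through bfa_fcxp_put(). "my_ctx", "my_wqe",
+ * "my_alloc_cbfn" and "my_arg" are assumed caller-side names.
+ *
+ *	fcxp = bfa_fcxp_alloc(my_ctx, bfa, 0, 0, NULL, NULL, NULL, NULL);
+ *	if (fcxp == NULL)
+ *		bfa_fcxp_alloc_wait(bfa, &my_wqe, my_alloc_cbfn, my_arg);
+ */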
+
+void
+bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
+ list_del(&wqe->qe);
+}
+
+void
+bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
+{
+ /**
+ * If waiting for room in request queue, cancel reqq wait
+ * and free fcxp.
+ */
+ if (fcxp->reqq_waiting) {
+ fcxp->reqq_waiting = BFA_FALSE;
+ bfa_reqq_wcancel(&fcxp->reqq_wqe);
+ bfa_fcxp_free(fcxp);
+ return;
+ }
+
+ fcxp->send_cbfn = bfa_fcxp_null_comp;
+}
+
+
+
+/**
+ * hal_fcxp_public BFA FCXP public functions
+ */
+
+void
+bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+ switch (msg->mhdr.msg_id) {
+ case BFI_FCXP_I2H_SEND_RSP:
+ hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
+ break;
+
+ default:
+ bfa_trc(bfa, msg->mhdr.msg_id);
+ bfa_assert(0);
+ }
+}
+
+u32
+bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+ return mod->rsp_pld_sz;
+}
diff --git a/drivers/scsi/bfa/bfa_fcxp_priv.h b/drivers/scsi/bfa/bfa_fcxp_priv.h
new file mode 100644
index 000000000000..4cda49397da0
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcxp_priv.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCXP_PRIV_H__
+#define __BFA_FCXP_PRIV_H__
+
+#include <cs/bfa_sm.h>
+#include <protocol/fc.h>
+#include <bfa_svc.h>
+#include <bfi/bfi_fcxp.h>
+
+#define BFA_FCXP_MIN (1)
+#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
+#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
+
+struct bfa_fcxp_mod_s {
+ struct bfa_s *bfa; /* backpointer to BFA */
+ struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
+ u16 num_fcxps; /* max num FCXP requests */
+ struct list_head fcxp_free_q; /* free FCXPs */
+ struct list_head fcxp_active_q; /* active FCXPs */
+ void *req_pld_list_kva; /* list of FCXP req pld */
+ u64 req_pld_list_pa; /* list of FCXP req pld */
+ void *rsp_pld_list_kva; /* list of FCXP resp pld */
+ u64 rsp_pld_list_pa; /* list of FCXP resp pld */
+ struct list_head wait_q; /* wait queue for free fcxp */
+ u32 req_pld_sz;
+ u32 rsp_pld_sz;
+};
+
+#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
+#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
+
+typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
+ void *cb_arg, bfa_status_t req_status,
+ u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs);
+
+/**
+ * Information needed for a FCXP request
+ */
+struct bfa_fcxp_req_info_s {
+ struct bfa_rport_s *bfa_rport; /* Pointer to the bfa rport that was
+					 * returned from bfa_rport_create().
+					 * This could be left NULL for WKA or
+					 * for FCXP interactions before the
+					 * rport nexus is established
+					 */
+ struct fchs_s fchs; /* request FC header structure */
+	u8 cts;			/* continuous sequence */
+ u8 class; /* FC class for the request/response */
+ u16 max_frmsz; /* max send frame size */
+ u16 vf_id; /* vsan tag if applicable */
+ u8 lp_tag; /* lport tag */
+ u32 req_tot_len; /* request payload total length */
+};
+
+struct bfa_fcxp_rsp_info_s {
+ struct fchs_s rsp_fchs; /* Response frame's FC header will
+					 * be sent back in this field */
+	u8 rsp_timeout;		/* timeout in seconds, 0 - no response */
+ u8 rsvd2[3];
+ u32 rsp_maxlen; /* max response length expected */
+};
+
+struct bfa_fcxp_s {
+ struct list_head qe; /* fcxp queue element */
+ bfa_sm_t sm; /* state machine */
+ void *caller; /* driver or fcs */
+ struct bfa_fcxp_mod_s *fcxp_mod;
+ /* back pointer to fcxp mod */
+ u16 fcxp_tag; /* internal tag */
+ struct bfa_fcxp_req_info_s req_info;
+ /* request info */
+ struct bfa_fcxp_rsp_info_s rsp_info;
+ /* response info */
+ u8 use_ireqbuf; /* use internal req buf */
+ u8 use_irspbuf; /* use internal rsp buf */
+ u32 nreq_sgles; /* num request SGLEs */
+ u32 nrsp_sgles; /* num response SGLEs */
+ struct list_head req_sgpg_q; /* SG pages for request buf */
+ struct list_head req_sgpg_wqe; /* wait queue for req SG page */
+ struct list_head rsp_sgpg_q; /* SG pages for response buf */
+ struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
+
+ bfa_fcxp_get_sgaddr_t req_sga_cbfn;
+ /* SG elem addr user function */
+ bfa_fcxp_get_sglen_t req_sglen_cbfn;
+ /* SG elem len user function */
+ bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
+ /* SG elem addr user function */
+ bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
+ /* SG elem len user function */
+ bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
+ void *send_cbarg; /* callback arg */
+ struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
+ /* req SG elems */
+ struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
+ /* rsp SG elems */
+ u8 rsp_status; /* comp: rsp status */
+ u32 rsp_len; /* comp: actual response len */
+ u32 residue_len; /* comp: residual rsp length */
+ struct fchs_s rsp_fchs; /* comp: response fchs */
+ struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
+ struct bfa_reqq_wait_s reqq_wqe;
+ bfa_boolean_t reqq_waiting;
+};
+
+#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
+
+#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
+#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
+
+#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
+ ((_fcxp)->fcxp_mod->req_pld_list_pa + \
+ ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
+
+#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
+ ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
+ ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
+
+void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+#endif /* __BFA_FCXP_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_fwimg_priv.h b/drivers/scsi/bfa/bfa_fwimg_priv.h
new file mode 100644
index 000000000000..1ec1355924d9
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fwimg_priv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FWIMG_PRIV_H__
+#define __BFA_FWIMG_PRIV_H__
+
+#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
+#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+
+extern u32 *bfi_image_ct_get_chunk(u32 off);
+extern u32 bfi_image_ct_size;
+extern u32 *bfi_image_cb_get_chunk(u32 off);
+extern u32 bfi_image_cb_size;
+extern u32 *bfi_image_cb;
+extern u32 *bfi_image_ct;
+
+#endif /* __BFA_FWIMG_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
new file mode 100644
index 000000000000..ede1438619e2
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa_priv.h>
+#include <bfi/bfi_cbreg.h>
+
+void
+bfa_hwcb_reginit(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+ bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+ int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+
+ if (fn == 0) {
+ bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
+ } else {
+ bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
+ }
+
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
+ /*
+ * CPE registers
+ */
+ q = CPE_Q_NUM(fn, i);
+ bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
+ bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
+ bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));
+
+ /*
+ * RME registers
+ */
+ q = CPE_Q_NUM(fn, i);
+ bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
+ bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
+ bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
+ }
+}
+
+void
+bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
+{
+}
+
+static void
+bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
+{
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
+ __HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
+}
+
+void
+bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
+ u32 *num_vecs, u32 *max_vec_bit)
+{
+#define __HFN_NUMINTS 13
+ if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
+ *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
+ __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
+ __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
+ __HFN_INT_MBOX_LPU0);
+ *max_vec_bit = __HFN_INT_MBOX_LPU0;
+ } else {
+ *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
+ __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
+ __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
+ __HFN_INT_MBOX_LPU1);
+ *max_vec_bit = __HFN_INT_MBOX_LPU1;
+ }
+
+ *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
+ __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
+ *num_vecs = __HFN_NUMINTS;
+}
+
+/**
+ * No special setup required for crossbow -- vector assignments are implicit.
+ */
+void
+bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
+{
+ int i;
+
+ bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS));
+
+ bfa->msix.nvecs = nvecs;
+ if (nvecs == 1) {
+ for (i = 0; i < BFA_MSIX_CB_MAX; i++)
+ bfa->msix.handler[i] = bfa_msix_all;
+ return;
+ }
+
+ for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
+ bfa->msix.handler[i] = bfa_msix_reqq;
+
+ for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
+ bfa->msix.handler[i] = bfa_msix_rspq;
+
+ for (; i < BFA_MSIX_CB_MAX; i++)
+ bfa->msix.handler[i] = bfa_msix_lpu_err;
+}
+
+/**
+ * Crossbow -- dummy, interrupts are masked
+ */
+void
+bfa_hwcb_msix_install(struct bfa_s *bfa)
+{
+}
+
+void
+bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
+{
+}
+
+/**
+ * No special enable/disable -- vector assignments are implicit.
+ */
+void
+bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
+{
+ bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+}
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
new file mode 100644
index 000000000000..51ae5740e6e9
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa_priv.h>
+#include <bfi/bfi_ctreg.h>
+#include <bfa_ioc.h>
+
+BFA_TRC_FILE(HAL, IOCFC_CT);
+
+static u32 __ct_msix_err_vec_reg[] = {
+ HOST_MSIX_ERR_INDEX_FN0,
+ HOST_MSIX_ERR_INDEX_FN1,
+ HOST_MSIX_ERR_INDEX_FN2,
+ HOST_MSIX_ERR_INDEX_FN3,
+};
+
+static void
+bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
+{
+ int fn = bfa_ioc_pcifn(&bfa->ioc);
+ bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+
+ if (msix)
+ bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
+ else
+ bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
+}
+
+/**
+ * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
+ */
+static void
+bfa_hwct_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+void
+bfa_hwct_reginit(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+ bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+ int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+
+ if (fn == 0) {
+ bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
+ } else {
+ bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
+ bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
+ }
+
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
+ /*
+ * CPE registers
+ */
+ q = CPE_Q_NUM(fn, i);
+ bfa_regs->cpe_q_pi[i] = (kva + CPE_PI_PTR_Q(q << 5));
+ bfa_regs->cpe_q_ci[i] = (kva + CPE_CI_PTR_Q(q << 5));
+ bfa_regs->cpe_q_depth[i] = (kva + CPE_DEPTH_Q(q << 5));
+ bfa_regs->cpe_q_ctrl[i] = (kva + CPE_QCTRL_Q(q << 5));
+
+ /*
+ * RME registers
+ */
+ q = CPE_Q_NUM(fn, i);
+ bfa_regs->rme_q_pi[i] = (kva + RME_PI_PTR_Q(q << 5));
+ bfa_regs->rme_q_ci[i] = (kva + RME_CI_PTR_Q(q << 5));
+ bfa_regs->rme_q_depth[i] = (kva + RME_DEPTH_Q(q << 5));
+ bfa_regs->rme_q_ctrl[i] = (kva + RME_QCTRL_Q(q << 5));
+ }
+}
+
+void
+bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
+{
+ u32 r32;
+
+ r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+ bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
+}
+
+void
+bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
+ u32 *num_vecs, u32 *max_vec_bit)
+{
+ *msix_vecs_bmap = (1 << BFA_MSIX_CT_MAX) - 1;
+ *max_vec_bit = (1 << (BFA_MSIX_CT_MAX - 1));
+ *num_vecs = BFA_MSIX_CT_MAX;
+}
+
+/**
+ * Setup MSI-X vector for catapult
+ */
+void
+bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
+{
+ bfa_assert((nvecs == 1) || (nvecs == BFA_MSIX_CT_MAX));
+ bfa_trc(bfa, nvecs);
+
+ bfa->msix.nvecs = nvecs;
+ bfa_hwct_msix_uninstall(bfa);
+}
+
+void
+bfa_hwct_msix_install(struct bfa_s *bfa)
+{
+ int i;
+
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1) {
+ for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+ bfa->msix.handler[i] = bfa_msix_all;
+ return;
+ }
+
+ for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q3; i++)
+ bfa->msix.handler[i] = bfa_msix_reqq;
+
+ for (; i <= BFA_MSIX_RME_Q3; i++)
+ bfa->msix.handler[i] = bfa_msix_rspq;
+
+ bfa_assert(i == BFA_MSIX_LPU_ERR);
+ bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwct_msix_uninstall(struct bfa_s *bfa)
+{
+ int i;
+
+ for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+ bfa->msix.handler[i] = bfa_hwct_msix_dummy;
+}
+
+/**
+ * Enable MSI-X vectors
+ */
+void
+bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
+{
+ bfa_trc(bfa, 0);
+ bfa_hwct_msix_lpu_err_set(bfa, msix, BFA_MSIX_LPU_ERR);
+ bfa_ioc_isr_mode_set(&bfa->ioc, msix);
+}
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
new file mode 100644
index 000000000000..0ca125712a04
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <bfa.h>
+#include <bfi/bfi_cbreg.h>
+#include <bfa_port_priv.h>
+#include <bfa_intr_priv.h>
+#include <cs/bfa_debug.h>
+
+BFA_TRC_FILE(HAL, INTR);
+
+static void
+bfa_msix_errint(struct bfa_s *bfa, u32 intr)
+{
+ bfa_ioc_error_isr(&bfa->ioc);
+}
+
+static void
+bfa_msix_lpu(struct bfa_s *bfa)
+{
+ bfa_ioc_mbox_isr(&bfa->ioc);
+}
+
+void
+bfa_msix_all(struct bfa_s *bfa, int vec)
+{
+ bfa_intx(bfa);
+}
+
+/**
+ * hal_intr_api
+ */
+bfa_boolean_t
+bfa_intx(struct bfa_s *bfa)
+{
+ u32 intr, qintr;
+ int queue;
+
+ intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+ if (!intr)
+ return BFA_FALSE;
+
+ /**
+ * RME completion queue interrupt
+ */
+ qintr = intr & __HFN_INT_RME_MASK;
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+
+	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
+ if (intr & (__HFN_INT_RME_Q0 << queue))
+ bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+ }
+ intr &= ~qintr;
+ if (!intr)
+ return BFA_TRUE;
+
+ /**
+ * CPE completion queue interrupt
+ */
+ qintr = intr & __HFN_INT_CPE_MASK;
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+
+ for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
+ if (intr & (__HFN_INT_CPE_Q0 << queue))
+ bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+ }
+ intr &= ~qintr;
+ if (!intr)
+ return BFA_TRUE;
+
+ bfa_msix_lpu_err(bfa, intr);
+
+ return BFA_TRUE;
+}
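+
+/*
+ * Sketch of a host-side INTx handler built on the API above (the real
+ * hook-up lives in the Linux glue layer; names here are hypothetical):
+ *
+ *	irqreturn_t
+ *	my_intx_isr(int irq, void *dev_id)
+ *	{
+ *		struct bfa_s *bfa = dev_id;
+ *
+ *		return bfa_intx(bfa) ? IRQ_HANDLED : IRQ_NONE;
+ *	}
+ */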
+
+void
+bfa_isr_enable(struct bfa_s *bfa)
+{
+ u32 intr_unmask;
+ int pci_func = bfa_ioc_pcifn(&bfa->ioc);
+
+ bfa_trc(bfa, pci_func);
+
+ bfa_msix_install(bfa);
+ intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
+ __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
+
+ if (pci_func == 0)
+ intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
+ __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
+ __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
+ __HFN_INT_MBOX_LPU0);
+ else
+ intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
+ __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
+ __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
+ __HFN_INT_MBOX_LPU1);
+
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
+ bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
+}
+
+void
+bfa_isr_disable(struct bfa_s *bfa)
+{
+ bfa_isr_mode_set(bfa, BFA_FALSE);
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
+ bfa_msix_uninstall(bfa);
+}
+
+void
+bfa_msix_reqq(struct bfa_s *bfa, int qid)
+{
+ struct list_head *waitq, *qe, *qen;
+ struct bfa_reqq_wait_s *wqe;
+
+ qid &= (BFI_IOC_MAX_CQS - 1);
+
+ waitq = bfa_reqq(bfa, qid);
+ list_for_each_safe(qe, qen, waitq) {
+ /**
+ * Callback only as long as there is room in request queue
+ */
+ if (bfa_reqq_full(bfa, qid))
+ break;
+
+ list_del(qe);
+ wqe = (struct bfa_reqq_wait_s *) qe;
+ wqe->qresume(wqe->cbarg);
+ }
+}
+
+void
+bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ bfa_trc(bfa, m->mhdr.msg_class);
+ bfa_trc(bfa, m->mhdr.msg_id);
+ bfa_trc(bfa, m->mhdr.mtag.i2htok);
+ bfa_assert(0);
+ bfa_trc_stop(bfa->trcmod);
+}
+
+void
+bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
+{
+ struct bfi_msg_s *m;
+ u32 pi, ci;
+
+ bfa_trc_fp(bfa, rsp_qid);
+
+ rsp_qid &= (BFI_IOC_MAX_CQS - 1);
+
+ bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);
+
+ ci = bfa_rspq_ci(bfa, rsp_qid);
+ pi = bfa_rspq_pi(bfa, rsp_qid);
+
+ bfa_trc_fp(bfa, ci);
+ bfa_trc_fp(bfa, pi);
+
+ if (bfa->rme_process) {
+ while (ci != pi) {
+ m = bfa_rspq_elem(bfa, rsp_qid, ci);
+ bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
+
+ bfa_isrs[m->mhdr.msg_class] (bfa, m);
+
+ CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
+ }
+ }
+
+ /**
+ * update CI
+ */
+ bfa_rspq_ci(bfa, rsp_qid) = pi;
+ bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
+ bfa_os_mmiowb();
+}
+
+void
+bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
+{
+ u32 intr;
+
+ intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+
+ if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
+ bfa_msix_lpu(bfa);
+
+ if (intr & (__HFN_INT_ERR_EMC |
+ __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 |
+ __HFN_INT_ERR_PSS))
+ bfa_msix_errint(bfa, intr);
+}
+
+void
+bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
+{
+ bfa_isrs[mc] = isr_func;
+}
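+
+/*
+ * Binding sketch (illustrative; the actual registration sites are
+ * module-specific): route FCXP response messages to the FCXP ISR
+ * defined in bfa_fcxp.c.
+ *
+ *	bfa_isr_bind(BFI_MC_FCXP, bfa_fcxp_isr);
+ */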
diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h
new file mode 100644
index 000000000000..8ce6e6b105c8
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_intr_priv.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_INTR_PRIV_H__
+#define __BFA_INTR_PRIV_H__
+
+/**
+ * Message handler
+ */
+typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
+
+
+#define bfa_reqq_pi(__bfa, __reqq) (__bfa)->iocfc.req_cq_pi[__reqq]
+#define bfa_reqq_ci(__bfa, __reqq) \
+ *(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)
+
+#define bfa_reqq_full(__bfa, __reqq) \
+ (((bfa_reqq_pi(__bfa, __reqq) + 1) & \
+ ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
+ bfa_reqq_ci(__bfa, __reqq))
+
+#define bfa_reqq_next(__bfa, __reqq) \
+ (bfa_reqq_full(__bfa, __reqq) ? NULL : \
+ ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
+ + bfa_reqq_pi((__bfa), (__reqq)))))
+
+#define bfa_reqq_produce(__bfa, __reqq) do { \
+ (__bfa)->iocfc.req_cq_pi[__reqq]++; \
+ (__bfa)->iocfc.req_cq_pi[__reqq] &= \
+ ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
+ bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
+ (__bfa)->iocfc.req_cq_pi[__reqq]); \
+ bfa_os_mmiowb(); \
+} while (0)
+
+#define bfa_rspq_pi(__bfa, __rspq) \
+ *(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)
+
+#define bfa_rspq_ci(__bfa, __rspq) (__bfa)->iocfc.rsp_cq_ci[__rspq]
+#define bfa_rspq_elem(__bfa, __rspq, __ci) \
+ &((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]
+
+#define CQ_INCR(__index, __size) do {				\
+	(__index)++;						\
+	(__index) &= ((__size) - 1);				\
+} while (0)
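+
+/*
+ * Index arithmetic sketch: queue depths are powers of two, so masking
+ * with (size - 1) gives the wraparound. E.g. with __size = 8 the
+ * consumer index advances 6 -> 7 -> 0 -> 1; bfa_msix_rspq() steps ci
+ * this way until it catches up with the producer index pi.
+ */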
+
+/**
+ * Queue element to wait for room in request queue. FIFO order is
+ * maintained when fulfilling requests.
+ */
+struct bfa_reqq_wait_s {
+ struct list_head qe;
+ void (*qresume) (void *cbarg);
+ void *cbarg;
+};
+
+/**
+ * Circular queue usage assignments
+ */
+enum {
+ BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
+ BFA_REQQ_FCXP = 0, /* all FCXP messages */
+ BFA_REQQ_LPS = 0, /* all lport service msgs */
+ BFA_REQQ_PORT = 0, /* all port messages */
+ BFA_REQQ_FLASH = 0, /* for flash module */
+ BFA_REQQ_DIAG = 0, /* for diag module */
+ BFA_REQQ_RPORT = 0, /* all port messages */
+ BFA_REQQ_SBOOT = 0, /* all san boot messages */
+ BFA_REQQ_QOS_LO = 1, /* all low priority IO */
+ BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
+ BFA_REQQ_QOS_HI = 3, /* all high priority IO */
+};
+
+static inline void
+bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
+ void *cbarg)
+{
+ wqe->qresume = qresume;
+ wqe->cbarg = cbarg;
+}
+
+#define bfa_reqq(__bfa, __reqq) &(__bfa)->reqq_waitq[__reqq]
+
+/**
+ * static inline void
+ * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
+ */
+#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
+ \
+ struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
+ \
+ bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
+ bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
+ \
+ list_add_tail(&(__wqe)->qe, waitq); \
+} while (0)
+
+#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
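+
+/*
+ * Flow sketch for the wait machinery above (see bfa_fcxp_send() and
+ * bfa_fcxp_qresume() in bfa_fcxp.c for a concrete user): the wait
+ * element is initialized once with bfa_reqq_winit(), parked when the
+ * queue is full, and resumed from bfa_msix_reqq().
+ *
+ *	msg = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+ *	if (!msg) {
+ *		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &wqe);
+ *		return;
+ *	}
+ *	... build request in msg ...
+ *	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
+ */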
+
+#endif /* __BFA_INTR_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
new file mode 100644
index 000000000000..149348934ce3
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -0,0 +1,2382 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <bfa_ioc.h>
+#include <bfa_fwimg_priv.h>
+#include <bfa_trcmod_priv.h>
+#include <cs/bfa_debug.h>
+#include <bfi/bfi_ioc.h>
+#include <bfi/bfi_ctreg.h>
+#include <aen/bfa_aen_ioc.h>
+#include <aen/bfa_aen.h>
+#include <log/bfa_log_hal.h>
+#include <defs/bfa_defs_pci.h>
+
+BFA_TRC_FILE(HAL, IOC);
+
+/**
+ * IOC local definitions
+ */
+#define BFA_IOC_TOV 2000 /* msecs */
+#define BFA_IOC_HB_TOV 1000 /* msecs */
+#define BFA_IOC_HB_FAIL_MAX 4
+#define BFA_IOC_HWINIT_MAX 2
+#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
+#define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \
+ + BFA_IOC_TOV)
+
+#define bfa_ioc_timer_start(__ioc) \
+ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
+ bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
+#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
+
+#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
+#define BFA_DBG_FWTRC_LEN \
+ (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
+ (sizeof(struct bfa_trc_mod_s) - \
+ BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
+#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
+#define bfa_ioc_stats(_ioc, _stats)	(_ioc)->stats._stats++
+
+#define BFA_FLASH_CHUNK_NO(off)		((off) / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_FLASH_OFFSET_IN_CHUNK(off)	((off) % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_FLASH_CHUNK_ADDR(chunkno)	((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
+
+bfa_boolean_t bfa_auto_recover = BFA_FALSE;
+
+/*
+ * forward declarations
+ */
+static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
+ enum bfa_ioc_aen_event event);
+static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
+
+/**
+ * bfa_ioc_sm
+ */
+
+/**
+ * IOC state machine events
+ */
+enum ioc_event {
+ IOC_E_ENABLE = 1, /* IOC enable request */
+ IOC_E_DISABLE = 2, /* IOC disable request */
+ IOC_E_TIMEOUT = 3, /* f/w response timeout */
+ IOC_E_FWREADY = 4, /* f/w initialization done */
+ IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */
+ IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */
+ IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */
+ IOC_E_HBFAIL = 8, /* heartbeat failure */
+ IOC_E_HWERROR = 9, /* hardware error interrupt */
+ IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */
+ IOC_E_DETACH = 11, /* driver detach cleanup */
+};
+
+bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
+
+static struct bfa_sm_table_s ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+ {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
+ {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+ {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+ {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
+ {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+ {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+ {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+};
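+
+/*
+ * Interaction sketch for the state machine above (the bfa_fsm_* helpers
+ * live in cs/bfa_sm.h): the driver sets the initial state and then
+ * drives the IOC purely by events; each state handler picks the
+ * transition, e.g.
+ *
+ *	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);	(reset -> fwcheck)
+ *
+ * ioc_sm_table[] maps a state handler back to its external
+ * bfa_ioc_state value for status reporting.
+ */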
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
+{
+ ioc->retry_count = 0;
+ ioc->auto_recover = bfa_auto_recover;
+}
+
+/**
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ case IOC_E_DETACH:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ if (bfa_ioc_firmware_lock(ioc)) {
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ } else {
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
+ }
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /*
+ * fall through
+ */
+
+ case IOC_E_DETACH:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Notify enable completion callback and generate mismatch AEN.
+ */
+static void
+bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
+{
+ /**
+ * Provide enable completion callback and AEN notification only once.
+ */
+ if (ioc->retry_count == 0) {
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
+ }
+ ioc->retry_count++;
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /*
+ * fall through
+ */
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, BFA_FALSE);
+}
+
+/**
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_FWREADY:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /*
+ * fall through
+ */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, BFA_TRUE);
+ break;
+ }
+
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
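+/**
+ * Send an enable request to firmware and arm the reply timer.
+ */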
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_enable(ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_FWRSP_ENABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /*
+ * fall through
+ */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
+ BFI_IOC_UNINIT);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+ }
+
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_FWREADY:
+ bfa_ioc_send_enable(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
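+/**
+ * Send a getattr request to firmware and arm the reply timer.
+ */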
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_getattr(ioc);
+}
+
+/**
+ * IOC configuration in progress. Timer is active.
+ */
+static void
+bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /*
+ * fall through
+ */
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
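+/**
+ * IOC is operational -- report enable completion and start heartbeat
+ * monitoring.
+ */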
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_hb_monitor(ioc);
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_HWERROR:
+ case IOC_E_FWREADY:
+ /**
+ * Hard error or IOC recovery by other function.
+ * Treat it same as heartbeat failure.
+ */
+ bfa_ioc_hb_stop(ioc);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOC_E_HBFAIL:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
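+/**
+ * Post the disable AEN, send a disable request to firmware and arm the
+ * reply timer.
+ */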
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_disable(ioc);
+}
+
+/**
+ * IOC is being disabled
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_HWERROR:
+ case IOC_E_FWRSP_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
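+/**
+ * IOC is disabled -- awaiting re-enable or detach.
+ */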
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * Hardware initialization failed.
+ */
+static void
+bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+static void
+bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify_s *notify;
+
+ /**
+ * Mark IOC as failed in hardware and stop firmware.
+ */
+ bfa_ioc_lpu_stop(ioc);
+ bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL);
+
+ if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
+ bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
+ /*
+ * Wait for halt to take effect
+ */
+ bfa_reg_read(ioc->ioc_regs.ll_halt);
+ }
+
+ /**
+ * Notify driver and common modules registered for notification.
+ */
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify_s *)qe;
+ notify->cbfn(notify->cbarg);
+ }
+
+ /**
+ * Flush any queued up mailbox requests.
+ */
+ bfa_ioc_mbox_hbfail(ioc);
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
+
+ /**
+ * Trigger auto-recovery after a delay.
+ */
+ if (ioc->auto_recover) {
+ bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
+ bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
+ }
+}
+
+/**
+ * IOC heartbeat failure.
+ */
+static void
+bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ if (ioc->auto_recover)
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_FWREADY:
+ /**
+ * Recovery is already initiated by other function.
+ */
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+
+
+/**
+ * bfa_ioc_pvt BFA IOC private functions
+ */
+
+static void
+bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify_s *notify;
+
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+
+ /**
+ * Notify common modules registered for notification.
+ */
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify_s *)qe;
+ notify->cbfn(notify->cbarg);
+ }
+}
+
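+/**
+ * Semaphore timer callback -- retry hardware semaphore acquisition.
+ */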
+static void
+bfa_ioc_sem_timeout(void *ioc_arg)
+{
+ struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
+
+ bfa_ioc_hw_sem_get(ioc);
+}
+
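+/**
+ * Acquire the firmware usage-count semaphore (spin wait).
+ */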
+static void
+bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc)
+{
+ u32 r32;
+ int cnt = 0;
+#define BFA_SEM_SPINCNT 1000
+
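+	/*
+	 * Spin until the usage semaphore reads 0 (acquired) or the spin
+	 * count is exhausted; the assert below flags a stuck semaphore.
+	 */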
+ do {
+ r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg);
+ cnt++;
+ if (cnt > BFA_SEM_SPINCNT)
+ break;
+ } while (r32 != 0);
+ bfa_assert(cnt < BFA_SEM_SPINCNT);
+}
+
+static void
+bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc)
+{
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1);
+}
+
+static void
+bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
+{
+ u32 r32;
+
+ /**
+ * First read to the semaphore register will return 0, subsequent reads
+ * will return 1. Semaphore is released by writing 0 to the register
+ */
+ r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+ if (r32 == 0) {
+ bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
+ return;
+ }
+
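+	/*
+	 * Semaphore is held by another function -- retry after BFA_IOC_TOV
+	 * through bfa_ioc_sem_timeout().
+	 */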
+ bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
+ ioc, BFA_IOC_TOV);
+}
+
+static void
+bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
+{
+ bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
+}
+
+static void
+bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
+{
+ bfa_timer_stop(&ioc->sem_timer);
+}
+
+/**
+ * Initialize LPU local memory (aka secondary memory / SRAM)
+ */
+static void
+bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
+{
+ u32 pss_ctl;
+ int i;
+#define PSS_LMEM_INIT_TIME 10000
+
+ pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LMEM_RESET;
+ pss_ctl |= __PSS_LMEM_INIT_EN;
+	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5 kHz clock */
+ bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+
+ /**
+ * wait for memory initialization to be complete
+ */
+ i = 0;
+ do {
+ pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ i++;
+ } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
+
+ /**
+ * If memory initialization is not successful, IOC timeout will catch
+ * such failures.
+ */
+ bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
+ bfa_trc(ioc, pss_ctl);
+
+ pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
+ bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+}
+
+static void
+bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Take processor out of reset.
+ */
+ pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LPU0_RESET;
+
+ bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+}
+
+static void
+bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Put processors in reset.
+ */
+ pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+
+ bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+}
+
+/**
+ * Get driver and firmware versions.
+ */
+static void
+bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
+{
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ int i;
+ u32 *fwsig = (u32 *) fwhdr;
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+ bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+
+ for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
+ i++) {
+ fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+ loff += sizeof(u32);
+ }
+}
+
+static u32 *
+bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
+{
+ if (ioc->ctdev)
+ return bfi_image_ct_get_chunk(off);
+ return bfi_image_cb_get_chunk(off);
+}
+
+static u32
+bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
+{
+	return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
+}
+
+/**
+ * Returns TRUE if same.
+ */
+static bfa_boolean_t
+bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
+{
+ struct bfi_ioc_image_hdr_s *drv_fwhdr;
+ int i;
+
+ drv_fwhdr =
+ (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
+ bfa_trc(ioc, i);
+ bfa_trc(ioc, fwhdr->md5sum[i]);
+ bfa_trc(ioc, drv_fwhdr->md5sum[i]);
+ return BFA_FALSE;
+ }
+ }
+
+ bfa_trc(ioc, fwhdr->md5sum[0]);
+ return BFA_TRUE;
+}
+
+/**
+ * Return true if current running version is valid. Firmware signature and
+ * execution context (driver/bios) must match.
+ */
+static bfa_boolean_t
+bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
+
+ /**
+ * If bios/efi boot (flash based) -- return true
+ */
+ if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
+ return BFA_TRUE;
+
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ drv_fwhdr =
+ (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);
+
+ if (fwhdr.signature != drv_fwhdr->signature) {
+ bfa_trc(ioc, fwhdr.signature);
+ bfa_trc(ioc, drv_fwhdr->signature);
+ return BFA_FALSE;
+ }
+
+ if (fwhdr.exec != drv_fwhdr->exec) {
+ bfa_trc(ioc, fwhdr.exec);
+ bfa_trc(ioc, drv_fwhdr->exec);
+ return BFA_FALSE;
+ }
+
+ return bfa_ioc_fwver_cmp(ioc, &fwhdr);
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr_s fwhdr;
+
+ /**
+ * Firmware match check is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return BFA_TRUE;
+
+ /**
+ * If bios boot (flash based) -- do not increment usage count
+ */
+ if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
+ return BFA_TRUE;
+
+ bfa_ioc_usage_sem_get(ioc);
+ usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+
+ /**
+ * If usage count is 0, always return TRUE.
+ */
+ if (usecnt == 0) {
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
+ bfa_ioc_usage_sem_release(ioc);
+ bfa_trc(ioc, usecnt);
+ return BFA_TRUE;
+ }
+
+ ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+ bfa_trc(ioc, ioc_fwstate);
+
+ /**
+	 * Use count cannot be non-zero while the chip is in uninitialized state.
+ */
+ bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
+
+ /**
+ * Check if another driver with a different firmware is active
+ */
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+ bfa_ioc_usage_sem_release(ioc);
+ bfa_trc(ioc, usecnt);
+ return BFA_FALSE;
+ }
+
+ /**
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+ bfa_ioc_usage_sem_release(ioc);
+ bfa_trc(ioc, usecnt);
+ return BFA_TRUE;
+}
+
+static void
+bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+ u32 usecnt;
+
+ /**
+ * Firmware lock is relevant only for CNA.
+ * If bios boot (flash based) -- do not decrement usage count
+ */
+ if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
+ return;
+
+ /**
+ * decrement usage count
+ */
+ bfa_ioc_usage_sem_get(ioc);
+ usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+ bfa_assert(usecnt > 0);
+
+ usecnt--;
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+ bfa_trc(ioc, usecnt);
+
+ bfa_ioc_usage_sem_release(ioc);
+}
+
+/**
+ * Conditionally flush any pending message from firmware at start.
+ */
+static void
+bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
+{
+ u32 r32;
+
+ r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+ if (r32)
+ bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
+}
+
+
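+/**
+ * Initialize or take over the hardware based on the current firmware
+ * state.
+ */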
+static void
+bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ bfa_boolean_t fwvalid;
+
+ ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+
+ if (force)
+ ioc_fwstate = BFI_IOC_UNINIT;
+
+ bfa_trc(ioc, ioc_fwstate);
+
+ /**
+ * check if firmware is valid
+ */
+ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
+ BFA_FALSE : bfa_ioc_fwver_valid(ioc);
+
+ if (!fwvalid) {
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ return;
+ }
+
+ /**
+ * If hardware initialization is in progress (initialized by other IOC),
+ * just wait for an initialization completion interrupt.
+ */
+ if (ioc_fwstate == BFI_IOC_INITING) {
+ bfa_trc(ioc, ioc_fwstate);
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ return;
+ }
+
+ /**
+	 * If the IOC function is disabled or already operational and the
+	 * firmware version matches, just re-enable the IOC.
+ */
+ if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
+ bfa_trc(ioc, ioc_fwstate);
+
+ /**
+ * When using MSI-X any pending firmware ready event should
+ * be flushed. Otherwise MSI-X interrupts are not delivered.
+ */
+ bfa_ioc_msgflush(ioc);
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ return;
+ }
+
+ /**
+ * Initialize the h/w for any other states.
+ */
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+}
+
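+/**
+ * IOC state machine timer callback.
+ */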
+static void
+bfa_ioc_timeout(void *ioc_arg)
+{
+ struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
+
+ bfa_trc(ioc, 0);
+ bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+}
+
+void
+bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
+{
+ u32 *msgp = (u32 *) ioc_msg;
+ u32 i;
+
+ bfa_trc(ioc, msgp[0]);
+ bfa_trc(ioc, len);
+
+ bfa_assert(len <= BFI_IOC_MSGLEN_MAX);
+
+ /*
+ * first write msg to mailbox registers
+ */
+ for (i = 0; i < len / sizeof(u32); i++)
+ bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
+ bfa_os_wtole(msgp[i]));
+
+ for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
+ bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);
+
+ /*
+ * write 1 to mailbox CMD to trigger LPU event
+ */
+ bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
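+	/*
+	 * Dummy read back to flush the posted write to the adapter.
+	 */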
+ (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+}
+
+static void
+bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_ctrl_req_s enable_req;
+
+ bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ bfa_ioc_portid(ioc));
+ enable_req.ioc_class = ioc->ioc_mc;
+ bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
+}
+
+static void
+bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_ctrl_req_s disable_req;
+
+ bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
+}
+
+static void
+bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_getattr_req_s attr_req;
+
+ bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+ bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
+}
+
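+/**
+ * Periodic heartbeat check. Declares heartbeat failure and starts
+ * recovery after BFA_IOC_HB_FAIL_MAX consecutive stalled counts.
+ */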
+static void
+bfa_ioc_hb_check(void *cbarg)
+{
+ struct bfa_ioc_s *ioc = cbarg;
+ u32 hb_count;
+
+ hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+ if (ioc->hb_count == hb_count) {
+ ioc->hb_fail++;
+ } else {
+ ioc->hb_count = hb_count;
+ ioc->hb_fail = 0;
+ }
+
+ if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
+ bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
+ ioc->hb_fail = 0;
+ bfa_ioc_recover(ioc);
+ return;
+ }
+
+ bfa_ioc_mbox_poll(ioc);
+ bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
+ BFA_IOC_HB_TOV);
+}
+
+static void
+bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
+{
+ ioc->hb_fail = 0;
+ ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+ bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
+ BFA_IOC_HB_TOV);
+}
+
+static void
+bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
+{
+ bfa_timer_stop(&ioc->ioc_timer);
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct {
+	u32	hfn_mbox, lpu_mbox, hfn_pgn;
+} iocreg_fnreg[] = {
+	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 },
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct {
+	u32	hfn, lpu;
+} iocreg_mbcmd_p0[] = {
+	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
+	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
+	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
+	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT },
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct {
+	u32	hfn, lpu;
+} iocreg_mbcmd_p1[] = {
+	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
+	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
+	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
+	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT },
+};
+
+/**
+ * Shared IRQ handling in INTX mode
+ */
+static struct {
+	u32	isr, msk;
+} iocreg_shirq_next[] = {
+	{ HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK },
+	{ HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK },
+	{ HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK },
+	{ HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK },
+};
+
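+/**
+ * Initialize per-function IOC register addresses.
+ */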
+static void
+bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ }
+
+ /**
+ * Shared IRQ handling in INTX mode
+ */
+ ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
+ ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
+ if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+}
+
+/**
+ * Initiate a full firmware download.
+ */
+static void
+bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
+ u32 boot_param)
+{
+ u32 *fwimg;
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ u32 chunkno = 0;
+ u32 i;
+
+ /**
+ * Initialize LMEM first before code download
+ */
+ bfa_ioc_lmem_init(ioc);
+
+ /**
+ * Flash based firmware boot
+ */
+ bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
+ if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
+ boot_type = BFI_BOOT_TYPE_FLASH;
+ fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
+ fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
+ fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
+ bfa_os_swap32(boot_param);
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+
+ bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+
+ for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {
+
+ if (BFA_FLASH_CHUNK_NO(i) != chunkno) {
+ chunkno = BFA_FLASH_CHUNK_NO(i);
+ fwimg = bfa_ioc_fwimg_get_chunk(ioc,
+ BFA_FLASH_CHUNK_ADDR(chunkno));
+ }
+
+ /**
+ * write smem
+ */
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
+ fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]);
+
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ }
+ }
+
+ bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
+ bfa_ioc_smem_pgnum(ioc, 0));
+}
+
+static void
+bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
+{
+ bfa_ioc_hwinit(ioc, force);
+}
+
+/**
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
+{
+ struct bfi_ioc_attr_s *attr = ioc->attr;
+
+ attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
+ attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
+
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
+}
+
+/**
+ * Attach time initialization of mbox logic.
+ */
+static void
+bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ int mc;
+
+ INIT_LIST_HEAD(&mod->cmd_q);
+ for (mc = 0; mc < BFI_MC_MAX; mc++) {
+ mod->mbhdlr[mc].cbfn = NULL;
+ mod->mbhdlr[mc].cbarg = ioc->bfa;
+ }
+}
+
+/**
+ * Mbox poll timer -- restarts any pending mailbox requests.
+ */
+static void
+bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd_s *cmd;
+ u32 stat;
+
+ /**
+ * If no command pending, do nothing
+ */
+ if (list_empty(&mod->cmd_q))
+ return;
+
+ /**
+ * If previous command is not yet fetched by firmware, do nothing
+ */
+ stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat)
+ return;
+
+ /**
+ * Enqueue command to firmware.
+ */
+ bfa_q_deq(&mod->cmd_q, &cmd);
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/**
+ * Cleanup any pending requests.
+ */
+static void
+bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd_s *cmd;
+
+ while (!list_empty(&mod->cmd_q))
+ bfa_q_deq(&mod->cmd_q, &cmd);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_map_port(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /**
+ * For crossbow, port id is same as pci function.
+ */
+ if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
+ ioc->port_id = bfa_ioc_pcifn(ioc);
+ return;
+ }
+
+ /**
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = bfa_reg_read(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+
+ bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+ bfa_trc(ioc, ioc->port_id);
+}
+
+
+
+/**
+ * bfa_ioc_public
+ */
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+void
+bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = bfa_reg_read(rb + FNC_PERS_REG);
+ bfa_trc(ioc, r32);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /**
+ * If already in desired mode, do not change anything
+ */
+ if (!msix && mode)
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ bfa_trc(ioc, r32);
+
+ bfa_reg_write(rb + FNC_PERS_REG, r32);
+}
+
+bfa_status_t
+bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ u32 pll_sclk, pll_fclk, r32;
+
+ if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
+ pll_sclk =
+ __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
+ __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
+ __APP_PLL_312_JITLMT0_1(3U) |
+ __APP_PLL_312_CNTLMT0_1(1U);
+ pll_fclk =
+ __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
+ __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
+ __APP_PLL_425_JITLMT0_1(3U) |
+ __APP_PLL_425_CNTLMT0_1(1U);
+
+ /**
+ * For catapult, choose operational mode FC/FCoE
+ */
+ if (ioc->fcmode) {
+ bfa_reg_write((rb + OP_MODE), 0);
+ bfa_reg_write((rb + ETH_MAC_SER_REG),
+ __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
+ | __APP_EMS_CHANNEL_SEL);
+ } else {
+ ioc->pllinit = BFA_TRUE;
+ bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
+ bfa_reg_write((rb + ETH_MAC_SER_REG),
+ __APP_EMS_REFCKBUFEN1);
+ }
+ } else {
+ pll_sclk =
+ __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
+ __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
+ __APP_PLL_312_CNTLMT0_1(3U);
+ pll_fclk =
+ __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
+ __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
+ __APP_PLL_425_JITLMT0_1(3U) |
+ __APP_PLL_425_CNTLMT0_1(3U);
+ }
+
+ bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
+ bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
+
+ bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ __APP_PLL_312_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ __APP_PLL_425_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
+ bfa_os_udelay(2);
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ __APP_PLL_312_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ __APP_PLL_425_LOGIC_SOFT_RESET);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);
+
+ /**
+ * Wait for PLLs to lock.
+ */
+ bfa_os_udelay(2000);
+ bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
+
+ if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
+ bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
+ bfa_os_udelay(1000);
+ r32 = bfa_reg_read((rb + MBIST_STAT_REG));
+ bfa_trc(ioc, r32);
+ }
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Interface used by diag module to do firmware boot with memory test
+ * as the entry vector.
+ */
+void
+bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
+{
+ bfa_os_addr_t rb;
+
+ bfa_ioc_stats(ioc, ioc_boots);
+
+ if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
+ return;
+
+ /**
+ * Initialize IOC state of all functions on a chip reset.
+ */
+ rb = ioc->pcidev.pci_bar_kva;
+ if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
+ bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
+ } else {
+ bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
+ bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
+ }
+
+ bfa_ioc_download_fw(ioc, boot_type, boot_param);
+
+ /**
+ * Enable interrupts just before starting LPU
+ */
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_ioc_lpu_start(ioc);
+}
+
+/**
+ * Enable/disable IOC failure auto recovery.
+ */
+void
+bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
+{
+	bfa_auto_recover = auto_recover;
+}
+
+
+bfa_boolean_t
+bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+void
+bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
+{
+ u32 *msgp = mbmsg;
+ u32 r32;
+ int i;
+
+ /**
+ * read the MBOX msg
+ */
+ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
+ i++) {
+ r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
+ i * sizeof(u32));
+ msgp[i] = bfa_os_htonl(r32);
+ }
+
+ /**
+ * turn off mailbox interrupt by clearing mailbox status
+ */
+ bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
+ bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+void
+bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
+{
+ union bfi_ioc_i2h_msg_u *msg;
+
+ msg = (union bfi_ioc_i2h_msg_u *)m;
+
+ bfa_ioc_stats(ioc, ioc_isrs);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOC_I2H_HBEAT:
+ break;
+
+ case BFI_IOC_I2H_READY_EVENT:
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ break;
+
+ case BFI_IOC_I2H_ENABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
+ break;
+
+ case BFI_IOC_I2H_DISABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
+ break;
+
+ case BFI_IOC_I2H_GETATTR_REPLY:
+ bfa_ioc_getattr_reply(ioc);
+ break;
+
+ default:
+ bfa_trc(ioc, msg->mh.msg_id);
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IOC attach time initialization and setup.
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] bfa driver instance structure
+ * @param[in] trcmod kernel trace module
+ * @param[in] aen kernel aen event module
+ * @param[in] logm kernel logging module
+ */
+void
+bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
+ struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
+ struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
+{
+ ioc->bfa = bfa;
+ ioc->cbfn = cbfn;
+ ioc->timer_mod = timer_mod;
+ ioc->trcmod = trcmod;
+ ioc->aen = aen;
+ ioc->logm = logm;
+ ioc->fcmode = BFA_FALSE;
+ ioc->pllinit = BFA_FALSE;
+ ioc->dbg_fwsave_once = BFA_TRUE;
+
+ bfa_ioc_mbox_attach(ioc);
+ INIT_LIST_HEAD(&ioc->hb_notify_q);
+
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+}
+
+/**
+ * Driver detach time IOC cleanup.
+ */
+void
+bfa_ioc_detach(struct bfa_ioc_s *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DETACH);
+}
+
+/**
+ * Setup IOC PCI properties.
+ *
+ * @param[in] pcidev PCI device information for this IOC
+ */
+void
+bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
+ enum bfi_mclass mc)
+{
+ ioc->ioc_mc = mc;
+ ioc->pcidev = *pcidev;
+ ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
+ ioc->cna = ioc->ctdev && !ioc->fcmode;
+
+ bfa_ioc_map_port(ioc);
+ bfa_ioc_reg_init(ioc);
+}
+
+/**
+ * Initialize IOC dma memory
+ *
+ * @param[in] dm_kva kernel virtual address of IOC dma memory
+ * @param[in] dm_pa physical address of IOC dma memory
+ */
+void
+bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
+{
+ /**
+ * dma memory for firmware attribute
+ */
+ ioc->attr_dma.kva = dm_kva;
+ ioc->attr_dma.pa = dm_pa;
+ ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
+}
+
+/**
+ * Return size of dma memory required.
+ */
+u32
+bfa_ioc_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_ioc_enable(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_enables);
+ ioc->dbg_fwsave_once = BFA_TRUE;
+
+ bfa_fsm_send_event(ioc, IOC_E_ENABLE);
+}
+
+void
+bfa_ioc_disable(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_disables);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+}
+
+/**
+ * Returns memory required for saving firmware trace in case of crash.
+ * Driver must call this interface to allocate memory required for
+ * automatic saving of firmware trace. Driver should call
+ * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
+ * trace memory.
+ */
+int
+bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
+{
+	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+}
+
+/**
+ * Initialize memory for saving firmware trace. Driver must initialize
+ * trace memory before calling bfa_ioc_enable().
+ */
+void
+bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
+{
+ bfa_assert(ioc->auto_recover);
+ ioc->dbg_fwsave = dbg_fwsave;
+ ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
+}
+
+u32
+bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
+}
+
+u32
+bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGOFF(fmaddr);
+}
+
+/**
+ * Register mailbox message handler functions
+ *
+ * @param[in] ioc IOC instance
+ * @param[in] mcfuncs message class handler functions
+ */
+void
+bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ int mc;
+
+ for (mc = 0; mc < BFI_MC_MAX; mc++)
+ mod->mbhdlr[mc].cbfn = mcfuncs[mc];
+}
+
+/**
+ * Register mailbox message handler function, to be called by common modules
+ */
+void
+bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+
+ mod->mbhdlr[mc].cbfn = cbfn;
+ mod->mbhdlr[mc].cbarg = cbarg;
+}
+
+/**
+ * Queue a mailbox command request to firmware. The request is queued
+ * internally if the mailbox is busy. It is the caller's responsibility
+ * to serialize access.
+ *
+ * @param[in] ioc IOC instance
+ * @param[in]	cmd	Mailbox command
+ */
+void
+bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ u32 stat;
+
+ /**
+ * If a previous command is pending, queue new command
+ */
+ if (!list_empty(&mod->cmd_q)) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * If mailbox is busy, queue command for poll timer
+ */
+ stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * mailbox is free -- queue command to firmware
+ */
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/**
+ * Handle mailbox interrupts
+ */
+void
+bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
+{
+ struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+ struct bfi_mbmsg_s m;
+ int mc;
+
+ bfa_ioc_msgget(ioc, &m);
+
+ /**
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
+
+	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+}
+
+void
+bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+#ifndef BFA_BIOS_BUILD
+
+/**
+ * return true if IOC is disabled
+ */
+bfa_boolean_t
+bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
+{
+ return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
+ || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled));
+}
+
+/**
+ * return true if IOC firmware is different.
+ */
+bfa_boolean_t
+bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
+{
+ return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
+ || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
+ || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch));
+}
+
+#define bfa_ioc_state_disabled(__sm) \
+ (((__sm) == BFI_IOC_UNINIT) || \
+ ((__sm) == BFI_IOC_INITING) || \
+ ((__sm) == BFI_IOC_HWINIT) || \
+ ((__sm) == BFI_IOC_DISABLED) || \
+ ((__sm) == BFI_IOC_HBFAIL) || \
+ ((__sm) == BFI_IOC_CFG_DISABLED))
+
+/**
+ * Check if adapter is disabled -- both IOCs should be in a disabled
+ * state.
+ */
+bfa_boolean_t
+bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
+{
+ u32 ioc_state;
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+
+ if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
+ return BFA_FALSE;
+
+ ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
+ if (!bfa_ioc_state_disabled(ioc_state))
+ return BFA_FALSE;
+
+ ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
+ if (!bfa_ioc_state_disabled(ioc_state))
+ return BFA_FALSE;
+
+ return BFA_TRUE;
+}
+
+/**
+ * Add to IOC heartbeat failure notification queue. To be used by common
+ * modules.
+ */
+void
+bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
+ struct bfa_ioc_hbfail_notify_s *notify)
+{
+ list_add_tail(&notify->qe, &ioc->hb_notify_q);
+}
+
+#define BFA_MFG_NAME "Brocade"
+void
+bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
+ struct bfa_adapter_attr_s *ad_attr)
+{
+ struct bfi_ioc_attr_s *ioc_attr;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+
+ ioc_attr = ioc->attr;
+ bfa_os_memcpy((void *)&ad_attr->serial_num,
+ (void *)ioc_attr->brcd_serialnum,
+ BFA_ADAPTER_SERIAL_NUM_LEN);
+
+ bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN);
+ bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
+ BFA_VERSION_LEN);
+ bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
+ BFA_ADAPTER_MFG_NAME_LEN);
+ bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+ sizeof(struct bfa_mfg_vpd_s));
+
+ ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop);
+ ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
+
+ /**
+ * model name
+ */
+ if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
+ strcpy(model, "BR-10?0");
+ model[5] = '0' + ad_attr->nports;
+ } else {
+ strcpy(model, "Brocade-??5");
+ model[8] =
+ '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
+ model[9] = '0' + ad_attr->nports;
+ }
+
+ if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
+ ad_attr->prototype = 1;
+ else
+ ad_attr->prototype = 0;
+
+ bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
+ bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
+ BFA_ADAPTER_MODEL_NAME_LEN);
+
+ ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
+ ad_attr->mac = bfa_ioc_get_mac(ioc);
+
+ ad_attr->pcie_gen = ioc_attr->pcie_gen;
+ ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+ ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
+ ad_attr->asic_rev = ioc_attr->asic_rev;
+ ad_attr->hw_ver[0] = 'R';
+ ad_attr->hw_ver[1] = 'e';
+ ad_attr->hw_ver[2] = 'v';
+ ad_attr->hw_ver[3] = '-';
+ ad_attr->hw_ver[4] = ioc_attr->asic_rev;
+ ad_attr->hw_ver[5] = '\0';
+
+ ad_attr->cna_capable = ioc->cna;
+}
+
+void
+bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
+{
+ bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
+
+ ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ ioc_attr->port_id = ioc->port_id;
+
+ if (!ioc->ctdev)
+ ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
+ else if (ioc->ioc_mc == BFI_MC_IOCFC)
+ ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
+ else if (ioc->ioc_mc == BFI_MC_LL)
+ ioc_attr->ioc_type = BFA_IOC_TYPE_LL;
+
+ bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
+
+ ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
+ ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
+ ioc_attr->pci_attr.chip_rev[0] = 'R';
+ ioc_attr->pci_attr.chip_rev[1] = 'e';
+ ioc_attr->pci_attr.chip_rev[2] = 'v';
+ ioc_attr->pci_attr.chip_rev[3] = '-';
+ ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
+ ioc_attr->pci_attr.chip_rev[5] = '\0';
+}
+
+/**
+ * hal_wwn_public
+ */
+wwn_t
+bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
+{
+	union {
+		wwn_t		wwn;
+		u8		byte[sizeof(wwn_t)];
+	} w;
+
+ w.wwn = ioc->attr->mfg_wwn;
+
+ if (bfa_ioc_portid(ioc) == 1)
+ w.byte[7]++;
+
+ return w.wwn;
+}
+
+wwn_t
+bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
+{
+	union {
+		wwn_t		wwn;
+		u8		byte[sizeof(wwn_t)];
+	} w;
+
+ w.wwn = ioc->attr->mfg_wwn;
+
+ if (bfa_ioc_portid(ioc) == 1)
+ w.byte[7]++;
+
+ w.byte[0] = 0x20;
+
+ return w.wwn;
+}
+
+wwn_t
+bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
+{
+	union {
+		wwn_t		wwn;
+		u8		byte[sizeof(wwn_t)];
+	} w, w5;
+
+ bfa_trc(ioc, inst);
+
+ w.wwn = ioc->attr->mfg_wwn;
+ w5.byte[0] = 0x50 | w.byte[2] >> 4;
+ w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
+ w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
+ w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
+ w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
+ w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
+ w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
+ w5.byte[7] = (inst & 0xff);
+
+ return w5.wwn;
+}
+
+u64
+bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
+{
+ return ioc->attr->mfg_wwn;
+}
+
+mac_t
+bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
+{
+ mac_t mac;
+
+ mac = ioc->attr->mfg_mac;
+ mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+
+ return mac;
+}
+
+void
+bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
+{
+ ioc->fcmode = BFA_TRUE;
+ ioc->port_id = bfa_ioc_pcifn(ioc);
+}
+
+bfa_boolean_t
+bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
+{
+ return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
+}
+
+/**
+ * Return true if interrupt should be claimed.
+ */
+bfa_boolean_t
+bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
+{
+ u32 isr, msk;
+
+ /**
+ * Always claim if not catapult.
+ */
+ if (!ioc->ctdev)
+ return BFA_TRUE;
+
+ /**
+ * FALSE if next device is claiming interrupt.
+ * TRUE if next device is not interrupting or not present.
+ */
+ msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
+ isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
+ return !(isr & ~msk);
+}
+
+/**
+ * Send AEN notification
+ */
+static void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
+{
+ union bfa_aen_data_u aen_data;
+ struct bfa_log_mod_s *logmod = ioc->logm;
+ s32 inst_num = 0;
+ struct bfa_ioc_attr_s ioc_attr;
+
+ switch (event) {
+ case BFA_IOC_AEN_HBGOOD:
+ bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
+ break;
+ case BFA_IOC_AEN_HBFAIL:
+ bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
+ break;
+ case BFA_IOC_AEN_ENABLE:
+ bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
+ break;
+ case BFA_IOC_AEN_DISABLE:
+ bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
+ break;
+ case BFA_IOC_AEN_FWMISMATCH:
+ bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
+ break;
+ default:
+ break;
+ }
+
+ memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
+ memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
+ bfa_ioc_get_attr(ioc, &ioc_attr);
+ switch (ioc_attr.ioc_type) {
+ case BFA_IOC_TYPE_FC:
+ aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
+ break;
+ case BFA_IOC_TYPE_FCoE:
+ aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
+ aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ case BFA_IOC_TYPE_LL:
+ aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ default:
+ bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC);
+ break;
+ }
+ aen_data.ioc.ioc_type = ioc_attr.ioc_type;
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+bfa_status_t
+bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_len == 0)
+ return BFA_STATUS_ENOFSAVE;
+
+ tlen = *trclen;
+ if (tlen > ioc->dbg_fwsave_len)
+ tlen = ioc->dbg_fwsave_len;
+
+ bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
+ *trclen = tlen;
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Read the firmware trace directly from the adapter's shared memory.
+ */
+bfa_status_t
+bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
+{
+ u32 pgnum;
+ u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
+ int i, tlen;
+ u32 *tbuf = trcdata, r32;
+
+ bfa_trc(ioc, *trclen);
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ loff = bfa_ioc_smem_pgoff(ioc, loff);
+ bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+
+ tlen = *trclen;
+ if (tlen > BFA_DBG_FWTRC_LEN)
+ tlen = BFA_DBG_FWTRC_LEN;
+ tlen /= sizeof(u32);
+
+ bfa_trc(ioc, tlen);
+
+ for (i = 0; i < tlen; i++) {
+ r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+ tbuf[i] = bfa_os_ntohl(r32);
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+ }
+ }
+ bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
+ bfa_ioc_smem_pgnum(ioc, 0));
+ bfa_trc(ioc, pgnum);
+
+ *trclen = tlen * sizeof(u32);
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_len) {
+ tlen = ioc->dbg_fwsave_len;
+ bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+ }
+}
+
+/**
+ * Firmware failure detected. Start recovery actions.
+ */
+static void
+bfa_ioc_recover(struct bfa_ioc_s *ioc)
+{
+ if (ioc->dbg_fwsave_once) {
+ ioc->dbg_fwsave_once = BFA_FALSE;
+ bfa_ioc_debug_save(ioc);
+ }
+
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+}
+
+#else
+
+static void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
+{
+}
+
+static void
+bfa_ioc_recover(struct bfa_ioc_s *ioc)
+{
+ bfa_assert(0);
+}
+
+#endif
+
+
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
new file mode 100644
index 000000000000..58efd4b13143
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_IOC_H__
+#define __BFA_IOC_H__
+
+#include <cs/bfa_sm.h>
+#include <bfi/bfi.h>
+#include <bfi/bfi_ioc.h>
+#include <bfi/bfi_boot.h>
+#include <bfa_timer.h>
+
+/**
+ * PCI device information required by IOC
+ */
+struct bfa_pcidev_s {
+ int pci_slot;
+ u8 pci_func;
+ u16 device_id;
+ bfa_os_addr_t pci_bar_kva;
+};
+
+/**
+ * Structure used to remember the DMA-able memory block's KVA and Physical
+ * Address
+ */
+struct bfa_dma_s {
+ void *kva; /*! Kernel virtual address */
+ u64 pa; /*! Physical address */
+};
+
+#define BFA_DMA_ALIGN_SZ 256
+#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
+
+
+
+#define bfa_dma_addr_set(dma_addr, pa) \
+ __bfa_dma_addr_set(&dma_addr, (u64)pa)
+
+static inline void
+__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) pa;
+ dma_addr->a32.addr_hi = (u32) (bfa_os_u32(pa));
+}
+
+
+#define bfa_dma_be_addr_set(dma_addr, pa) \
+ __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
+static inline void
+__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa);
+ dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa));
+}
+
+struct bfa_ioc_regs_s {
+ bfa_os_addr_t hfn_mbox_cmd;
+ bfa_os_addr_t hfn_mbox;
+ bfa_os_addr_t lpu_mbox_cmd;
+ bfa_os_addr_t lpu_mbox;
+ bfa_os_addr_t pss_ctl_reg;
+ bfa_os_addr_t app_pll_fast_ctl_reg;
+ bfa_os_addr_t app_pll_slow_ctl_reg;
+ bfa_os_addr_t ioc_sem_reg;
+ bfa_os_addr_t ioc_usage_sem_reg;
+ bfa_os_addr_t ioc_usage_reg;
+ bfa_os_addr_t host_page_num_fn;
+ bfa_os_addr_t heartbeat;
+ bfa_os_addr_t ioc_fwstate;
+ bfa_os_addr_t ll_halt;
+ bfa_os_addr_t shirq_isr_next;
+ bfa_os_addr_t shirq_msk_next;
+ bfa_os_addr_t smem_page_start;
+ u32 smem_pg0;
+};
+
+#define bfa_reg_read(_raddr) bfa_os_reg_read(_raddr)
+#define bfa_reg_write(_raddr, _val) bfa_os_reg_write(_raddr, _val)
+#define bfa_mem_read(_raddr, _off) bfa_os_mem_read(_raddr, _off)
+#define bfa_mem_write(_raddr, _off, _val) \
+ bfa_os_mem_write(_raddr, _off, _val)
+/**
+ * IOC Mailbox structures
+ */
+struct bfa_mbox_cmd_s {
+ struct list_head qe;
+ u32 msg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * IOC mailbox module
+ */
+typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
+struct bfa_ioc_mbox_mod_s {
+ struct list_head cmd_q; /* pending mbox queue */
+ int nmclass; /* number of handlers */
+ struct {
+ bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */
+ void *cbarg;
+ } mbhdlr[BFI_MC_MAX];
+};
+
+/**
+ * IOC callback function interfaces
+ */
+typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
+typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
+struct bfa_ioc_cbfn_s {
+ bfa_ioc_enable_cbfn_t enable_cbfn;
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+};
+
+/**
+ * Heartbeat failure notification queue element.
+ */
+struct bfa_ioc_hbfail_notify_s {
+ struct list_head qe;
+ bfa_ioc_hbfail_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/**
+ * Initialize a heartbeat failure notification structure
+ */
+#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
+ (__notify)->cbfn = (__cbfn); \
+ (__notify)->cbarg = (__cbarg); \
+} while (0)
+
+struct bfa_ioc_s {
+ bfa_fsm_t fsm;
+ struct bfa_s *bfa;
+ struct bfa_pcidev_s pcidev;
+ struct bfa_timer_mod_s *timer_mod;
+ struct bfa_timer_s ioc_timer;
+ struct bfa_timer_s sem_timer;
+ u32 hb_count;
+ u32 hb_fail;
+ u32 retry_count;
+ struct list_head hb_notify_q;
+ void *dbg_fwsave;
+ int dbg_fwsave_len;
+ bfa_boolean_t dbg_fwsave_once;
+ enum bfi_mclass ioc_mc;
+ struct bfa_ioc_regs_s ioc_regs;
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_aen_s *aen;
+ struct bfa_log_mod_s *logm;
+ struct bfa_ioc_drv_stats_s stats;
+ bfa_boolean_t auto_recover;
+ bfa_boolean_t fcmode;
+ bfa_boolean_t ctdev;
+ bfa_boolean_t cna;
+ bfa_boolean_t pllinit;
+ u8 port_id;
+
+ struct bfa_dma_s attr_dma;
+ struct bfi_ioc_attr_s *attr;
+ struct bfa_ioc_cbfn_s *cbfn;
+ struct bfa_ioc_mbox_mod_s mbox_mod;
+};
+
+#define bfa_ioc_pcifn(__ioc) (__ioc)->pcidev.pci_func
+#define bfa_ioc_devid(__ioc) (__ioc)->pcidev.device_id
+#define bfa_ioc_bar0(__ioc) (__ioc)->pcidev.pci_bar_kva
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_fetch_stats(__ioc, __stats) \
+ ((__stats)->drv_stats) = (__ioc)->stats
+#define bfa_ioc_clr_stats(__ioc) \
+ bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+#define bfa_ioc_maxfrsize(__ioc) (__ioc)->attr->maxfrsize
+#define bfa_ioc_rx_bbcredit(__ioc) (__ioc)->attr->rx_bbcredit
+#define bfa_ioc_speed_sup(__ioc) \
+ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+
+/**
+ * IOC mailbox interface
+ */
+void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
+void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
+ bfa_ioc_mbox_mcfunc_t *mcfuncs);
+void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
+void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
+void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
+void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
+
+/**
+ * IOC interfaces
+ */
+void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
+ struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
+ struct bfa_trc_mod_s *trcmod,
+ struct bfa_aen_s *aen, struct bfa_log_mod_s *logm);
+void bfa_ioc_detach(struct bfa_ioc_s *ioc);
+void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
+ enum bfi_mclass mc);
+u32 bfa_ioc_meminfo(void);
+void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_ioc_enable(struct bfa_ioc_s *ioc);
+void bfa_ioc_disable(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
+
+void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
+void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
+void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
+void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx);
+bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
+void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
+void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
+void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
+ struct bfa_adapter_attr_s *ad_attr);
+int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
+void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
+bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
+ int *trclen);
+bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
+ int *trclen);
+u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
+u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
+void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
+void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
+ struct bfa_ioc_hbfail_notify_s *notify);
+
+/*
+ * bfa mfg wwn API functions
+ */
+wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
+wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
+wwn_t bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst);
+mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
+u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
+
+#endif /* __BFA_IOC_H__ */
+
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
new file mode 100644
index 000000000000..12350b022d63
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -0,0 +1,872 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <cs/bfa_debug.h>
+#include <bfa_priv.h>
+#include <log/bfa_log_hal.h>
+#include <bfi/bfi_boot.h>
+#include <bfi/bfi_cbreg.h>
+#include <aen/bfa_aen_ioc.h>
+#include <defs/bfa_defs_iocfc.h>
+#include <defs/bfa_defs_pci.h>
+#include "bfa_callback_priv.h"
+#include "bfad_drv.h"
+
+BFA_TRC_FILE(HAL, IOCFC);
+
+/**
+ * IOC local definitions
+ */
+#define BFA_IOCFC_TOV 5000 /* msecs */
+
+enum {
+ BFA_IOCFC_ACT_NONE = 0,
+ BFA_IOCFC_ACT_INIT = 1,
+ BFA_IOCFC_ACT_STOP = 2,
+ BFA_IOCFC_ACT_DISABLE = 3,
+};
+
+/*
+ * forward declarations
+ */
+static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
+static void bfa_iocfc_disable_cbfn(void *bfa_arg);
+static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
+static void bfa_iocfc_reset_cbfn(void *bfa_arg);
+static void bfa_iocfc_stats_clear(void *bfa_arg);
+static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
+ struct bfa_fw_stats_s *s);
+static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
+static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stats_timeout(void *bfa_arg);
+
+static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+
+/**
+ * bfa_ioc_pvt BFA IOC private functions
+ */
+
+static void
+bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
+{
+ int i, per_reqq_sz, per_rspq_sz;
+
+ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+ per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+
+ /*
+ * Calculate CQ size
+ */
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+ *dm_len = *dm_len + per_reqq_sz;
+ *dm_len = *dm_len + per_rspq_sz;
+ }
+
+ /*
+ * Calculate Shadow CI/PI size
+ */
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++)
+ *dm_len += (2 * BFA_CACHELINE_SZ);
+}
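+
+/*
+ * Illustrative sketch, not part of this patch: the sizing math above in
+ * standalone form, assuming BFA_ROUNDUP is the usual power-of-two
+ * round-up (the real macro lives in the bfa headers).
+ */
+#if 0
+#define ROUNDUP(_len, _align)	(((_len) + (_align) - 1) & ~((_align) - 1))
+
+static u32
+cq_dma_footprint(u32 num_cqs, u32 reqq_elems, u32 rspq_elems)
+{
+	u32 per_reqq = ROUNDUP(reqq_elems * BFI_LMSG_SZ, BFA_DMA_ALIGN_SZ);
+	u32 per_rspq = ROUNDUP(rspq_elems * BFI_LMSG_SZ, BFA_DMA_ALIGN_SZ);
+
+	/* one request/response ring pair plus shadow CI/PI words per CQ */
+	return num_cqs * (per_reqq + per_rspq + 2 * BFA_CACHELINE_SZ);
+}
+#endif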
+
+static void
+bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
+{
+ *dm_len +=
+ BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+ *dm_len +=
+ BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
+ *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
+}
+
+/**
+ * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
+ */
+static void
+bfa_iocfc_send_cfg(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfg_req_s cfg_req;
+ struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
+ struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
+ int i;
+
+ bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
+ bfa_trc(bfa, cfg->fwcfg.num_cqs);
+
+ iocfc->cfgdone = BFA_FALSE;
+ bfa_iocfc_reset_queues(bfa);
+
+ /**
+ * initialize IOC configuration info
+ */
+ cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
+ cfg_info->num_cqs = cfg->fwcfg.num_cqs;
+
+ bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
+ bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);
+
+ /**
+ * dma map REQ and RSP circular queues and shadow pointers
+ */
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+ bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
+ iocfc->req_cq_ba[i].pa);
+ bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
+ iocfc->req_cq_shadow_ci[i].pa);
+ cfg_info->req_cq_elems[i] =
+ bfa_os_htons(cfg->drvcfg.num_reqq_elems);
+
+ bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
+ iocfc->rsp_cq_ba[i].pa);
+ bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
+ iocfc->rsp_cq_shadow_pi[i].pa);
+ cfg_info->rsp_cq_elems[i] =
+ bfa_os_htons(cfg->drvcfg.num_rspq_elems);
+ }
+
+ /**
+ * dma map IOC configuration itself
+ */
+ bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
+ bfa_lpuid(bfa));
+ bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
+
+ bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
+ sizeof(struct bfi_iocfc_cfg_req_s));
+}
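+
+/*
+ * Sketch, not from this patch: bfa_dma_be_addr_set() is expected to
+ * split a 64-bit DMA address into firmware-native big-endian words,
+ * roughly as below (the union and field names are assumptions).
+ */
+#if 0
+static inline void
+dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+	dma_addr->a32.addr_lo = bfa_os_htonl((u32)pa);
+	dma_addr->a32.addr_hi = bfa_os_htonl((u32)(pa >> 32));
+}
+#endif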
+
+static void
+bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ bfa->bfad = bfad;
+ iocfc->bfa = bfa;
+ iocfc->action = BFA_IOCFC_ACT_NONE;
+
+ bfa_os_assign(iocfc->cfg, *cfg);
+
+ /**
+ * Initialize chip specific handlers.
+ */
+ if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
+ iocfc->hwif.hw_reginit = bfa_hwct_reginit;
+ iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
+ iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
+ iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
+ iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
+ iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
+ iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
+ } else {
+ iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
+ iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+ iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
+ iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
+ iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
+ iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
+ iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
+ }
+
+ iocfc->hwif.hw_reginit(bfa);
+ bfa->msix.nvecs = 0;
+}
+
+static void
+bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo)
+{
+ u8 *dm_kva;
+ u64 dm_pa;
+ int i, per_reqq_sz, per_rspq_sz;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ int dbgsz;
+
+ dm_kva = bfa_meminfo_dma_virt(meminfo);
+ dm_pa = bfa_meminfo_dma_phys(meminfo);
+
+ /*
+ * First allocate dma memory for IOC.
+ */
+ bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
+ dm_kva += bfa_ioc_meminfo();
+ dm_pa += bfa_ioc_meminfo();
+
+ /*
+ * Claim DMA-able memory for the request/response queues and for shadow
+ * ci/pi registers
+ */
+ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+ per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+ iocfc->req_cq_ba[i].kva = dm_kva;
+ iocfc->req_cq_ba[i].pa = dm_pa;
+ bfa_os_memset(dm_kva, 0, per_reqq_sz);
+ dm_kva += per_reqq_sz;
+ dm_pa += per_reqq_sz;
+
+ iocfc->rsp_cq_ba[i].kva = dm_kva;
+ iocfc->rsp_cq_ba[i].pa = dm_pa;
+ bfa_os_memset(dm_kva, 0, per_rspq_sz);
+ dm_kva += per_rspq_sz;
+ dm_pa += per_rspq_sz;
+ }
+
+ for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+ iocfc->req_cq_shadow_ci[i].kva = dm_kva;
+ iocfc->req_cq_shadow_ci[i].pa = dm_pa;
+ dm_kva += BFA_CACHELINE_SZ;
+ dm_pa += BFA_CACHELINE_SZ;
+
+ iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
+ iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
+ dm_kva += BFA_CACHELINE_SZ;
+ dm_pa += BFA_CACHELINE_SZ;
+ }
+
+ /*
+ * Claim DMA-able memory for the config info page
+ */
+ bfa->iocfc.cfg_info.kva = dm_kva;
+ bfa->iocfc.cfg_info.pa = dm_pa;
+ bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
+ dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+ dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+
+ /*
+ * Claim DMA-able memory for the config response
+ */
+ bfa->iocfc.cfgrsp_dma.kva = dm_kva;
+ bfa->iocfc.cfgrsp_dma.pa = dm_pa;
+ bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
+
+ dm_kva +=
+ BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
+ dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
+
+ /*
+ * Claim DMA-able memory for iocfc stats
+ */
+ bfa->iocfc.stats_kva = dm_kva;
+ bfa->iocfc.stats_pa = dm_pa;
+ bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
+ dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
+ dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
+
+ bfa_meminfo_dma_virt(meminfo) = dm_kva;
+ bfa_meminfo_dma_phys(meminfo) = dm_pa;
+
+ dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
+ if (dbgsz > 0) {
+ bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
+ bfa_meminfo_kva(meminfo) += dbgsz;
+ }
+}
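+
+/*
+ * The claim pattern above -- walking a kva/pa cursor pair through one
+ * large DMA-able allocation -- reduces to a helper like this.  A sketch
+ * only; the driver open-codes it per block.
+ */
+#if 0
+static void *
+dma_carve(u8 **kva, u64 *pa, u32 len, u32 align)
+{
+	void *block = *kva;
+
+	len = BFA_ROUNDUP(len, align);
+	*kva += len;
+	*pa += len;
+	return block;
+}
+#endif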
+
+/**
+ * BFA submodules initialization completion notification.
+ */
+static void
+bfa_iocfc_initdone_submod(struct bfa_s *bfa)
+{
+ int i;
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->initdone(bfa);
+}
+
+/**
+ * Start BFA submodules.
+ */
+static void
+bfa_iocfc_start_submod(struct bfa_s *bfa)
+{
+ int i;
+
+ bfa->rme_process = BFA_TRUE;
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->start(bfa);
+}
+
+/**
+ * Disable BFA submodules.
+ */
+static void
+bfa_iocfc_disable_submod(struct bfa_s *bfa)
+{
+ int i;
+
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->iocdisable(bfa);
+}
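+
+/*
+ * Sketch (the field list is an approximation; the real table is
+ * declared in the HAL private headers): hal_mods is a NULL-terminated
+ * array of per-submodule ops, and the three walkers above fan one
+ * lifecycle event out to every submodule.
+ */
+#if 0
+struct bfa_module_s {
+	void	(*initdone)(struct bfa_s *bfa);
+	void	(*start)(struct bfa_s *bfa);
+	void	(*iocdisable)(struct bfa_s *bfa);
+	/* attach/detach/stop/meminfo hooks elided */
+};
+
+extern struct bfa_module_s *hal_mods[];	/* NULL-terminated */
+#endif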
+
+static void
+bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+	if (complete) {
+		if (bfa->iocfc.cfgdone)
+			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
+		else
+			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
+	} else {
+		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
+	}
+}
+
+static void
+bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfad_s *bfad = bfa->bfad;
+
+	if (compl)
+		complete(&bfad->comp);
+	else
+		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
+}
+
+static void
+bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfad_s *bfad = bfa->bfad;
+
+ if (compl)
+ complete(&bfad->disable_comp);
+}
+
+/**
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_iocfc_cfgrsp(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+ struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
+ struct bfi_iocfc_cfg_s *cfginfo = iocfc->cfginfo;
+
+	/* num_cqs is a single byte; no byte swap needed */
+ fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
+ fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
+ fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
+ fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
+ fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
+
+ cfginfo->intr_attr.coalesce = cfgrsp->intr_attr.coalesce;
+ cfginfo->intr_attr.delay = bfa_os_ntohs(cfgrsp->intr_attr.delay);
+ cfginfo->intr_attr.latency = bfa_os_ntohs(cfgrsp->intr_attr.latency);
+
+ iocfc->cfgdone = BFA_TRUE;
+
+ /**
+ * Configuration is complete - initialize/start submodules
+ */
+ if (iocfc->action == BFA_IOCFC_ACT_INIT)
+ bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
+ else
+ bfa_iocfc_start_submod(bfa);
+}
+
+static void
+bfa_iocfc_stats_clear(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_stats_req_s stats_req;
+
+ bfa_timer_start(bfa, &iocfc->stats_timer,
+ bfa_iocfc_stats_clr_timeout, bfa,
+ BFA_IOCFC_TOV);
+
+ bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
+ bfa_lpuid(bfa));
+ bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
+ sizeof(struct bfi_iocfc_stats_req_s));
+}
+
+static void
+bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
+{
+ u32 *dip = (u32 *) d;
+ u32 *sip = (u32 *) s;
+ int i;
+
+ for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
+ dip[i] = bfa_os_ntohl(sip[i]);
+}
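+
+/*
+ * The word-wise swap above assumes sizeof(struct bfa_fw_stats_s) is a
+ * multiple of sizeof(u32); a trailing partial word would be silently
+ * dropped.  A compile-time check of that assumption (a sketch, not in
+ * this patch) could read:
+ */
+#if 0
+typedef char fw_stats_swap_chk[
+	(sizeof(struct bfa_fw_stats_s) % sizeof(u32)) ? -1 : 1];
+#endif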
+
+static void
+bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ if (complete) {
+ bfa_ioc_clr_stats(&bfa->ioc);
+ iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
+ } else {
+ iocfc->stats_busy = BFA_FALSE;
+ iocfc->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_iocfc_stats_clr_timeout(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ bfa_trc(bfa, 0);
+
+ iocfc->stats_status = BFA_STATUS_ETIMER;
+ bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
+}
+
+static void
+bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ if (complete) {
+ if (iocfc->stats_status == BFA_STATUS_OK) {
+ bfa_os_memset(iocfc->stats_ret, 0,
+ sizeof(*iocfc->stats_ret));
+ bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
+ iocfc->fw_stats);
+ }
+ iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
+ } else {
+ iocfc->stats_busy = BFA_FALSE;
+ iocfc->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_iocfc_stats_timeout(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ bfa_trc(bfa, 0);
+
+ iocfc->stats_status = BFA_STATUS_ETIMER;
+ bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
+}
+
+static void
+bfa_iocfc_stats_query(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_stats_req_s stats_req;
+
+ bfa_timer_start(bfa, &iocfc->stats_timer,
+ bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);
+
+ bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
+ bfa_lpuid(bfa));
+ bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
+ sizeof(struct bfi_iocfc_stats_req_s));
+}
+
+void
+bfa_iocfc_reset_queues(struct bfa_s *bfa)
+{
+ int q;
+
+ for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
+ bfa_reqq_ci(bfa, q) = 0;
+ bfa_reqq_pi(bfa, q) = 0;
+ bfa_rspq_ci(bfa, q) = 0;
+ bfa_rspq_pi(bfa, q) = 0;
+ }
+}
+
+/**
+ * IOC enable request is complete
+ */
+static void
+bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ if (status != BFA_STATUS_OK) {
+ bfa_isr_disable(bfa);
+ if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
+ bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, bfa);
+ return;
+ }
+
+ bfa_iocfc_initdone_submod(bfa);
+ bfa_iocfc_send_cfg(bfa);
+}
+
+/**
+ * IOC disable request is complete
+ */
+static void
+bfa_iocfc_disable_cbfn(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ bfa_isr_disable(bfa);
+ bfa_iocfc_disable_submod(bfa);
+
+ if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
+ bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
+ bfa);
+ else {
+ bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
+ bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
+ bfa);
+ }
+}
+
+/**
+ * Notify sub-modules of hardware failure.
+ */
+static void
+bfa_iocfc_hbfail_cbfn(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ bfa->rme_process = BFA_FALSE;
+
+ bfa_isr_disable(bfa);
+ bfa_iocfc_disable_submod(bfa);
+
+ if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
+ bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
+ bfa);
+}
+
+/**
+ * Actions on chip-reset completion.
+ */
+static void
+bfa_iocfc_reset_cbfn(void *bfa_arg)
+{
+ struct bfa_s *bfa = bfa_arg;
+
+ bfa_iocfc_reset_queues(bfa);
+ bfa_isr_enable(bfa);
+}
+
+
+
+/**
+ * bfa_iocfc_public
+ */
+
+/**
+ * Query IOC memory requirement information.
+ */
+void
+bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+ u32 *dm_len)
+{
+ /* dma memory for IOC */
+ *dm_len += bfa_ioc_meminfo();
+
+ bfa_iocfc_fw_cfg_sz(cfg, dm_len);
+ bfa_iocfc_cqs_sz(cfg, dm_len);
+ *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
+}
+
+/**
+ * Attach the IOCFC module: set up IOC callbacks, claim memory and
+ * initialize chip-specific handlers.
+ */
+void
+bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+ int i;
+
+ bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
+ bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
+ bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
+ bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
+
+ bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
+ bfa->trcmod, bfa->aen, bfa->logm);
+ bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
+ bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
+
+	/**
+	 * Choose FC (ssid: 0x1C) vs. FCoE (ssid: 0x14) mode.
+	 */
+ if (0)
+ bfa_ioc_set_fcmode(&bfa->ioc);
+
+ bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
+ bfa_iocfc_mem_claim(bfa, cfg, meminfo);
+ bfa_timer_init(&bfa->timer_mod);
+
+ INIT_LIST_HEAD(&bfa->comp_q);
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+ INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+}
+
+/**
+ * Detach the IOCFC module and release IOC resources.
+ */
+void
+bfa_iocfc_detach(struct bfa_s *bfa)
+{
+ bfa_ioc_detach(&bfa->ioc);
+}
+
+/**
+ * Enable the IOC and start the configuration handshake with firmware.
+ */
+void
+bfa_iocfc_init(struct bfa_s *bfa)
+{
+ bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
+ bfa_ioc_enable(&bfa->ioc);
+ bfa_msix_install(bfa);
+}
+
+/**
+ * IOC start called from bfa_start(). Called to start IOC operations
+ * at driver instantiation for this instance.
+ */
+void
+bfa_iocfc_start(struct bfa_s *bfa)
+{
+ if (bfa->iocfc.cfgdone)
+ bfa_iocfc_start_submod(bfa);
+}
+
+/**
+ * IOC stop called from bfa_stop(). Called only when driver is unloaded
+ * for this instance.
+ */
+void
+bfa_iocfc_stop(struct bfa_s *bfa)
+{
+ bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
+
+ bfa->rme_process = BFA_FALSE;
+ bfa_ioc_disable(&bfa->ioc);
+}
+
+void
+bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
+{
+ struct bfa_s *bfa = bfaarg;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ union bfi_iocfc_i2h_msg_u *msg;
+
+ msg = (union bfi_iocfc_i2h_msg_u *) m;
+ bfa_trc(bfa, msg->mh.msg_id);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOCFC_I2H_CFG_REPLY:
+ iocfc->cfg_reply = &msg->cfg_reply;
+ bfa_iocfc_cfgrsp(bfa);
+ break;
+
+ case BFI_IOCFC_I2H_GET_STATS_RSP:
+ if (iocfc->stats_busy == BFA_FALSE
+ || iocfc->stats_status == BFA_STATUS_ETIMER)
+ break;
+
+ bfa_timer_stop(&iocfc->stats_timer);
+ iocfc->stats_status = BFA_STATUS_OK;
+ bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
+ bfa);
+ break;
+ case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
+ /*
+ * check for timer pop before processing the rsp
+ */
+ if (iocfc->stats_busy == BFA_FALSE
+ || iocfc->stats_status == BFA_STATUS_ETIMER)
+ break;
+
+ bfa_timer_stop(&iocfc->stats_timer);
+ iocfc->stats_status = BFA_STATUS_OK;
+ bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
+ bfa_iocfc_stats_clr_cb, bfa);
+ break;
+ case BFI_IOCFC_I2H_UPDATEQ_RSP:
+ iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
+ break;
+ default:
+ bfa_assert(0);
+ }
+}
+
+#ifndef BFA_BIOS_BUILD
+void
+bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
+{
+ bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
+}
+
+u64
+bfa_adapter_get_id(struct bfa_s *bfa)
+{
+ return bfa_ioc_get_adid(&bfa->ioc);
+}
+
+void
+bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ attr->intr_attr = iocfc->cfginfo->intr_attr;
+ attr->config = iocfc->cfg;
+}
+
+bfa_status_t
+bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_set_intr_req_s *m;
+
+ iocfc->cfginfo->intr_attr = *attr;
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_OK;
+
+ m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
+ if (!m)
+ return BFA_STATUS_DEVBUSY;
+
+ bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
+ bfa_lpuid(bfa));
+ m->coalesce = attr->coalesce;
+ m->delay = bfa_os_htons(attr->delay);
+ m->latency = bfa_os_htons(attr->latency);
+
+ bfa_trc(bfa, attr->delay);
+ bfa_trc(bfa, attr->latency);
+
+ bfa_reqq_produce(bfa, BFA_REQQ_IOC);
+ return BFA_STATUS_OK;
+}
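+
+/*
+ * Usage sketch (hypothetical caller and values, not from this patch):
+ * tune interrupt coalescing at runtime.  The delay/latency units are
+ * assumed to be microseconds.
+ */
+#if 0
+static bfa_status_t
+example_set_coalescing(struct bfa_s *bfa)
+{
+	struct bfa_iocfc_intr_attr_s attr;
+
+	attr.coalesce = BFA_TRUE;
+	attr.delay = 1125;
+	attr.latency = 225;
+
+	/* BFA_STATUS_DEVBUSY means no request queue element was free */
+	return bfa_iocfc_israttr_set(bfa, &attr);
+}
+#endif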
+
+void
+bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
+ bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
+}
+
+bfa_status_t
+bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
+ bfa_cb_ioc_t cbfn, void *cbarg)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ if (iocfc->stats_busy) {
+ bfa_trc(bfa, iocfc->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+ }
+
+ iocfc->stats_busy = BFA_TRUE;
+ iocfc->stats_ret = stats;
+ iocfc->stats_cbfn = cbfn;
+ iocfc->stats_cbarg = cbarg;
+
+ bfa_iocfc_stats_query(bfa);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+
+ if (iocfc->stats_busy) {
+ bfa_trc(bfa, iocfc->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+ }
+
+ iocfc->stats_busy = BFA_TRUE;
+ iocfc->stats_cbfn = cbfn;
+ iocfc->stats_cbarg = cbarg;
+
+ bfa_iocfc_stats_clear(bfa);
+	return BFA_STATUS_OK;
+}
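+
+/*
+ * Usage sketch for the two asynchronous stats calls above; the caller
+ * and callback are hypothetical, with the callback signature inferred
+ * from how stats_cbfn is invoked earlier in this file.
+ */
+#if 0
+static void
+example_stats_done(void *cbarg, enum bfa_status status)
+{
+	/*
+	 * On BFA_STATUS_OK the buffer handed to bfa_iocfc_get_stats()
+	 * holds the byte-swapped firmware counters.
+	 */
+}
+
+static bfa_status_t
+example_stats_query(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats)
+{
+	/* BFA_STATUS_DEVBUSY: a previous stats/statsclr is outstanding */
+	return bfa_iocfc_get_stats(bfa, stats, example_stats_done, stats);
+}
+#endif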
+
+/**
+ * Enable IOC after it is disabled.
+ */
+void
+bfa_iocfc_enable(struct bfa_s *bfa)
+{
+ bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
+ "IOC Enable");
+ bfa_ioc_enable(&bfa->ioc);
+}
+
+void
+bfa_iocfc_disable(struct bfa_s *bfa)
+{
+ bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
+ "IOC Disable");
+ bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
+
+ bfa->rme_process = BFA_FALSE;
+ bfa_ioc_disable(&bfa->ioc);
+}
+
+
+bfa_boolean_t
+bfa_iocfc_is_operational(struct bfa_s *bfa)
+{
+ return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
+}
+
+/**
+ * Return boot target port wwns -- read from boot information in flash.
+ */
+void
+bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+ *nwwns = cfgrsp->bootwwns.nwwns;
+ *wwns = cfgrsp->bootwwns.wwn;
+}
+
+#endif
+
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
new file mode 100644
index 000000000000..7ad177ed4cfc
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_IOCFC_H__
+#define __BFA_IOCFC_H__
+
+#include <bfa_ioc.h>
+#include <bfa.h>
+#include <bfi/bfi_iocfc.h>
+#include <bfa_callback_priv.h>
+
+#define BFA_REQQ_NELEMS_MIN (4)
+#define BFA_RSPQ_NELEMS_MIN (4)
+
+struct bfa_iocfc_regs_s {
+ bfa_os_addr_t intr_status;
+ bfa_os_addr_t intr_mask;
+ bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
+ bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
+ bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
+ bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
+ bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
+ bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
+ bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
+ bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
+};
+
+/**
+ * MSIX vector handlers
+ */
+#define BFA_MSIX_MAX_VECTORS 22
+typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
+struct bfa_msix_s {
+ int nvecs;
+ bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
+};
+
+/**
+ * Chip specific interfaces
+ */
+struct bfa_hwif_s {
+ void (*hw_reginit)(struct bfa_s *bfa);
+ void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
+ void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
+ void (*hw_msix_install)(struct bfa_s *bfa);
+ void (*hw_msix_uninstall)(struct bfa_s *bfa);
+ void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
+ void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
+ u32 *nvecs, u32 *maxvec);
+};
+typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
+
+struct bfa_iocfc_s {
+ struct bfa_s *bfa;
+ struct bfa_iocfc_cfg_s cfg;
+ int action;
+
+ u32 req_cq_pi[BFI_IOC_MAX_CQS];
+ u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
+
+ struct bfa_cb_qe_s init_hcb_qe;
+ struct bfa_cb_qe_s stop_hcb_qe;
+ struct bfa_cb_qe_s dis_hcb_qe;
+ struct bfa_cb_qe_s stats_hcb_qe;
+ bfa_boolean_t cfgdone;
+
+ struct bfa_dma_s cfg_info;
+ struct bfi_iocfc_cfg_s *cfginfo;
+ struct bfa_dma_s cfgrsp_dma;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp;
+ struct bfi_iocfc_cfg_reply_s *cfg_reply;
+
+ u8 *stats_kva;
+ u64 stats_pa;
+ struct bfa_fw_stats_s *fw_stats;
+	struct bfa_timer_s stats_timer;	/* stats timeout timer */
+ struct bfa_iocfc_stats_s *stats_ret; /* driver stats location */
+ bfa_status_t stats_status; /* stats/statsclr status */
+ bfa_boolean_t stats_busy; /* outstanding stats */
+ bfa_cb_ioc_t stats_cbfn; /* driver callback function */
+ void *stats_cbarg; /* user callback arg */
+
+ struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
+ struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
+ struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
+ struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
+ struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
+ struct bfa_hwif_s hwif;
+
+ bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
+ void *updateq_cbarg; /* bios callback arg */
+};
+
+#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc)
+#define bfa_msix_init(__bfa, __nvecs) \
+ (__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)
+#define bfa_msix_install(__bfa) \
+ (__bfa)->iocfc.hwif.hw_msix_install(__bfa)
+#define bfa_msix_uninstall(__bfa) \
+ (__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)
+#define bfa_isr_mode_set(__bfa, __msix) \
+ (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)
+#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
+ (__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)
+
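+/*
+ * Sketch, not part of this header: the wrappers above resolve through
+ * the per-chip ops table filled in by bfa_iocfc_init_mem().
+ */
+#if 0
+	bfa_msix_install(bfa);
+	/* ... expands to ... */
+	(bfa)->iocfc.hwif.hw_msix_install(bfa);
+	/* bfa_hwct_msix_install() on BFA_PCI_DEVICE_ID_CT parts,
+	 * bfa_hwcb_msix_install() otherwise. */
+#endif
+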
+/*
+ * FC specific IOC functions.
+ */
+void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+ u32 *dm_len);
+void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
+ struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_pcidev_s *pcidev);
+void bfa_iocfc_detach(struct bfa_s *bfa);
+void bfa_iocfc_init(struct bfa_s *bfa);
+void bfa_iocfc_start(struct bfa_s *bfa);
+void bfa_iocfc_stop(struct bfa_s *bfa);
+void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
+void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
+bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
+void bfa_iocfc_reset_queues(struct bfa_s *bfa);
+void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
+ u32 reqq_sci, u32 rspq_spi,
+ bfa_cb_iocfc_t cbfn, void *cbarg);
+
+void bfa_msix_all(struct bfa_s *bfa, int vec);
+void bfa_msix_reqq(struct bfa_s *bfa, int vec);
+void bfa_msix_rspq(struct bfa_s *bfa, int vec);
+void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
+
+void bfa_hwcb_reginit(struct bfa_s *bfa);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwcb_msix_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
+ u32 *nvecs, u32 *maxvec);
+void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwct_msix_install(struct bfa_s *bfa);
+void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
+ u32 *nvecs, u32 *maxvec);
+
+void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
+void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
+ bfa_boolean_t mincfg);
+void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns);
+
+#endif /* __BFA_IOCFC_H__ */
+
diff --git a/drivers/scsi/bfa/bfa_iocfc_q.c b/drivers/scsi/bfa/bfa_iocfc_q.c
new file mode 100644
index 000000000000..500a17df40b2
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_iocfc_q.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include "bfa_intr_priv.h"
+
+BFA_TRC_FILE(HAL, IOCFC_Q);
+
+void
+bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
+ u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn,
+ void *cbarg)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_updateq_req_s updateq_req;
+
+ iocfc->updateq_cbfn = cbfn;
+ iocfc->updateq_cbarg = cbarg;
+
+ bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ,
+ bfa_lpuid(bfa));
+
+ updateq_req.reqq_ba = bfa_os_htonl(reqq_ba);
+ updateq_req.rspq_ba = bfa_os_htonl(rspq_ba);
+ updateq_req.reqq_sci = bfa_os_htonl(reqq_sci);
+ updateq_req.rspq_spi = bfa_os_htonl(rspq_spi);
+
+ bfa_ioc_mbox_send(&bfa->ioc, &updateq_req,
+ sizeof(struct bfi_iocfc_updateq_req_s));
+}
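+
+/*
+ * Usage sketch (hypothetical caller, not in this patch): queue-update
+ * requests complete asynchronously via BFI_IOCFC_I2H_UPDATEQ_RSP, which
+ * bfa_iocfc_isr() delivers through this callback.
+ */
+#if 0
+static void
+example_updateq_done(void *cbarg, enum bfa_status status)
+{
+	/* BFA_STATUS_OK once firmware has switched to the new queues */
+}
+
+static void
+example_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
+		u32 reqq_sci, u32 rspq_spi)
+{
+	bfa_iocfc_updateq(bfa, reqq_ba, rspq_ba, reqq_sci, rspq_spi,
+			  example_updateq_done, NULL);
+}
+#endif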
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
new file mode 100644
index 000000000000..7ae2552e1e14
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -0,0 +1,1311 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <cs/bfa_debug.h>
+#include <bfa_cb_ioim_macros.h>
+
+BFA_TRC_FILE(HAL, IOIM);
+
+/*
+ * forward declarations.
+ */
+static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
+static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
+static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
+
+/**
+ * bfa_ioim_sm
+ */
+
+/**
+ * IO state machine events
+ */
+enum bfa_ioim_event {
+ BFA_IOIM_SM_START = 1, /* io start request from host */
+ BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
+ BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
+	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp with unknown tag */
+ BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
+ BFA_IOIM_SM_FREE = 6, /* io resource is freed */
+ BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
+ BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
+ BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
+ BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
+ BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
+ BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
+ BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
+ BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
+ BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
+ BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
+ BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
+ BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
+};
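+
+/*
+ * Sketch of how these events drive the state machine: the current state
+ * is a function pointer, and senders post events through a dispatch
+ * macro along these lines (the macro body here is an assumption; the
+ * real one lives in the cs/ state machine header).
+ */
+#if 0
+#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
+
+	/* e.g. on IO start from the host */
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
+#endif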
+
+/*
+ * forward declaration of IO state machine
+ */
+static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
+ enum bfa_ioim_event event);
+
+/**
+ * IO is not started (unallocated).
+ */
+static void
+bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc_fp(ioim->bfa, ioim->iotag);
+ bfa_trc_fp(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_START:
+ if (!bfa_itnim_is_online(ioim->itnim)) {
+ if (!bfa_itnim_hold_io(ioim->itnim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe,
+ &ioim->fcpim->ioim_comp_q);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_pathtov, ioim);
+ } else {
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe,
+ &ioim->itnim->pending_q);
+ }
+ break;
+ }
+
+ if (ioim->nsges > BFI_SGE_INLINE) {
+ if (!bfa_ioim_sge_setup(ioim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
+ return;
+ }
+ }
+
+ if (!bfa_ioim_send_ioreq(ioim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+ break;
+ }
+
+ bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+ break;
+
+ case BFA_IOIM_SM_IOTOV:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_pathtov, ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ /**
+ * IO in pending queue can get abort requests. Complete abort
+ * requests immediately.
+ */
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO is waiting for SG pages.
+ */
+static void
+bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_SGALLOCED:
+ if (!bfa_ioim_send_ioreq(ioim)) {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+ break;
+ }
+ bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO is active.
+ */
+static void
+bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc_fp(ioim->bfa, ioim->iotag);
+ bfa_trc_fp(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_COMP_GOOD:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_good_comp, ioim);
+ break;
+
+ case BFA_IOIM_SM_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ ioim->iosp->abort_explicit = BFA_TRUE;
+ ioim->io_cbfn = __bfa_cb_ioim_abort;
+
+ if (bfa_ioim_send_abort(ioim))
+ bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+ else {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
+ bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
+ &ioim->iosp->reqq_wait);
+ }
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ ioim->iosp->abort_explicit = BFA_FALSE;
+ ioim->io_cbfn = __bfa_cb_ioim_failed;
+
+ if (bfa_ioim_send_abort(ioim))
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+ else {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+ bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
+ &ioim->iosp->reqq_wait);
+ }
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO is being aborted, waiting for completion from firmware.
+ */
+static void
+bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ case BFA_IOIM_SM_DONE:
+ case BFA_IOIM_SM_FREE:
+ break;
+
+ case BFA_IOIM_SM_ABORT_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_COMP_UTAG:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
+ ioim->iosp->abort_explicit = BFA_FALSE;
+
+ if (bfa_ioim_send_abort(ioim))
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+ else {
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+ bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
+ &ioim->iosp->reqq_wait);
+ }
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO is being cleaned up (implicit abort), waiting for completion from
+ * firmware.
+ */
+static void
+bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ case BFA_IOIM_SM_DONE:
+ case BFA_IOIM_SM_FREE:
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ /**
+ * IO is already being aborted implicitly
+ */
+ ioim->io_cbfn = __bfa_cb_ioim_abort;
+ break;
+
+ case BFA_IOIM_SM_ABORT_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_COMP_UTAG:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+		/**
+		 * IO can already be in cleanup state due to a TM command;
+		 * the second cleanup request comes from the ITN offline
+		 * event.
+		 */
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO is waiting for room in request CQ
+ */
+static void
+bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_QRESUME:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+ bfa_ioim_send_ioreq(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * Active IO is being aborted, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_QRESUME:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+ bfa_ioim_send_abort(ioim);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
+ ioim->iosp->abort_explicit = BFA_FALSE;
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+ break;
+
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+ ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * Active IO is being cleaned up, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_QRESUME:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+ bfa_ioim_send_abort(ioim);
+ break;
+
+ case BFA_IOIM_SM_ABORT:
+ /**
+		 * IO is already being cleaned up implicitly
+ */
+ ioim->io_cbfn = __bfa_cb_ioim_abort;
+ break;
+
+ case BFA_IOIM_SM_COMP_GOOD:
+ case BFA_IOIM_SM_COMP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_DONE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+ ioim);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO bfa callback is pending.
+ */
+static void
+bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc_fp(ioim->bfa, ioim->iotag);
+ bfa_trc_fp(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_HCB:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+ bfa_ioim_free(ioim);
+ bfa_cb_ioim_resfree(ioim->bfa->bfad);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO bfa callback is pending. IO resource cannot be freed.
+ */
+static void
+bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_HCB:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
+ list_del(&ioim->qe);
+ list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
+ break;
+
+ case BFA_IOIM_SM_FREE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_ioim_notify_cleanup(ioim);
+ break;
+
+ case BFA_IOIM_SM_HWFAIL:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ break;
+
+ default:
+ bfa_assert(0);
+ }
+}
+
+/**
+ * IO is completed, waiting resource free from firmware.
+ */
+static void
+bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+ bfa_trc(ioim->bfa, ioim->iotag);
+ bfa_trc(ioim->bfa, event);
+
+ switch (event) {
+ case BFA_IOIM_SM_FREE:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+ bfa_ioim_free(ioim);
+ bfa_cb_ioim_resfree(ioim->bfa->bfad);
+ break;
+
+ case BFA_IOIM_SM_CLEANUP:
+ bfa_ioim_notify_cleanup(ioim);
+ break;