author    Linus Torvalds <torvalds@linux-foundation.org>  2008-01-25 17:19:08 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-01-25 17:19:08 -0800
commit    9b73e76f3cf63379dcf45fcd4f112f5812418d0a (patch)
tree      4e6bef87cd0cd6d848fc39a5ae25b981dbbe035b
parent    50d9a126240f9961cfdd063336bbeb91f77a7dce (diff)
parent    23c3e290fb9ce38cabc2822b47583fc8702411bf (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (200 commits)
  [SCSI] usbstorage: use last_sector_bug flag universally
  [SCSI] libsas: abstract STP task status into a function
  [SCSI] ultrastor: clean up inline asm warnings
  [SCSI] aic7xxx: fix firmware build
  [SCSI] aacraid: fib context lock for management ioctls
  [SCSI] ch: remove forward declarations
  [SCSI] ch: fix device minor number management bug
  [SCSI] ch: handle class_device_create failure properly
  [SCSI] NCR5380: fix section mismatch
  [SCSI] sg: fix /proc/scsi/sg/devices when no SCSI devices
  [SCSI] IB/iSER: add logical unit reset support
  [SCSI] don't use __GFP_DMA for sense buffers if not required
  [SCSI] use dynamically allocated sense buffer
  [SCSI] scsi.h: add macro for enclosure bit of inquiry data
  [SCSI] sd: add fix for devices with last sector access problems
  [SCSI] fix pcmcia compile problem
  [SCSI] aacraid: add Voodoo Lite class of cards.
  [SCSI] aacraid: add new driver features flags
  [SCSI] qla2xxx: Update version number to 8.02.00-k7.
  [SCSI] qla2xxx: Issue correct MBC_INITIALIZE_FIRMWARE command.
  ...
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/scsi.tmpl409
-rw-r--r--Documentation/dontdiff2
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/m68k/kernel-options.txt60
-rw-r--r--Documentation/scsi/00-INDEX2
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas159
-rw-r--r--Documentation/scsi/aacraid.txt4
-rw-r--r--Documentation/scsi/hptiop.txt30
-rw-r--r--Documentation/scsi/ncr53c7xx.txt40
-rw-r--r--MAINTAINERS6
-rw-r--r--block/bsg.c14
-rw-r--r--block/ll_rw_blk.c24
-rw-r--r--drivers/ata/libata-scsi.c11
-rw-r--r--drivers/base/attribute_container.c14
-rw-r--r--drivers/firewire/fw-sbp2.c6
-rw-r--r--drivers/ieee1394/sbp2.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c15
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c4
-rw-r--r--drivers/message/fusion/mptbase.c94
-rw-r--r--drivers/message/fusion/mptbase.h2
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/i2o/i2o_scsi.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c3
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c6
-rw-r--r--drivers/s390/scsi/zfcp_def.h22
-rw-r--r--drivers/s390/scsi/zfcp_erp.c14
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c70
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c24
-rw-r--r--drivers/scsi/.gitignore2
-rw-r--r--drivers/scsi/3w-9xxx.c1
-rw-r--r--drivers/scsi/53c700.c11
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/Kconfig32
-rw-r--r--drivers/scsi/Makefile5
-rw-r--r--drivers/scsi/NCR5380.c23
-rw-r--r--drivers/scsi/a2091.c36
-rw-r--r--drivers/scsi/a3000.c15
-rw-r--r--drivers/scsi/aacraid/aachba.c400
-rw-r--r--drivers/scsi/aacraid/aacraid.h335
-rw-r--r--drivers/scsi/aacraid/commctrl.c112
-rw-r--r--drivers/scsi/aacraid/comminit.c4
-rw-r--r--drivers/scsi/aacraid/commsup.c394
-rw-r--r--drivers/scsi/aacraid/dpcsup.c10
-rw-r--r--drivers/scsi/aacraid/linit.c242
-rw-r--r--drivers/scsi/aacraid/rx.c6
-rw-r--r--drivers/scsi/advansys.c14
-rw-r--r--drivers/scsi/aha152x.c38
-rw-r--r--drivers/scsi/aha1542.c49
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic7xxx/Makefile45
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c6
-rw-r--r--drivers/scsi/aic7xxx_old.c11
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c6
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.h3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c190
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c6
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c389
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.h121
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c50
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c12
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c6
-rw-r--r--drivers/scsi/atari_NCR5380.c24
-rw-r--r--drivers/scsi/atp870u.c102
-rw-r--r--drivers/scsi/ch.c215
-rw-r--r--drivers/scsi/constants.c3
-rw-r--r--drivers/scsi/dc395x.c16
-rw-r--r--drivers/scsi/dpt_i2o.c5
-rw-r--r--drivers/scsi/eata.c4
-rw-r--r--drivers/scsi/eata_pio.c13
-rw-r--r--drivers/scsi/fd_mcs.c36
-rw-r--r--drivers/scsi/gdth.c22
-rw-r--r--drivers/scsi/hosts.c4
-rw-r--r--drivers/scsi/hptiop.c593
-rw-r--r--drivers/scsi/hptiop.h124
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c155
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/imm.c13
-rw-r--r--drivers/scsi/in2000.c10
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/ips.c326
-rw-r--r--drivers/scsi/ips.h32
-rw-r--r--drivers/scsi/iscsi_tcp.c2075
-rw-r--r--drivers/scsi/iscsi_tcp.h134
-rw-r--r--drivers/scsi/libiscsi.c1091
-rw-r--r--drivers/scsi/libsas/Kconfig9
-rw-r--r--drivers/scsi/libsas/Makefile4
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c35
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c274
-rw-r--r--drivers/scsi/libsas/sas_internal.h16
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c9
-rw-r--r--drivers/scsi/libsas/sas_task.c36
-rw-r--r--drivers/scsi/libsrp.c23
-rw-r--r--drivers/scsi/lpfc/lpfc.h53
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c217
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c304
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c157
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c623
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c372
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h112
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c429
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c160
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c534
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c93
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h2
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c572
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h18
-rw-r--r--drivers/scsi/ncr53c8xx.c3
-rw-r--r--drivers/scsi/pcmcia/Kconfig3
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c54
-rw-r--r--drivers/scsi/ppa.c12
-rw-r--r--drivers/scsi/psi240i.c689
-rw-r--r--drivers/scsi/psi240i.h315
-rw-r--r--drivers/scsi/psi_chip.h195
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c54
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h52
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c175
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h30
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h51
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c97
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c430
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c80
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c11
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c7
-rw-r--r--drivers/scsi/qlogicpti.c31
-rw-r--r--drivers/scsi/scsi.c282
-rw-r--r--drivers/scsi/scsi_debug.c37
-rw-r--r--drivers/scsi/scsi_devinfo.c34
-rw-r--r--drivers/scsi/scsi_error.c131
-rw-r--r--drivers/scsi/scsi_ioctl.c26
-rw-r--r--drivers/scsi/scsi_lib.c117
-rw-r--r--drivers/scsi/scsi_netlink.c19
-rw-r--r--drivers/scsi/scsi_proc.c110
-rw-r--r--drivers/scsi/scsi_scan.c36
-rw-r--r--drivers/scsi/scsi_sysfs.c1
-rw-r--r--drivers/scsi/scsi_tgt_if.c2
-rw-r--r--drivers/scsi/scsi_tgt_lib.c30
-rw-r--r--drivers/scsi/scsi_transport_fc.c102
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c330
-rw-r--r--drivers/scsi/scsi_transport_sas.c41
-rw-r--r--drivers/scsi/scsi_transport_spi.c258
-rw-r--r--drivers/scsi/scsi_transport_srp.c13
-rw-r--r--drivers/scsi/scsicam.c35
-rw-r--r--drivers/scsi/sd.c44
-rw-r--r--drivers/scsi/seagate.c1667
-rw-r--r--drivers/scsi/sg.c24
-rw-r--r--drivers/scsi/sgiwd93.c1
-rw-r--r--drivers/scsi/sr.c35
-rw-r--r--drivers/scsi/sr.h4
-rw-r--r--drivers/scsi/sr_ioctl.c48
-rw-r--r--drivers/scsi/st.c9
-rw-r--r--drivers/scsi/sun3_NCR5380.c24
-rw-r--r--drivers/scsi/sym53c416.c16
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c22
-rw-r--r--drivers/scsi/tmscsim.c6
-rw-r--r--drivers/scsi/u14-34f.c4
-rw-r--r--drivers/scsi/ultrastor.c15
-rw-r--r--drivers/scsi/wd33c93.c10
-rw-r--r--drivers/scsi/wd7000.c12
-rw-r--r--drivers/usb/storage/freecom.c14
-rw-r--r--drivers/usb/storage/isd200.c66
-rw-r--r--drivers/usb/storage/protocol.c126
-rw-r--r--drivers/usb/storage/scsiglue.c24
-rw-r--r--drivers/usb/storage/sddr09.c9
-rw-r--r--drivers/usb/storage/shuttle_usbat.c68
-rw-r--r--drivers/usb/storage/transport.c45
-rw-r--r--drivers/usb/storage/transport.h2
-rw-r--r--fs/sysfs/file.c11
-rw-r--r--fs/sysfs/group.c26
-rw-r--r--include/linux/attribute_container.h1
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/sysfs.h2
-rw-r--r--include/scsi/iscsi_if.h20
-rw-r--r--include/scsi/iscsi_proto.h14
-rw-r--r--include/scsi/libiscsi.h82
-rw-r--r--include/scsi/libsas.h28
-rw-r--r--include/scsi/sas.h13
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_device.h13
-rw-r--r--include/scsi/scsi_transport_iscsi.h10
-rw-r--r--include/scsi/scsi_transport_sas.h16
-rw-r--r--include/scsi/sd.h1
-rw-r--r--kernel/params.c2
210 files changed, 10519 insertions, 9019 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 4953bc258729..6a0ad4715e9f 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -11,7 +11,7 @@ DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
procfs-guide.xml writing_usb_driver.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
- genericirq.xml s390-drivers.xml uio-howto.xml
+ genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml
###
# The build process is as follows (targets):
diff --git a/Documentation/DocBook/scsi.tmpl b/Documentation/DocBook/scsi.tmpl
new file mode 100644
index 000000000000..f299ab182bbe
--- /dev/null
+++ b/Documentation/DocBook/scsi.tmpl
@@ -0,0 +1,409 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="scsimid">
+ <bookinfo>
+ <title>SCSI Interfaces Guide</title>
+
+ <authorgroup>
+ <author>
+ <firstname>James</firstname>
+ <surname>Bottomley</surname>
+ <affiliation>
+ <address>
+ <email>James.Bottomley@steeleye.com</email>
+ </address>
+ </affiliation>
+ </author>
+
+ <author>
+ <firstname>Rob</firstname>
+ <surname>Landley</surname>
+ <affiliation>
+ <address>
+ <email>rob@landley.net</email>
+ </address>
+ </affiliation>
+ </author>
+
+ </authorgroup>
+
+ <copyright>
+ <year>2007</year>
+ <holder>Linux Foundation</holder>
+ </copyright>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+ <toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <sect1 id="protocol_vs_bus">
+ <title>Protocol vs bus</title>
+ <para>
+      Once upon a time, the Small Computer System Interface defined both
+ a parallel I/O bus and a data protocol to connect a wide variety of
+ peripherals (disk drives, tape drives, modems, printers, scanners,
+ optical drives, test equipment, and medical devices) to a host
+ computer.
+ </para>
+ <para>
+ Although the old parallel (fast/wide/ultra) SCSI bus has largely
+ fallen out of use, the SCSI command set is more widely used than ever
+ to communicate with devices over a number of different busses.
+ </para>
+ <para>
+ The <ulink url='http://www.t10.org/scsi-3.htm'>SCSI protocol</ulink>
+ is a big-endian peer-to-peer packet based protocol. SCSI commands
+ are 6, 10, 12, or 16 bytes long, often followed by an associated data
+ payload.
+ </para>
+ <para>
+ SCSI commands can be transported over just about any kind of bus, and
+ are the default protocol for storage devices attached to USB, SATA,
+ SAS, Fibre Channel, FireWire, and ATAPI devices. SCSI packets are
+ also commonly exchanged over Infiniband,
+      <ulink url='http://i2o.shadowconnect.com/faq.php'>I2O</ulink>, TCP/IP
+ (<ulink url='http://en.wikipedia.org/wiki/ISCSI'>iSCSI</ulink>), even
+ <ulink url='http://cyberelk.net/tim/parport/parscsi.html'>Parallel
+ ports</ulink>.
+ </para>
+ </sect1>
+ <sect1 id="subsystem_design">
+ <title>Design of the Linux SCSI subsystem</title>
+ <para>
+ The SCSI subsystem uses a three layer design, with upper, mid, and low
+ layers. Every operation involving the SCSI subsystem (such as reading
+ a sector from a disk) uses one driver at each of the 3 levels: one
+ upper layer driver, one lower layer driver, and the SCSI midlayer.
+ </para>
+ <para>
+ The SCSI upper layer provides the interface between userspace and the
+ kernel, in the form of block and char device nodes for I/O and
+ ioctl(). The SCSI lower layer contains drivers for specific hardware
+ devices.
+ </para>
+ <para>
+ In between is the SCSI mid-layer, analogous to a network routing
+ layer such as the IPv4 stack. The SCSI mid-layer routes a packet
+ based data protocol between the upper layer's /dev nodes and the
+ corresponding devices in the lower layer. It manages command queues,
+ provides error handling and power management functions, and responds
+ to ioctl() requests.
+ </para>
+ </sect1>
+ </chapter>
+
+ <chapter id="upper_layer">
+ <title>SCSI upper layer</title>
+ <para>
+ The upper layer supports the user-kernel interface by providing
+ device nodes.
+ </para>
+ <sect1 id="sd">
+ <title>sd (SCSI Disk)</title>
+ <para>sd (sd_mod.o)</para>
+<!-- !Idrivers/scsi/sd.c -->
+ </sect1>
+ <sect1 id="sr">
+ <title>sr (SCSI CD-ROM)</title>
+ <para>sr (sr_mod.o)</para>
+ </sect1>
+ <sect1 id="st">
+ <title>st (SCSI Tape)</title>
+ <para>st (st.o)</para>
+ </sect1>
+ <sect1 id="sg">
+ <title>sg (SCSI Generic)</title>
+ <para>sg (sg.o)</para>
+ </sect1>
+ <sect1 id="ch">
+ <title>ch (SCSI Media Changer)</title>
+ <para>ch (ch.c)</para>
+ </sect1>
+ </chapter>
+
+ <chapter id="mid_layer">
+ <title>SCSI mid layer</title>
+
+ <sect1 id="midlayer_implementation">
+ <title>SCSI midlayer implementation</title>
+ <sect2 id="scsi_device.h">
+ <title>include/scsi/scsi_device.h</title>
+ <para>
+ </para>
+!Iinclude/scsi/scsi_device.h
+ </sect2>
+
+ <sect2 id="scsi.c">
+ <title>drivers/scsi/scsi.c</title>
+ <para>Main file for the SCSI midlayer.</para>
+!Edrivers/scsi/scsi.c
+ </sect2>
+ <sect2 id="scsicam.c">
+ <title>drivers/scsi/scsicam.c</title>
+ <para>
+ <ulink url='http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf'>SCSI
+ Common Access Method</ulink> support functions, for use with
+ HDIO_GETGEO, etc.
+ </para>
+!Edrivers/scsi/scsicam.c
+ </sect2>
+ <sect2 id="scsi_error.c">
+ <title>drivers/scsi/scsi_error.c</title>
+ <para>Common SCSI error/timeout handling routines.</para>
+!Edrivers/scsi/scsi_error.c
+ </sect2>
+ <sect2 id="scsi_devinfo.c">
+ <title>drivers/scsi/scsi_devinfo.c</title>
+ <para>
+ Manage scsi_dev_info_list, which tracks blacklisted and whitelisted
+ devices.
+ </para>
+!Idrivers/scsi/scsi_devinfo.c
+ </sect2>
+ <sect2 id="scsi_ioctl.c">
+ <title>drivers/scsi/scsi_ioctl.c</title>
+ <para>
+ Handle ioctl() calls for SCSI devices.
+ </para>
+!Edrivers/scsi/scsi_ioctl.c
+ </sect2>
+ <sect2 id="scsi_lib.c">
+ <title>drivers/scsi/scsi_lib.c</title>
+ <para>
+ SCSI queuing library.
+ </para>
+!Edrivers/scsi/scsi_lib.c
+ </sect2>
+ <sect2 id="scsi_lib_dma.c">
+ <title>drivers/scsi/scsi_lib_dma.c</title>
+ <para>
+ SCSI library functions depending on DMA
+ (map and unmap scatter-gather lists).
+ </para>
+!Edrivers/scsi/scsi_lib_dma.c
+ </sect2>
+ <sect2 id="scsi_module.c">
+ <title>drivers/scsi/scsi_module.c</title>
+ <para>
+ The file drivers/scsi/scsi_module.c contains legacy support for
+ old-style host templates. It should never be used by any new driver.
+ </para>
+ </sect2>
+ <sect2 id="scsi_proc.c">
+ <title>drivers/scsi/scsi_proc.c</title>
+ <para>
+      The functions in this file provide an interface between
+      the PROC file system and the SCSI device drivers.
+      It is mainly used for debugging, statistics and to pass
+      information directly to the lowlevel driver.
+
+      In other words: plumbing to manage /proc/scsi/*.
+ </para>
+!Idrivers/scsi/scsi_proc.c
+ </sect2>
+ <sect2 id="scsi_netlink.c">
+ <title>drivers/scsi/scsi_netlink.c</title>
+ <para>
+ Infrastructure to provide async events from transports to userspace
+ via netlink, using a single NETLINK_SCSITRANSPORT protocol for all
+ transports.
+
+ See <ulink url='http://marc.info/?l=linux-scsi&amp;m=115507374832500&amp;w=2'>the
+ original patch submission</ulink> for more details.
+ </para>
+!Idrivers/scsi/scsi_netlink.c
+ </sect2>
+ <sect2 id="scsi_scan.c">
+ <title>drivers/scsi/scsi_scan.c</title>
+ <para>
+ Scan a host to determine which (if any) devices are attached.
+
+      The general scanning/probing algorithm is as follows; exceptions are
+      made to it depending on device-specific flags, compilation options,
+      and global variable (boot or module load time) settings.
+
+ A specific LUN is scanned via an INQUIRY command; if the LUN has a
+ device attached, a scsi_device is allocated and setup for it.
+
+ For every id of every channel on the given host, start by scanning
+ LUN 0. Skip hosts that don't respond at all to a scan of LUN 0.
+      Otherwise, if LUN 0 has a device attached, allocate and set up a
+      scsi_device for it. If the target is SCSI-3 or higher, issue a
+      REPORT LUNS command and scan all of the LUNs it returns; else,
+ sequentially scan LUNs up until some maximum is reached, or a LUN is
+ seen that cannot have a device attached to it.
+ </para>
+!Idrivers/scsi/scsi_scan.c
+ </sect2>
+ <sect2 id="scsi_sysctl.c">
+ <title>drivers/scsi/scsi_sysctl.c</title>
+ <para>
+ Set up the sysctl entry: "/dev/scsi/logging_level"
+ (DEV_SCSI_LOGGING_LEVEL) which sets/returns scsi_logging_level.
+ </para>
+ </sect2>
+ <sect2 id="scsi_sysfs.c">
+ <title>drivers/scsi/scsi_sysfs.c</title>
+ <para>
+ SCSI sysfs interface routines.
+ </para>
+!Edrivers/scsi/scsi_sysfs.c
+ </sect2>
+ <sect2 id="hosts.c">
+ <title>drivers/scsi/hosts.c</title>
+ <para>
+ mid to lowlevel SCSI driver interface
+ </para>
+!Edrivers/scsi/hosts.c
+ </sect2>
+ <sect2 id="constants.c">
+ <title>drivers/scsi/constants.c</title>
+ <para>
+      ASCII name tables for SCSI opcodes, sense keys and error codes.
+ </para>
+!Edrivers/scsi/constants.c
+ </sect2>
+ </sect1>
+
+ <sect1 id="Transport_classes">
+ <title>Transport classes</title>
+ <para>
+ Transport classes are service libraries for drivers in the SCSI
+ lower layer, which expose transport attributes in sysfs.
+ </para>
+ <sect2 id="Fibre_Channel_transport">
+ <title>Fibre Channel transport</title>
+ <para>
+ The file drivers/scsi/scsi_transport_fc.c defines transport attributes
+ for Fibre Channel.
+ </para>
+!Edrivers/scsi/scsi_transport_fc.c
+ </sect2>
+ <sect2 id="iSCSI_transport">
+ <title>iSCSI transport class</title>
+ <para>
+ The file drivers/scsi/scsi_transport_iscsi.c defines transport
+ attributes for the iSCSI class, which sends SCSI packets over TCP/IP
+ connections.
+ </para>
+!Edrivers/scsi/scsi_transport_iscsi.c
+ </sect2>
+ <sect2 id="SAS_transport">
+ <title>Serial Attached SCSI (SAS) transport class</title>
+ <para>
+ The file drivers/scsi/scsi_transport_sas.c defines transport
+      attributes for Serial Attached SCSI, the serial successor to the
+      parallel SCSI bus, aimed at large high-end systems.
+ </para>
+ <para>
+ The SAS transport class contains common code to deal with SAS HBAs,
+      an approximate representation of SAS topologies in the driver model,
+      and various sysfs attributes to expose these topologies and management
+ interfaces to userspace.
+ </para>
+ <para>
+ In addition to the basic SCSI core objects this transport class
+ introduces two additional intermediate objects: The SAS PHY
+ as represented by struct sas_phy defines an "outgoing" PHY on
+ a SAS HBA or Expander, and the SAS remote PHY represented by
+ struct sas_rphy defines an "incoming" PHY on a SAS Expander or
+      end device. Note that this is purely a software concept; the
+      underlying hardware for a PHY and a remote PHY is exactly
+      the same.
+ </para>
+ <para>
+      There is no concept of a SAS port in this code; users can see
+      which PHYs form a wide port based on the port_identifier attribute,
+ which is the same for all PHYs in a port.
+ </para>
+!Edrivers/scsi/scsi_transport_sas.c
+ </sect2>
+ <sect2 id="SATA_transport">
+ <title>SATA transport class</title>
+ <para>
+ The SATA transport is handled by libata, which has its own book of
+ documentation in this directory.
+ </para>
+ </sect2>
+ <sect2 id="SPI_transport">
+ <title>Parallel SCSI (SPI) transport class</title>
+ <para>
+ The file drivers/scsi/scsi_transport_spi.c defines transport
+ attributes for traditional (fast/wide/ultra) SCSI busses.
+ </para>
+!Edrivers/scsi/scsi_transport_spi.c
+ </sect2>
+ <sect2 id="SRP_transport">
+ <title>SCSI RDMA (SRP) transport class</title>
+ <para>
+ The file drivers/scsi/scsi_transport_srp.c defines transport
+ attributes for SCSI over Remote Direct Memory Access.
+ </para>
+!Edrivers/scsi/scsi_transport_srp.c
+ </sect2>
+ </sect1>
+
+ </chapter>
+
+ <chapter id="lower_layer">
+ <title>SCSI lower layer</title>
+ <sect1 id="hba_drivers">
+ <title>Host Bus Adapter transport types</title>
+ <para>
+ Many modern device controllers use the SCSI command set as a protocol to
+ communicate with their devices through many different types of physical
+ connections.
+ </para>
+ <para>
+ In SCSI language a bus capable of carrying SCSI commands is
+ called a "transport", and a controller connecting to such a bus is
+ called a "host bus adapter" (HBA).
+ </para>
+ <sect2 id="scsi_debug.c">
+ <title>Debug transport</title>
+ <para>
+ The file drivers/scsi/scsi_debug.c simulates a host adapter with a
+      variable number of disks (or disk-like devices) attached, sharing a
+      common amount of RAM. It does a lot of checking to make sure that we
+      are not getting blocks mixed up, and panics the kernel if anything
+      out of the ordinary is seen.
+ </para>
+ <para>
+ To be more realistic, the simulated devices have the transport
+ attributes of SAS disks.
+ </para>
+ <para>
+ For documentation see
+ <ulink url='http://www.torque.net/sg/sdebug26.html'>http://www.torque.net/sg/sdebug26.html</ulink>
+ </para>
+<!-- !Edrivers/scsi/scsi_debug.c -->
+ </sect2>
+ <sect2 id="todo">
+ <title>todo</title>
+ <para>Parallel (fast/wide/ultra) SCSI, USB, SATA,
+ SAS, Fibre Channel, FireWire, ATAPI devices, Infiniband,
+      I2O, iSCSI, Parallel ports, netlink...
+ </para>
+ </sect2>
+ </sect1>
+ </chapter>
+</book>
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index f2d658a6a942..c09a96b99354 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -46,8 +46,6 @@
.mailmap
.mm
53c700_d.h
-53c7xx_d.h
-53c7xx_u.h
53c8xx_d.h*
BitKeeper
COPYING
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 17fc60e32443..65de5ba7b74c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1598,7 +1598,13 @@ and is between 256 and 4096 characters. It is defined in the file
Format: <vendor>:<model>:<flags>
(flags are integer value)
- scsi_logging= [SCSI]
+ scsi_logging_level= [SCSI] a bit mask of logging levels
+ See drivers/scsi/scsi_logging.h for bits. Also
+ settable via sysctl at dev.scsi.logging_level
+ (/proc/sys/dev/scsi/logging_level).
+ There is also a nice 'scsi_logging_level' script in the
+ S390-tools package, available for download at
+ http://www-128.ibm.com/developerworks/linux/linux390/s390-tools-1.5.4.html
scsi_mod.scan= [SCSI] sync (default) scans SCSI busses as they are
discovered. async scans them in kernel threads,
diff --git a/Documentation/m68k/kernel-options.txt b/Documentation/m68k/kernel-options.txt
index 248589e8bcf5..c93bed66e25d 100644
--- a/Documentation/m68k/kernel-options.txt
+++ b/Documentation/m68k/kernel-options.txt
@@ -867,66 +867,6 @@ controller and should be autodetected by the driver. An example is the
24 bit region which is specified by a mask of 0x00fffffe.
-5.5) 53c7xx=
-------------
-
-Syntax: 53c7xx=<sub-options...>
-
-These options affect the A4000T, A4091, WarpEngine, Blizzard 603e+,
-and GForce 040/060 SCSI controllers on the Amiga, as well as the
-builtin MVME 16x SCSI controller.
-
-The <sub-options> is a comma-separated list of the sub-options listed
-below.
-
-5.5.1) nosync
--------------
-
-Syntax: nosync:0
-
- Disables sync negotiation for all devices. Any value after the
- colon is acceptable (and has the same effect).
-
-5.5.2) noasync
---------------
-
-[OBSOLETE, REMOVED]
-
-5.5.3) nodisconnect
--------------------
-
-Syntax: nodisconnect:0
-
- Disables SCSI disconnects. Any value after the colon is acceptable
- (and has the same effect).
-
-5.5.4) validids
----------------
-
-Syntax: validids:0xNN
-
- Specify which SCSI ids the driver should pay attention to. This is
- a bitmask (i.e. to only pay attention to ID#4, you'd use 0x10).
- Default is 0x7f (devices 0-6).
-
-5.5.5) opthi
-5.5.6) optlo
-------------
-
-Syntax: opthi:M,optlo:N
-
- Specify options for "hostdata->options". The acceptable definitions
- are listed in drivers/scsi/53c7xx.h; the 32 high bits should be in
- opthi and the 32 low bits in optlo. They must be specified in the
- order opthi=M,optlo=N.
-
-5.5.7) next
------------
-
- No argument. Used to separate blocks of keywords when there's more
- than one 53c7xx host adapter in the system.
-
-
/* Local Variables: */
/* mode: text */
/* End: */
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index aa1f7e927834..c2e18e109858 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -64,8 +64,6 @@ lpfc.txt
- LPFC driver release notes
megaraid.txt
- Common Management Module, shared code handling ioctls for LSI drivers
-ncr53c7xx.txt
- - info on driver for NCR53c7xx based adapters
ncr53c8xx.txt
- info on driver for NCR53c8xx based adapters
osst.txt
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 5eb927544990..91c81db0ba71 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,162 @@
+1 Release Date : Thur. Nov. 07 16:30:43 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.16
+3 Older Version : 00.00.03.15
+
+1. Increased MFI_POLL_TIMEOUT_SECS to 60 seconds from 10. FW may take
+ a max of 60 seconds to respond to the INIT cmd.
+
+1 Release Date : Fri. Sep. 07 16:30:43 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.15
+3 Older Version : 00.00.03.14
+
+1. Added module parameter "poll_mode_io" to support for "polling"
+ (reduced interrupt operation). In this mode, IO completion
+ interrupts are delayed. At the end of initiating IOs, the
+ driver schedules for cmd completion if there are pending cmds
+ to be completed. A timer-based interrupt has also been added
+ to prevent IO completion processing from being delayed
+ indefinitely in the case that no new IOs are initiated.
+
+1 Release Date : Fri. Sep. 07 16:30:43 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.14
+3 Older Version : 00.00.03.13
+
+1. Setting the max_sectors_per_req based on max SGL supported by the
+ FW. Prior versions calculated this value from controller info
+ (max_sectors_1, max_sectors_2). For certain controllers/FW,
+ this was resulting in a value greater than max SGL supported
+ by the FW. Issue was first reported by users running LUKS+XFS
+ with megaraid_sas. Thanks to RB for providing the logs and
+ duplication steps that helped to get to the root cause of the
+	issue.
+2. Increased MFI_POLL_TIMEOUT_SECS to 60 seconds from 10. FW may
+	take a max of 60 seconds to respond to the INIT cmd.
+
+1 Release Date : Fri. June. 15 16:30:43 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.13
+3 Older Version : 00.00.03.12
+
+1. Added the megasas_reset_timer routine to intercept cmd timeout and throttle io.
+
+On Fri, 2007-03-16 at 16:44 -0600, James Bottomley wrote:
+It looks like megaraid_sas at least needs this to throttle its commands
+> as they begin to time out. The code keeps the existing transport
+> template use of eh_timed_out (and allows the transport to override the
+> host if they both have this callback).
+>
+> James
+
+1 Release Date : Sat May. 12 16:30:43 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.12
+3 Older Version : 00.00.03.11
+
+1. When MegaSAS driver receives reset call from OS, driver waits in reset
+routine for max 3 minutes for all pending command completion. Now driver will
+call completion routine every 5 seconds from the reset routine instead of
+waiting for depending on cmd completion from isr path.
+
+1 Release Date : Mon Apr. 30 10:25:52 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.11
+3 Older Version : 00.00.03.09
+
+ 1. Memory Manager for IOCTL removed for 2.6 kernels.
+ pci_alloc_consistent replaced by dma_alloc_coherent. With this
+ change there is no need of memory manager in the driver code
+
+ On Wed, 2007-02-07 at 13:30 -0800, Andrew Morton wrote:
+ > I suspect all this horror is due to stupidity in the DMA API.
+ >
+ > pci_alloc_consistent() just goes and assumes GFP_ATOMIC, whereas
+ > the caller (megasas_mgmt_fw_ioctl) would have been perfectly happy
+ > to use GFP_KERNEL.
+ >
+ > I bet this fixes it
+
+ It does, but the DMA API was expanded to cope with this exact case, so
+ use dma_alloc_coherent() directly in the megaraid code instead. The dev
+ is just &pci_dev->dev.
+
+ James <James.Bottomley@SteelEye.com>
+
+ 3. SYNCHRONIZE_CACHE is not supported by FW and thus blocked by driver.
+ 4. Hibernation support added
+ 5. Performing diskdump while running IO in RHEL 4 was failing. Fixed.
+
+1 Release Date : Fri Feb. 09 14:36:28 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+
+2 Current Version : 00.00.03.09
+3 Older Version : 00.00.03.08
+
+i. Under heavy IO mid-layer prints "DRIVER_TIMEOUT" errors
+
+ The driver now waits for 10 seconds to elapse instead of 5 (as in
+ previous release) to resume IO.
+
+1 Release Date : Mon Feb. 05 11:35:24 PST 2007 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Sumant Patro
+ Bo Yang
+2 Current Version : 00.00.03.08
+3 Older Version : 00.00.03.07
+
+i. Under heavy IO mid-layer prints "DRIVER_TIMEOUT" errors
+
+ Fix: The driver is now throttling IO.
+ Checks added in megasas_queue_command to know if FW is able to
+ process commands within timeout period. If number of retries
+	is 2 or greater, the driver stops sending cmd to FW temporarily. IO is
+ resumed if pending cmd count reduces to 16 or 5 seconds has elapsed
+ from the time cmds were last sent to FW.
+
+ii. FW enables WCE bit in Mode Sense cmd for drives that are configured
+ as WriteBack. The OS may send "SYNCHRONIZE_CACHE" cmd when Logical
+ Disks are exposed with WCE=1. User is advised to enable Write Back
+ mode only when the controller has battery backup. At this time
+	Synchronize cache is not supported by the FW. Driver will short-cycle
+	the cmd and return success without sending down to FW.
+
+1 Release Date : Sun Jan. 14 11:21:32 PDT 2007 -
+ Sumant Patro <Sumant.Patro@lsil.com>/Bo Yang
+2 Current Version : 00.00.03.07
+3 Older Version : 00.00.03.06
+
+i. bios_param entry added in scsi_host_template that returns disk geometry
+ information.
+
+1 Release Date : Fri Oct 20 11:21:32 PDT 2006 - Sumant Patro <Sumant.Patro@lsil.com>/Bo Yang
+2 Current Version : 00.00.03.06
+3 Older Version : 00.00.03.05
+
+1. Added new memory management module to support the IOCTL memory allocation. For IOCTL we try to allocate from the memory pool created during driver initialization. If mem pool is empty then we allocate at run time.
+2. Added check in megasas_queue_command and dpc/isr routine to see if we have already declared adapter dead
+	(hw_crit_error=1). If hw_crit_error==1, now we do not accept any processing of pending cmds/accept any cmd from OS
1 Release Date : Mon Oct 02 11:21:32 PDT 2006 - Sumant Patro <Sumant.Patro@lsil.com>
2 Current Version : 00.00.03.05
diff --git a/Documentation/scsi/aacraid.txt b/Documentation/scsi/aacraid.txt
index a8257840695a..d16011a8618e 100644
--- a/Documentation/scsi/aacraid.txt
+++ b/Documentation/scsi/aacraid.txt
@@ -56,6 +56,10 @@ Supported Cards/Chipsets
9005:0285:9005:02d1 Adaptec 5405 (Voodoo40)
9005:0285:15d9:02d2 SMC AOC-USAS-S8i-LP
9005:0285:15d9:02d3 SMC AOC-USAS-S8iR-LP
+ 9005:0285:9005:02d4 Adaptec 2045 (Voodoo04 Lite)
+ 9005:0285:9005:02d5 Adaptec 2405 (Voodoo40 Lite)
+ 9005:0285:9005:02d6 Adaptec 2445 (Voodoo44 Lite)
+ 9005:0285:9005:02d7 Adaptec 2805 (Voodoo80 Lite)
1011:0046:9005:0364 Adaptec 5400S (Mustang)
9005:0287:9005:0800 Adaptec Themisto (Jupiter)
9005:0200:9005:0200 Adaptec Themisto (Jupiter)
diff --git a/Documentation/scsi/hptiop.txt b/Documentation/scsi/hptiop.txt
index d28a31247d4c..a6eb4add1be6 100644
--- a/Documentation/scsi/hptiop.txt
+++ b/Documentation/scsi/hptiop.txt
@@ -1,9 +1,9 @@
-HIGHPOINT ROCKETRAID 3xxx RAID DRIVER (hptiop)
+HIGHPOINT ROCKETRAID 3xxx/4xxx ADAPTER DRIVER (hptiop)
Controller Register Map
-------------------------
-The controller IOP is accessed via PCI BAR0.
+For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
BAR0 offset Register
0x10 Inbound Message Register 0
@@ -18,6 +18,24 @@ The controller IOP is accessed via PCI BAR0.
0x40 Inbound Queue Port
0x44 Outbound Queue Port
+For Marvell IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
+
+ BAR0 offset Register
+ 0x20400 Inbound Doorbell Register
+ 0x20404 Inbound Interrupt Mask Register
+ 0x20408 Outbound Doorbell Register
+ 0x2040C Outbound Interrupt Mask Register
+
+ BAR1 offset Register
+ 0x0 Inbound Queue Head Pointer
+ 0x4 Inbound Queue Tail Pointer
+ 0x8 Outbound Queue Head Pointer
+ 0xC Outbound Queue Tail Pointer
+ 0x10 Inbound Message Register
+ 0x14 Outbound Message Register
+ 0x40-0x1040 Inbound Queue
+ 0x1040-0x2040 Outbound Queue
+
I/O Request Workflow
----------------------
@@ -73,15 +91,9 @@ The driver exposes following sysfs attributes:
driver-version R driver version string
firmware-version R firmware version string
-The driver registers char device "hptiop" to communicate with HighPoint RAID
-management software. Its ioctl routine acts as a general binary interface
-between the IOP firmware and HighPoint RAID management software. New management
-functions can be implemented in application/firmware without modification
-in driver code.
-
-----------------------------------------------------------------------------
-Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
+Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/Documentation/scsi/ncr53c7xx.txt b/Documentation/scsi/ncr53c7xx.txt
deleted file mode 100644
index 91e9552d63e5..000000000000
--- a/Documentation/scsi/ncr53c7xx.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-README for WarpEngine/A4000T/A4091 SCSI kernels.
-
-Use the following options to disable options in the SCSI driver.
-
-Using amiboot for example.....
-
-To disable Synchronous Negotiation....
-
- amiboot -k kernel 53c7xx=nosync:0
-
-To disable Disconnection....
-
- amiboot -k kernel 53c7xx=nodisconnect:0
-
-To disable certain SCSI devices...
-
- amiboot -k kernel 53c7xx=validids:0x3F
-
- this allows only device ID's 0,1,2,3,4 and 5 for linux to handle.
- (this is a bitmasked field - i.e. each bit represents a SCSI ID)
-
-These commands work on a per controller basis and use the option 'next' to
-move to the next controller in the system.
-
-e.g.
- amiboot -k kernel 53c7xx=nodisconnect:0,next,nosync:0
-
- this uses No Disconnection on the first controller and Asynchronous
- SCSI on the second controller.
-
-Known Issues:
-
-Two devices are known not to function with the default settings of using
-synchronous SCSI. These are the Archive Viper 150 Tape Drive and the
-SyQuest SQ555 removeable hard drive. When using these devices on a controller
-use the 'nosync:0' option.
-
-Please try these options and post any problems/successes to me.
-
-Alan Hourihane <alanh@fairlite.demon.co.uk>
diff --git a/MAINTAINERS b/MAINTAINERS
index 17524afa7475..59db481c77de 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3269,8 +3269,10 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
S390 ZFCP DRIVER
-P: Swen Schillig
-M: swen@vnet.ibm.com
+P: Christof Schmitt
+M: christof.schmitt@de.ibm.com
+P: Martin Peschke
+M: mp3@de.ibm.com
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
diff --git a/block/bsg.c b/block/bsg.c
index 8e181ab3afb9..69b0a9d33306 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -445,6 +445,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
else
hdr->dout_resid = rq->data_len;
+ /*
+ * If the request generated a negative error number, return it
+ * (providing we aren't already returning an error); if it's
+ * just a protocol response (i.e. non negative), that gets
+ * processed above.
+ */
+ if (!ret && rq->errors < 0)
+ ret = rq->errors;
+
blk_rq_unmap_user(bio);
blk_put_request(rq);
@@ -837,6 +846,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
int __user *uarg = (int __user *) arg;
+ int ret;
switch (cmd) {
/*
@@ -889,12 +899,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (rq->next_rq)
bidi_bio = rq->next_rq->bio;
blk_execute_rq(bd->queue, NULL, rq, 0);
- blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
+ ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
if (copy_to_user(uarg, &hdr, sizeof(hdr)))
return -EFAULT;
- return 0;
+ return ret;
}
/*
* block device ioctls
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5ccec8aa964b..3d0422f48453 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -760,6 +760,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
EXPORT_SYMBOL(blk_queue_dma_alignment);
/**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * description:
+ *    update required memory and length alignment for direct dma transactions.
+ * If the requested alignment is larger than the current alignment, then
+ * the current queue alignment is updated to the new value, otherwise it
+ * is left alone. The design of this is to allow multiple objects
+ * (driver, device, transport etc) to set their respective
+ * alignments without having them interfere.
+ *
+ **/
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+ BUG_ON(mask > PAGE_SIZE);
+
+ if (mask > q->dma_alignment)
+ q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+
+/**
* blk_queue_find_tag - find a request by its tag and queue
* @q: The request queue for the device
* @tag: The tag of the request
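For context, blk_queue_update_dma_alignment() only ever raises a queue's alignment mask, so several layers (driver, device, transport) can each state their own requirement without undoing one another. A minimal, hypothetical sketch of a low-level driver raising the alignment to a full 512-byte sector from its slave_configure hook, mirroring the 511 mask the sbp2 hunks below pass from slave_alloc (the function name here is illustrative only, not part of this patch):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

/* Illustrative only: require that user buffers for this device start
 * and end on a 512-byte boundary.  The helper never lowers the mask,
 * so calling it from several places is safe. */
static int example_slave_configure(struct scsi_device *sdev)
{
	blk_queue_update_dma_alignment(sdev->request_queue, 511);
	return 0;
}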
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3fd08201bef4..c02c490122dc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -839,7 +839,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
if (dev->class == ATA_DEV_ATAPI) {
struct request_queue *q = sdev->request_queue;
blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
- }
+
+ /* set the min alignment */
+ blk_queue_update_dma_alignment(sdev->request_queue,
+ ATA_DMA_PAD_SZ - 1);
+ } else
+ /* ATA devices must be sector aligned */
+ blk_queue_update_dma_alignment(sdev->request_queue,
+ ATA_SECT_SIZE - 1);
if (dev->class == ATA_DEV_ATA)
sdev->manage_start_stop = 1;
@@ -878,7 +885,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
if (dev)
ata_scsi_dev_config(sdev, dev);
- return 0; /* scsi layer doesn't check return value, sigh */
+ return 0;
}
/**
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index d4dfb97de3b0..3b43e8a9f87e 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -320,9 +320,14 @@ attribute_container_add_attrs(struct class_device *classdev)
struct class_device_attribute **attrs = cont->attrs;
int i, error;
- if (!attrs)
+ BUG_ON(attrs && cont->grp);
+
+ if (!attrs && !cont->grp)
return 0;
+ if (cont->grp)
+ return sysfs_create_group(&classdev->kobj, cont->grp);
+
for (i = 0; attrs[i]; i++) {
error = class_device_create_file(classdev, attrs[i]);
if (error)
@@ -378,9 +383,14 @@ attribute_container_remove_attrs(struct class_device *classdev)
struct class_device_attribute **attrs = cont->attrs;
int i;
- if (!attrs)
+ if (!attrs && !cont->grp)
return;
+ if (cont->grp) {
+ sysfs_remove_group(&classdev->kobj, cont->grp);
+ return ;
+ }
+
for (i = 0; attrs[i]; i++)
class_device_remove_file(classdev, attrs[i]);
}
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 624ff3e082f6..c2169d215bf7 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -1238,6 +1238,12 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
sdev->allow_restart = 1;
+ /*
+ * Update the dma alignment (minimum alignment requirements for
+ * start and end of DMA transfers) to be a sector
+ */
+ blk_queue_update_dma_alignment(sdev->request_queue, 511);
+
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index b83d254bc86e..1eda11abeb1e 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1963,6 +1963,12 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
lu->sdev = sdev;
sdev->allow_restart = 1;
+ /*
+ * Update the dma alignment (minimum alignment requirements for
+ * start and end of DMA transfers) to be a sector
+ */
+ blk_queue_update_dma_alignment(sdev->request_queue, 511);
+
if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
return 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index dfa5a4544187..be1b9fbd416d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -129,7 +129,7 @@ error:
* iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
*
**/
-static void
+static int
iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
{
struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
@@ -138,6 +138,7 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
iser_ctask->command_sent = 0;
iser_ctask->iser_conn = iser_conn;
iser_ctask_rdma_init(iser_ctask);
+ return 0;
}
/**
@@ -220,12 +221,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
debug_scsi("ctask deq [cid %d itt 0x%x]\n",
conn->id, ctask->itt);
- /*
- * serialize with TMF AbortTask
- */
- if (ctask->mtask)
- return error;
-
/* Send the cmd PDU */
if (!iser_ctask->command_sent) {
error = iser_send_command(conn, ctask);
@@ -406,6 +401,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
ctask = session->cmds[i];
iser_ctask = ctask->dd_data;
ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
+ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
}
for (i = 0; i < session->mgmtpool_max; i++) {
@@ -557,6 +553,7 @@ static struct scsi_host_template iscsi_iser_sht = {
.max_sectors = 1024,
.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler= iscsi_eh_device_reset,
.eh_host_reset_handler = iscsi_eh_host_reset,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "iscsi_iser",
@@ -583,7 +580,9 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_PERSISTENT_ADDRESS |
ISCSI_TARGET_NAME | ISCSI_TPGT |
ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO,
.host_param_mask = ISCSI_HOST_HWADDRESS |
ISCSI_HOST_NETDEV_NAME |
ISCSI_HOST_INITIATOR_NAME,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index ba1b455949c0..83247f1fdf72 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -621,9 +621,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
struct iscsi_session *session = conn->session;
spin_lock(&conn->session->lock);
- list_del(&mtask->running);
- __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
- sizeof(void*));
+ iscsi_free_mgmt_task(conn, mtask);
spin_unlock(&session->lock);
}
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 52fb216dfe74..425f60c21fdd 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2056,7 +2056,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"mpt_upload: alt_%s has cached_fw=%p \n",
ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
- ioc->alt_ioc->cached_fw = NULL;
+ ioc->cached_fw = NULL;
}
} else {
printk(MYIOC_s_WARN_FMT
@@ -2262,10 +2262,12 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
int ret;
if (ioc->cached_fw != NULL) {
- ddlprintk(ioc, printk(MYIOC_s_INFO_FMT
- "mpt_adapter_disable: Pushing FW onto adapter\n", ioc->name));
- if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)ioc->cached_fw, NO_SLEEP)) < 0) {
- printk(MYIOC_s_WARN_FMT "firmware downloadboot failure (%d)!\n",
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
+ "adapter\n", __FUNCTION__, ioc->name));
+ if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
+ ioc->cached_fw, CAN_SLEEP)) < 0) {
+ printk(MYIOC_s_WARN_FMT
+ ": firmware downloadboot failure (%d)!\n",
ioc->name, ret);
}
}
@@ -2303,13 +2305,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
ioc->alloc_total -= sz;
}
- if (ioc->cached_fw != NULL) {
- sz = ioc->facts.FWImageSize;
- pci_free_consistent(ioc->pcidev, sz,
- ioc->cached_fw, ioc->cached_fw_dma);
- ioc->cached_fw = NULL;
- ioc->alloc_total -= sz;
- }
+ mpt_free_fw_memory(ioc);
kfree(ioc->spi_data.nvram);
mpt_inactive_raid_list_free(ioc);
@@ -3047,44 +3043,62 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
*
* If memory has already been allocated, the same (cached) value
* is returned.
- */
-void
+ *
+ *	Return 0 if successful, or non-zero for failure
+ **/
+int
mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
{
- if (ioc->cached_fw)
- return; /* use already allocated memory */
- if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
+ int rc;
+
+ if (ioc->cached_fw) {
+ rc = 0; /* use already allocated memory */
+ goto out;
+ }
+ else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
- ioc->alloc_total += size;
- ioc->alt_ioc->alloc_total -= size;
+ rc = 0;
+ goto out;
+ }
+ ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
+ if (!ioc->cached_fw) {
+ printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
+ ioc->name);
+ rc = -1;
} else {
- if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
- ioc->alloc_total += size;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n",
+ ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size));
+ ioc->alloc_total += size;
+ rc = 0;
}
+ out:
+ return rc;
}
+
/**
* mpt_free_fw_memory - free firmware memory
* @ioc: Pointer to MPT_ADAPTER structure
*
* If alt_img is NULL, delete from ioc structure.
* Else, delete a secondary image in same format.
- */
+ **/
void
mpt_free_fw_memory(MPT_ADAPTER *ioc)
{
int sz;
+ if (!ioc->cached_fw)
+ return;
+
sz = ioc->facts.FWImageSize;
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
- ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
+ ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
+ ioc->alloc_total -= sz;
ioc->cached_fw = NULL;
-
- return;
}
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
@@ -3116,17 +3130,12 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
if ((sz = ioc->facts.FWImageSize) == 0)
return 0;
- mpt_alloc_fw_memory(ioc, sz);
+ if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0)
+ return -ENOMEM;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
- if (ioc->cached_fw == NULL) {
- /* Major Failure.
- */
- return -ENOMEM;
- }
-
prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) :
kzalloc(ioc->req_sz, GFP_KERNEL);
if (!prequest) {
@@ -3498,12 +3507,12 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
static int
mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
{
- MPT_ADAPTER *iocp=NULL;
u32 diag0val;
u32 doorbell;
int hard_reset_done = 0;
int count = 0;
u32 diag1val = 0;
+ MpiFwHeader_t *cached_fw; /* Pointer to FW */
/* Clear any existing interrupts */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
@@ -3635,22 +3644,24 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
}
if (ioc->cached_fw)
- iocp = ioc;
+ cached_fw = (MpiFwHeader_t *)ioc->cached_fw;
else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
- iocp = ioc->alt_ioc;
- if (iocp) {
+ cached_fw = (MpiFwHeader_t *)ioc->alt_ioc->cached_fw;
+ else
+ cached_fw = NULL;
+ if (cached_fw) {
/* If the DownloadBoot operation fails, the
* IOC will be left unusable. This is a fatal error
* case. _diag_reset will return < 0
*/
for (count = 0; count < 30; count ++) {
- diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic);
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
break;
}
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n",
- iocp->name, diag0val, count));
+ ioc->name, diag0val, count));
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
msleep (1000);
@@ -3658,8 +3669,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
mdelay (1000);
}
}
- if ((count = mpt_downloadboot(ioc,
- (MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) {
+ if ((count = mpt_downloadboot(ioc, cached_fw, sleepFlag)) < 0) {
printk(MYIOC_s_WARN_FMT
"firmware downloadboot failure (%d)!\n", ioc->name, count);
}
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d7682e083f59..b49b706c0020 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -907,7 +907,7 @@ extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
-extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
+extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e4c94f93de16..f77b329f6923 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1343,6 +1343,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
+ req->data_len = 0;
+ rsp->data_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
ioc->name, __FUNCTION__);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 626bb3c9af2b..5c614ec38cc4 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -111,7 +111,7 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
int mptscsih_resume(struct pci_dev *pdev);
#endif
-#define SNS_LEN(scp) sizeof((scp)->sense_buffer)
+#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index aa6fb9429d58..1bcdbbb9e7d3 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -370,7 +370,7 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
*/
if (cmd->result)
memcpy(cmd->sense_buffer, &msg->body[3],
- min(sizeof(cmd->sense_buffer), (size_t) 40));
+ min(SCSI_SENSE_BUFFERSIZE, 40));
/* only output error code if AdapterStatus is not HBA_SUCCESS */
if ((error >> 8) & 0xff)
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 00118499018b..874b55ed00a3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -844,8 +844,6 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
unit->sysfs_device.release = zfcp_sysfs_unit_release;
dev_set_drvdata(&unit->sysfs_device, unit);
- init_waitqueue_head(&unit->scsi_scan_wq);
-
/* mark unit unusable as long as sysfs registration is not complete */
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 86c3f6539a7d..edc5015e920d 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -123,6 +123,9 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) {
list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) {
+ if (atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
+ &unit->status))
+ scsi_remove_device(unit->device);
zfcp_unit_dequeue(unit);
}
zfcp_port_dequeue(port);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index ffa3bf756943..701046c9bb33 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -161,12 +161,6 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
(fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
level = 4;
- } else if ((prot_status_qual->doubleword[0] != 0) ||
- (prot_status_qual->doubleword[1] != 0) ||
- (fsf_status_qual->doubleword[0] != 0) ||
- (fsf_status_qual->doubleword[1] != 0)) {
- strncpy(rec->tag2, "qual", ZFCP_DBF_TAG_SIZE);
- level = 3;
} else {
strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
level = 6;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e268f79bdbd2..9e9f6c1e4e5d 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -118,7 +118,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
#define ZFCP_SBAL_TIMEOUT (5*HZ)
-#define ZFCP_TYPE2_RECOVERY_TIME (8*HZ)
+#define ZFCP_TYPE2_RECOVERY_TIME 8 /* seconds */
/* queue polling (values in microseconds) */
#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */
@@ -139,7 +139,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
#define ZFCP_STATUS_READS_RECOM FSF_STATUS_READS_RECOM
/* Do 1st retry in 1 second, then double the timeout for each following retry */
-#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
+#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 1
#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
/* timeout value for "default timer" for fsf requests */
@@ -983,10 +983,6 @@ struct zfcp_unit {
struct scsi_device *device; /* scsi device struct pointer */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
- wait_queue_head_t scsi_scan_wq; /* can be used to wait until
- all scsi_scan_target
- requests have been
- completed. */
};
/* FSF request */
@@ -1127,6 +1123,20 @@ zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
return NULL;
}
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
+{
+ struct zfcp_fsf_req *request;
+ unsigned int idx;
+
+ for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
+ list_for_each_entry(request, &adapter->req_list[idx], list)
+ if (request == req)
+ return request;
+ }
+ return NULL;
+}
+
/*
* functions needed for reference/usage counting
*/
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 07fa824d179f..4f86c0e12961 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -131,7 +131,7 @@ static void zfcp_close_qdio(struct zfcp_adapter *adapter)
debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
while (qdio_shutdown(adapter->ccw_device,
QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
- msleep(1000);
+ ssleep(1);
debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
/* cleanup used outbound sbals */
@@ -456,7 +456,7 @@ zfcp_test_link(struct zfcp_port *port)
zfcp_port_get(port);
retval = zfcp_erp_adisc(port);
- if (retval != 0) {
+ if (retval != 0 && retval != -EBUSY) {
zfcp_port_put(port);
ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
"on adapter %s\n ", port->wwpn,
@@ -846,7 +846,8 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
if (erp_action->fsf_req) {
/* take lock to ensure that request is not deleted meanwhile */
spin_lock(&adapter->req_list_lock);
- if (zfcp_reqlist_find(adapter, erp_action->fsf_req->req_id)) {
+ if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) &&
+ erp_action->fsf_req->erp_action == erp_action) {
/* fsf_req still exists */
debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req,
@@ -1609,7 +1610,6 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
unit->scsi_lun, 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
- wake_up(&unit->scsi_scan_wq);
zfcp_unit_put(unit);
kfree(p);
}
@@ -1900,7 +1900,7 @@ zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
ZFCP_LOG_INFO("Waiting to allow the adapter %s "
"to recover itself\n",
zfcp_get_busid_by_adapter(adapter));
- msleep(jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));
+ ssleep(ZFCP_TYPE2_RECOVERY_TIME);
}
return retval;
@@ -2080,7 +2080,7 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
while (qdio_shutdown(adapter->ccw_device,
QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
- msleep(1000);
+ ssleep(1);
debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
failed_qdio_establish:
@@ -2165,7 +2165,7 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
ZFCP_LOG_DEBUG("host connection still initialising... "
"waiting and retrying...\n");
/* sleep a little bit before retry */
- msleep(jiffies_to_msecs(sleep));
+ ssleep(sleep);
sleep *= 2;
}
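The msleep()-to-ssleep() conversions in this file pair with ZFCP_TYPE2_RECOVERY_TIME and ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP changing from jiffies to plain seconds in zfcp_def.h above; ssleep(s) is the seconds-based wrapper around msleep(s * 1000). A small stand-alone sketch of the unit mismatch the paired change avoids; the HZ value and the jiffies_to_msecs() stand-in are illustrative only:

#include <stdio.h>

#define HZ 250				/* example kernel configuration */
#define ZFCP_TYPE2_RECOVERY_TIME 8	/* new definition: seconds */

/* userspace stand-in for the kernel helper, illustration only */
static unsigned int jiffies_to_msecs(unsigned int j) { return j * (1000 / HZ); }

int main(void)
{
	/* old pairing: constant was 8*HZ jiffies, slept via msleep(jiffies_to_msecs(...)) */
	printf("old pairing: %u ms\n", jiffies_to_msecs(8 * HZ));		/* 8000 */
	/* new pairing: ssleep(ZFCP_TYPE2_RECOVERY_TIME) also sleeps 8000 ms; keeping
	 * the old msleep() call with the new seconds-valued constant would sleep only: */
	printf("stale pairing: %u ms\n", jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));	/* 32 */
	return 0;
}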
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ff866ebd44ac..fe57941ab55d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1116,6 +1116,10 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
goto out;
}
+ if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+ &unit->status)))
+ goto unit_blocked;
+
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1131,22 +1135,13 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
retval = zfcp_fsf_req_send(fsf_req);
- if (retval) {
- ZFCP_LOG_INFO("error: Failed to send abort command request "
- "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
- zfcp_get_busid_by_adapter(adapter),
- unit->port->wwpn, unit->fcp_lun);
+ if (!retval)
+ goto out;
+
+ unit_blocked:
zfcp_fsf_req_free(fsf_req);
fsf_req = NULL;
- goto out;
- }
- ZFCP_LOG_DEBUG("Abort FCP Command request initiated "
- "(adapter%s, port d_id=0x%06x, "
- "unit x%016Lx, old_req_id=0x%lx)\n",
- zfcp_get_busid_by_adapter(adapter),
- unit->port->d_id,
- unit->fcp_lun, old_req_id);
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
return fsf_req;
@@ -1164,8 +1159,8 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
{
int retval = -EINVAL;
struct zfcp_unit *unit;
- unsigned char status_qual =
- new_fsf_req->qtcb->header.fsf_status_qual.word[0];
+ union fsf_status_qual *fsf_stat_qual =
+ &new_fsf_req->qtcb->header.fsf_status_qual;
if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
/* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
@@ -1178,7 +1173,7 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
switch (new_fsf_req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
- if (status_qual >> 4 != status_qual % 0xf) {
+ if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
"fsf_s_phand_nv0");
/*
@@ -1207,8 +1202,7 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
break;
case FSF_LUN_HANDLE_NOT_VALID:
- if (status_qual >> 4 != status_qual % 0xf) {
- /* 2 */
+ if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
"fsf_s_lhand_nv0");
/*
@@ -1674,6 +1668,12 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
goto failed_req;
}
+ if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+ &els->port->status))) {
+ ret = -EBUSY;
+ goto port_blocked;
+ }
+
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
if (zfcp_use_one_sbal(els->req, els->req_count,
els->resp, els->resp_count)){
@@ -1755,6 +1755,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
"0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
goto out;
+ port_blocked:
failed_send:
zfcp_fsf_req_free(fsf_req);
@@ -3592,6 +3593,12 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
goto failed_req_create;
}
+ if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+ &unit->status))) {
+ retval = -EBUSY;
+ goto unit_blocked;
+ }
+
zfcp_unit_get(unit);
fsf_req->unit = unit;
@@ -3732,6 +3739,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
send_failed:
no_fit:
failed_scsi_cmnd:
+ unit_blocked:
zfcp_unit_put(unit);
zfcp_fsf_req_free(fsf_req);
fsf_req = NULL;
@@ -3766,6 +3774,10 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
goto out;
}
+ if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
+ &unit->status)))
+ goto unit_blocked;
+
/*
* Used to decide on proper handler in the return path,
* could be either zfcp_fsf_send_fcp_command_task_handler or
@@ -3799,25 +3811,13 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
retval = zfcp_fsf_req_send(fsf_req);
- if (retval) {
- ZFCP_LOG_INFO("error: Could not send an FCP-command (task "
- "management) on adapter %s, port 0x%016Lx for "
- "unit LUN 0x%016Lx\n",
- zfcp_get_busid_by_adapter(adapter),
- unit->port->wwpn,
- unit->fcp_lun);
- zfcp_fsf_req_free(fsf_req);
- fsf_req = NULL;
+ if (!retval)
goto out;
- }
- ZFCP_LOG_TRACE("Send FCP Command (task management function) initiated "
- "(adapter %s, port 0x%016Lx, unit 0x%016Lx, "
- "tm_flags=0x%x)\n",
- zfcp_get_busid_by_adapter(adapter),
- unit->port->wwpn,
- unit->fcp_lun,
- tm_flags);
+ unit_blocked:
+ zfcp_fsf_req_free(fsf_req);
+ fsf_req = NULL;
+
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
return fsf_req;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index abae2027f7e5..b9daf5c05862 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -51,7 +51,6 @@ struct zfcp_data zfcp_data = {
.queuecommand = zfcp_scsi_queuecommand,
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
- .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler,
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.can_queue = 4096,
.this_id = -1,
@@ -181,9 +180,6 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
if (unit) {
zfcp_erp_wait(unit->port->adapter);
- wait_event(unit->scsi_scan_wq,
- atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING,
- &unit->status) == 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
sdpnt->hostdata = NULL;
unit->device = NULL;
@@ -262,8 +258,9 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
goto out;
}
- if (unlikely(
- !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))) {
+ tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
+ ZFCP_REQ_AUTO_CLEANUP);
+ if (unlikely(tmp == -EBUSY)) {
ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx "
"on port 0x%016Lx in recovery\n",
zfcp_get_busid_by_unit(unit),
@@ -272,9 +269,6 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
goto out;
}
- tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
- ZFCP_REQ_AUTO_CLEANUP);
-
if (unlikely(tmp < 0)) {
ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n");
retval = SCSI_MLQUEUE_HOST_BUSY;
@@ -459,7 +453,9 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
retval = SUCCESS;
goto out;
}
- ZFCP_LOG_NORMAL("resetting unit 0x%016Lx\n", unit->fcp_lun);
+ ZFCP_LOG_NORMAL("resetting unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
+ unit->fcp_lun, unit->port->wwpn,
+ zfcp_get_busid_by_adapter(unit->port->adapter));
/*
* If we do not know whether the unit supports 'logical unit reset'
@@ -542,7 +538,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
}
/**
- * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
+ * zfcp_scsi_eh_host_reset_handler - handler for host reset
*/
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
@@ -552,8 +548,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
unit = (struct zfcp_unit*) scpnt->device->hostdata;
adapter = unit->port->adapter;
- ZFCP_LOG_NORMAL("host/bus reset because of problems with "
- "unit 0x%016Lx\n", unit->fcp_lun);
+ ZFCP_LOG_NORMAL("host reset because of problems with "
+ "unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
+ unit->fcp_lun, unit->port->wwpn,
+ zfcp_get_busid_by_adapter(unit->port->adapter));
zfcp_erp_adapter_reopen(adapter, 0);
zfcp_erp_wait(adapter);
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index b385af314356..c89ae9a04399 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1,3 +1 @@
53c700_d.h
-53c7xx_d.h
-53c7xx_u.h
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index afb262b4be15..1c244832c6c8 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2010,6 +2010,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
}
pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
|| pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 71ff3fbfce12..f4c4fe90240a 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -608,7 +608,8 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
scsi_print_sense("53c700", SCp);
#endif
- dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+ dma_unmap_single(hostdata->dev, slot->dma_handle,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/* restore the old result if the request sense was
* successful */
if (result == 0)
@@ -1010,7 +1011,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
cmnd[1] = (SCp->device->lun & 0x7) << 5;
cmnd[2] = 0;
cmnd[3] = 0;
- cmnd[4] = sizeof(SCp->sense_buffer);
+ cmnd[4] = SCSI_SENSE_BUFFERSIZE;
cmnd[5] = 0;
/* Here's a quiet hack: the
* REQUEST_SENSE command is six bytes,
@@ -1024,14 +1025,14 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
SCp->cmd_len = 6; /* command length for
* REQUEST_SENSE */
slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
- slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
- slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
+ slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
slot->SG[1].pAddr = 0;
slot->resume_offset = hostdata->pScript;
dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
- dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+ dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/* queue the command for reissue */
slot->state = NCR_700_SLOT_QUEUED;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 49e1ffa4b2ff..ead47c143ce0 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2947,7 +2947,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
}
}
memcpy(CCB->CDB, CDB, CDB_Length);
- CCB->SenseDataLength = sizeof(Command->sense_buffer);
+ CCB->SenseDataLength = SCSI_SENSE_BUFFERSIZE;
CCB->SenseDataPointer = pci_map_single(HostAdapter->PCI_Device, Command->sense_buffer, CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
CCB->Command = Command;
Command->scsi_done = CompletionRoutine;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 184c7ae78519..3e161cd66463 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -341,7 +341,7 @@ config ISCSI_TCP
The userspace component needed to initialize the driver, documentation,
and sample configuration files can be found here:
- http://linux-iscsi.sf.net
+ http://open-iscsi.org
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
@@ -573,10 +573,10 @@ config SCSI_ARCMSR_AER
source "drivers/scsi/megaraid/Kconfig.megaraid"
config SCSI_HPTIOP
- tristate "HighPoint RocketRAID 3xxx Controller support"
+ tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
depends on SCSI && PCI
help
- This option enables support for HighPoint RocketRAID 3xxx
+ This option enables support for HighPoint RocketRAID 3xxx/4xxx
controllers.
To compile this driver as a module, choose M here; the module
@@ -1288,17 +1288,6 @@ config SCSI_PAS16
To compile this driver as a module, choose M here: the
module will be called pas16.
-config SCSI_PSI240I
- tristate "PSI240i support"
- depends on ISA && SCSI
- help
- This is support for the PSI240i EIDE interface card which acts as a
- SCSI host adapter. Please read the SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here: the
- module will be called psi240i.
-
config SCSI_QLOGIC_FAS
tristate "Qlogic FAS SCSI support"
depends on ISA && SCSI
@@ -1359,21 +1348,6 @@ config SCSI_LPFC
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
-config SCSI_SEAGATE
- tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support"
- depends on X86 && ISA && SCSI
- select CHECK_SIGNATURE
- ---help---
- These are 8-bit SCSI controllers; the ST-01 is also supported by
- this driver. It is explained in section 3.9 of the SCSI-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>. If it
- doesn't work out of the box, you may have to change some macros at
- compiletime, which are described in <file:drivers/scsi/seagate.c>.
-
- To compile this driver as a module, choose M here: the
- module will be called seagate.
-
-# definitely looks not 64bit safe:
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2e6129f13d38..93e1428d03fc 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -16,9 +16,8 @@
CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
-CFLAGS_seagate.o = -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
-subdir-$(CONFIG_PCMCIA) += pcmcia
+obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_SCSI) += scsi_mod.o
obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o
@@ -59,7 +58,6 @@ obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
-obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o
@@ -90,7 +88,6 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
-obj-$(CONFIG_SCSI_SEAGATE) += seagate.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_DTC3280) += dtc.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 2597209183d0..eeddbd19eba5 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -295,16 +295,16 @@ static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
* various queues are valid.
*/
- if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
- cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = (char *) cmd->request_buffer;
- cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
}
}
@@ -932,7 +932,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
* @instance: adapter to remove
*/
-static void __devexit NCR5380_exit(struct Scsi_Host *instance)
+static void NCR5380_exit(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -975,14 +975,14 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
case WRITE_6:
case WRITE_10:
hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
- hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;
+ hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingw++;
break;
case READ:
case READ_6:
case READ_10:
hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
- hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;
+ hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingr++;
break;
}
@@ -1157,16 +1157,17 @@ static void NCR5380_main(struct work_struct *work)
* Locks: takes the needed instance locks
*/
-static irqreturn_t NCR5380_intr(int irq, void *dev_id)
+static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
{
NCR5380_local_declare();
- struct Scsi_Host *instance = (struct Scsi_Host *)dev_id;
+ struct Scsi_Host *instance = dev_id;
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
int done;
unsigned char basr;
unsigned long flags;
- dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n", irq));
+ dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
+ instance->irq));
do {
done = 1;
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index b7c5385e2efe..23f27c9c9895 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -73,18 +73,9 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
}
if (!dir_in) {
- /* copy to bounce buffer for a write */
- if (cmd->use_sg)
-#if 0
- panic ("scsi%ddma: incomplete s/g support",
- instance->host_no);
-#else
+ /* copy to bounce buffer for a write */
memcpy (HDATA(instance)->dma_bounce_buffer,
cmd->SCp.ptr, cmd->SCp.this_residual);
-#endif
- else
- memcpy (HDATA(instance)->dma_bounce_buffer,
- cmd->request_buffer, cmd->request_bufflen);
}
}
@@ -144,30 +135,13 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* copy from a bounce buffer, if necessary */
if (status && HDATA(instance)->dma_bounce_buffer) {
- if (SCpnt && SCpnt->use_sg) {
-#if 0
- panic ("scsi%d: incomplete s/g support",
- instance->host_no);
-#else
- if( HDATA(instance)->dma_dir )
+ if( HDATA(instance)->dma_dir )
memcpy (SCpnt->SCp.ptr,
HDATA(instance)->dma_bounce_buffer,
SCpnt->SCp.this_residual);
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
-
-#endif
- } else {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->request_buffer,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->request_bufflen);
-
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- }
+ kfree (HDATA(instance)->dma_bounce_buffer);
+ HDATA(instance)->dma_bounce_buffer = NULL;
+ HDATA(instance)->dma_bounce_len = 0;
}
}
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 796f1c4d772e..d7255c8bf281 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -70,12 +70,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
if (!dir_in) {
/* copy to bounce buffer for a write */
- if (cmd->use_sg) {
- memcpy (HDATA(a3000_host)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
- } else
- memcpy (HDATA(a3000_host)->dma_bounce_buffer,
- cmd->request_buffer, cmd->request_bufflen);
+ memcpy (HDATA(a3000_host)->dma_bounce_buffer,
+ cmd->SCp.ptr, cmd->SCp.this_residual);
}
addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
@@ -146,7 +142,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* copy from a bounce buffer, if necessary */
if (status && HDATA(instance)->dma_bounce_buffer) {
- if (SCpnt && SCpnt->use_sg) {
+ if (SCpnt) {
if (HDATA(instance)->dma_dir && SCpnt)
memcpy (SCpnt->SCp.ptr,
HDATA(instance)->dma_bounce_buffer,
@@ -155,11 +151,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
HDATA(instance)->dma_bounce_buffer = NULL;
HDATA(instance)->dma_bounce_len = 0;
} else {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->request_buffer,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->request_bufflen);
-
kfree (HDATA(instance)->dma_bounce_buffer);
HDATA(instance)->dma_bounce_buffer = NULL;
HDATA(instance)->dma_bounce_len = 0;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a77ab8d693d4..d7235f42cf5f 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -31,9 +31,9 @@
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
-#include <linux/dma-mapping.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
+#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -56,54 +56,54 @@
/*
* Sense codes
*/
-
-#define SENCODE_NO_SENSE 0x00
-#define SENCODE_END_OF_DATA 0x00
-#define SENCODE_BECOMING_READY 0x04
-#define SENCODE_INIT_CMD_REQUIRED 0x04
-#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
-#define SENCODE_INVALID_COMMAND 0x20
-#define SENCODE_LBA_OUT_OF_RANGE 0x21
-#define SENCODE_INVALID_CDB_FIELD 0x24
-#define SENCODE_LUN_NOT_SUPPORTED 0x25
-#define SENCODE_INVALID_PARAM_FIELD 0x26
-#define SENCODE_PARAM_NOT_SUPPORTED 0x26
-#define SENCODE_PARAM_VALUE_INVALID 0x26
-#define SENCODE_RESET_OCCURRED 0x29
-#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
-#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
-#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
-#define SENCODE_DIAGNOSTIC_FAILURE 0x40
-#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
-#define SENCODE_INVALID_MESSAGE_ERROR 0x49
-#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
-#define SENCODE_OVERLAPPED_COMMAND 0x4E
+
+#define SENCODE_NO_SENSE 0x00
+#define SENCODE_END_OF_DATA 0x00
+#define SENCODE_BECOMING_READY 0x04
+#define SENCODE_INIT_CMD_REQUIRED 0x04
+#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
+#define SENCODE_INVALID_COMMAND 0x20
+#define SENCODE_LBA_OUT_OF_RANGE 0x21
+#define SENCODE_INVALID_CDB_FIELD 0x24
+#define SENCODE_LUN_NOT_SUPPORTED 0x25
+#define SENCODE_INVALID_PARAM_FIELD 0x26
+#define SENCODE_PARAM_NOT_SUPPORTED 0x26
+#define SENCODE_PARAM_VALUE_INVALID 0x26
+#define SENCODE_RESET_OCCURRED 0x29
+#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
+#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
+#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
+#define SENCODE_DIAGNOSTIC_FAILURE 0x40
+#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
+#define SENCODE_INVALID_MESSAGE_ERROR 0x49
+#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
+#define SENCODE_OVERLAPPED_COMMAND 0x4E
/*
* Additional sense codes
*/
-
-#define ASENCODE_NO_SENSE 0x00
-#define ASENCODE_END_OF_DATA 0x05
-#define ASENCODE_BECOMING_READY 0x01
-#define ASENCODE_INIT_CMD_REQUIRED 0x02
-#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
-#define ASENCODE_INVALID_COMMAND 0x00
-#define ASENCODE_LBA_OUT_OF_RANGE 0x00
-#define ASENCODE_INVALID_CDB_FIELD 0x00
-#define ASENCODE_LUN_NOT_SUPPORTED 0x00
-#define ASENCODE_INVALID_PARAM_FIELD 0x00
-#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
-#define ASENCODE_PARAM_VALUE_INVALID 0x02
-#define ASENCODE_RESET_OCCURRED 0x00
-#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
-#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
-#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
-#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
-#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
-#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
-#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
-#define ASENCODE_OVERLAPPED_COMMAND 0x00
+
+#define ASENCODE_NO_SENSE 0x00
+#define ASENCODE_END_OF_DATA 0x05
+#define ASENCODE_BECOMING_READY 0x01
+#define ASENCODE_INIT_CMD_REQUIRED 0x02
+#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
+#define ASENCODE_INVALID_COMMAND 0x00
+#define ASENCODE_LBA_OUT_OF_RANGE 0x00
+#define ASENCODE_INVALID_CDB_FIELD 0x00
+#define ASENCODE_LUN_NOT_SUPPORTED 0x00
+#define ASENCODE_INVALID_PARAM_FIELD 0x00
+#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
+#define ASENCODE_PARAM_VALUE_INVALID 0x02
+#define ASENCODE_RESET_OCCURRED 0x00
+#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
+#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
+#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
+#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
+#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
+#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
+#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
+#define ASENCODE_OVERLAPPED_COMMAND 0x00
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
@@ -115,8 +115,8 @@
*----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
- u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
- u8 inqd_dtq; /* RMB | Device Type Qualifier */
+ u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
+ u8 inqd_dtq; /* RMB | Device Type Qualifier */
u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
u8 inqd_len; /* Additional length (n-4) */
@@ -130,7 +130,7 @@ struct inquiry_data {
/*
* M O D U L E G L O B A L S
*/
-
+
static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
@@ -141,9 +141,10 @@ static char *aac_get_status_string(u32 status);
/*
* Non dasd selection is handled entirely in aachba now
- */
-
+ */
+
static int nondasd = -1;
+static int aac_cache = 0;
static int dacmode = -1;
int aac_commit = -1;
@@ -152,6 +153,8 @@ int aif_timeout = 120;
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
+module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache");
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
@@ -179,7 +182,7 @@ MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health che
int aac_check_reset = 1;
module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter.");
+MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it.");
int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
@@ -193,12 +196,12 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
struct fib *fibptr) {
struct scsi_device *device;
- if (unlikely(!scsicmd || !scsicmd->scsi_done )) {
+ if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
- aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
- return 0;
- }
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return 0;
+ }
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
device = scsicmd->device;
if (unlikely(!device || !scsi_device_online(device))) {
@@ -240,7 +243,7 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
FsaNormal,
1, 1,
NULL, NULL);
- if (status < 0 ) {
+ if (status < 0) {
printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
} else {
struct aac_get_config_status_resp *reply
@@ -264,10 +267,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
struct aac_commit_config * dinfo;
aac_fib_init(fibptr);
dinfo = (struct aac_commit_config *) fib_data(fibptr);
-
+
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
-
+
status = aac_fib_send(ContainerCommand,
fibptr,
sizeof (struct aac_commit_config),
@@ -293,7 +296,7 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
int aac_get_containers(struct aac_dev *dev)
{
struct fsa_dev_info *fsa_dev_ptr;
- u32 index;
+ u32 index;
int status = 0;
struct fib * fibptr;
struct aac_get_container_count *dinfo;
@@ -363,6 +366,7 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
if (buf && transfer_len > 0)
memcpy(buf + offset, data, transfer_len);
+ flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
kunmap_atomic(buf - sg->offset, KM_IRQ0);
}
@@ -395,7 +399,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
do {
*dp++ = (*sp) ? *sp++ : ' ';
} while (--count > 0);
- aac_internal_transfer(scsicmd, d,
+ aac_internal_transfer(scsicmd, d,
offsetof(struct inquiry_data, inqd_pid), sizeof(d));
}
}
@@ -431,13 +435,13 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
status = aac_fib_send(ContainerCommand,
- cmd_fibcontext,
+ cmd_fibcontext,
sizeof (struct aac_get_name),
- FsaNormal,
- 0, 1,
- (fib_callback) get_container_name_callback,
+ FsaNormal,
+ 0, 1,
+ (fib_callback)get_container_name_callback,
(void *) scsicmd);
-
+
/*
* Check that the command queued to the controller
*/
@@ -445,7 +449,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
-
+
printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
@@ -652,42 +656,47 @@ struct scsi_inq {
* @a: string to copy from
* @b: string to copy to
*
- * Copy a String from one location to another
+ * Copy a String from one location to another
* without copying \0
*/
static void inqstrcpy(char *a, char *b)
{
- while(*a != (char)0)
+ while (*a != (char)0)
*b++ = *a++;
}
static char *container_types[] = {
- "None",
- "Volume",
- "Mirror",
- "Stripe",
- "RAID5",
- "SSRW",
- "SSRO",
- "Morph",
- "Legacy",
- "RAID4",
- "RAID10",
- "RAID00",
- "V-MIRRORS",
- "PSEUDO R4",
+ "None",
+ "Volume",
+ "Mirror",
+ "Stripe",
+ "RAID5",
+ "SSRW",
+ "SSRO",
+ "Morph",
+ "Legacy",
+ "RAID4",
+ "RAID10",
+ "RAID00",
+ "V-MIRRORS",
+ "PSEUDO R4",
"RAID50",
"RAID5D",
"RAID5D0",
"RAID1E",
"RAID6",
"RAID60",
- "Unknown"
+ "Unknown"
};
-
+char * get_container_type(unsigned tindex)
+{
+ if (tindex >= ARRAY_SIZE(container_types))
+ tindex = ARRAY_SIZE(container_types) - 1;
+ return container_types[tindex];
+}
/* Function: setinqstr
*
@@ -707,16 +716,21 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
if (dev->supplement_adapter_info.AdapterTypeText[0]) {
char * cp = dev->supplement_adapter_info.AdapterTypeText;
- int c = sizeof(str->vid);
- while (*cp && *cp != ' ' && --c)
- ++cp;
- c = *cp;
- *cp = '\0';
- inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
- str->vid);
- *cp = c;
- while (*cp && *cp != ' ')
- ++cp;
+ int c;
+ if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
+ inqstrcpy("SMC", str->vid);
+ else {
+ c = sizeof(str->vid);
+ while (*cp && *cp != ' ' && --c)
+ ++cp;
+ c = *cp;
+ *cp = '\0';
+ inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
+ str->vid);
+ *cp = c;
+ while (*cp && *cp != ' ')
+ ++cp;
+ }
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
@@ -898,9 +912,8 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(cmd->sense_buffer))
- ? sizeof(cmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
cmd->scsi_done(cmd);
return 1;
}
@@ -981,7 +994,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
aac_fib_init(fib);
readcmd = (struct aac_read *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
- readcmd->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd->cid = cpu_to_le32(scmd_id(cmd));
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
@@ -1013,7 +1026,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
- writecmd->flags = fua ?
+ writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
+ (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
cpu_to_le16(IO_TYPE_WRITE);
writecmd->bpTotal = 0;
@@ -1072,7 +1086,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
aac_fib_init(fib);
writecmd = (struct aac_write *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
- writecmd->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd->cid = cpu_to_le32(scmd_id(cmd));
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->count = cpu_to_le32(count * 512);
writecmd->sg.count = cpu_to_le32(1);
@@ -1190,6 +1204,15 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
(fib_callback) aac_srb_callback, (void *) cmd);
}
+static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ if ((sizeof(dma_addr_t) > 4) &&
+ (num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT)) &&
+ (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
+ return FAILED;
+ return aac_scsi_32(fib, cmd);
+}
+
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
@@ -1207,11 +1230,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
memset(info,0,sizeof(*info));
rcode = aac_fib_send(RequestAdapterInfo,
- fibptr,
+ fibptr,
sizeof(*info),
- FsaNormal,
+ FsaNormal,
-1, 1, /* First `interrupt' command uses special wait */
- NULL,
+ NULL,
NULL);
if (rcode < 0) {
@@ -1222,29 +1245,29 @@ int aac_get_adapter_info(struct aac_dev* dev)
memcpy(&dev->adapter_info, info, sizeof(*info));
if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
- struct aac_supplement_adapter_info * info;
+ struct aac_supplement_adapter_info * sinfo;
aac_fib_init(fibptr);
- info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
+ sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
- memset(info,0,sizeof(*info));
+ memset(sinfo,0,sizeof(*sinfo));
rcode = aac_fib_send(RequestSupplementAdapterInfo,
fibptr,
- sizeof(*info),
+ sizeof(*sinfo),
FsaNormal,
1, 1,
NULL,
NULL);
if (rcode >= 0)
- memcpy(&dev->supplement_adapter_info, info, sizeof(*info));
+ memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
}
- /*
- * GetBusInfo
+ /*
+ * GetBusInfo
*/
aac_fib_init(fibptr);
@@ -1267,6 +1290,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
1, 1,
NULL, NULL);
+ /* reasoned default */
+ dev->maximum_num_physicals = 16;
if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
@@ -1276,7 +1301,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
- dev->name,
+ dev->name,
dev->id,
tmp>>24,
(tmp>>16)&0xff,
@@ -1305,19 +1330,21 @@ int aac_get_adapter_info(struct aac_dev* dev)
(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
dev->supplement_adapter_info.VpdInfo.Tsid);
}
- if (!aac_check_reset ||
+ if (!aac_check_reset || ((aac_check_reset != 1) &&
(dev->supplement_adapter_info.SupportedOptions2 &
- le32_to_cpu(AAC_OPTION_IGNORE_RESET))) {
+ AAC_OPTION_IGNORE_RESET))) {
printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
dev->name, dev->id);
}
}
+ dev->cache_protected = 0;
+ dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
+ AAC_FEATURE_JBOD) != 0);
dev->nondasd_support = 0;
dev->raid_scsi_mode = 0;
- if(dev->adapter_info.options & AAC_OPT_NONDASD){
+ if(dev->adapter_info.options & AAC_OPT_NONDASD)
dev->nondasd_support = 1;
- }
/*
* If the firmware supports ROMB RAID/SCSI mode and we are currently
@@ -1338,11 +1365,10 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (dev->raid_scsi_mode != 0)
printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
dev->name, dev->id);
-
- if(nondasd != -1) {
+
+ if (nondasd != -1)
dev->nondasd_support = (nondasd!=0);
- }
- if(dev->nondasd_support != 0){
+ if(dev->nondasd_support != 0) {
printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
}
@@ -1371,12 +1397,14 @@ int aac_get_adapter_info(struct aac_dev* dev)
rcode = -ENOMEM;
}
}
- /*
+ /*
* Deal with configuring for the individualized limits of each packet
* interface.
*/
dev->a_ops.adapter_scsi = (dev->dac_support)
- ? aac_scsi_64
+ ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
+ ? aac_scsi_32_64
+ : aac_scsi_64)
: aac_scsi_32;
if (dev->raw_io_interface) {
dev->a_ops.adapter_bounds = (dev->raw_io_64)
@@ -1393,8 +1421,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (dev->dac_support) {
dev->a_ops.adapter_read = aac_read_block64;
dev->a_ops.adapter_write = aac_write_block64;
- /*
- * 38 scatter gather elements
+ /*
+ * 38 scatter gather elements
*/
dev->scsi_host_ptr->sg_tablesize =
(dev->max_fib_size -
@@ -1498,9 +1526,8 @@ static void io_callback(void *context, struct fib * fibptr)
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
- ? sizeof(scsicmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
}
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
@@ -1524,7 +1551,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
case READ_6:
dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
- lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
count = scsicmd->cmnd[4];
@@ -1534,32 +1561,32 @@ static int aac_read(struct scsi_cmnd * scsicmd)
case READ_16:
dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
- lba = ((u64)scsicmd->cmnd[2] << 56) |
- ((u64)scsicmd->cmnd[3] << 48) |
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
((u64)scsicmd->cmnd[4] << 40) |
((u64)scsicmd->cmnd[5] << 32) |
- ((u64)scsicmd->cmnd[6] << 24) |
+ ((u64)scsicmd->cmnd[6] << 24) |
(scsicmd->cmnd[7] << 16) |
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
- count = (scsicmd->cmnd[10] << 24) |
+ count = (scsicmd->cmnd[10] << 24) |
(scsicmd->cmnd[11] << 16) |
(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
break;
case READ_12:
dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
- lba = ((u64)scsicmd->cmnd[2] << 24) |
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
(scsicmd->cmnd[3] << 16) |
- (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
- count = (scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) |
(scsicmd->cmnd[7] << 16) |
- (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
break;
default:
dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
- lba = ((u64)scsicmd->cmnd[2] << 24) |
- (scsicmd->cmnd[3] << 16) |
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
break;
@@ -1584,7 +1611,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
-
+
printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
@@ -1619,11 +1646,11 @@ static int aac_write(struct scsi_cmnd * scsicmd)
} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
- lba = ((u64)scsicmd->cmnd[2] << 56) |
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
((u64)scsicmd->cmnd[3] << 48) |
((u64)scsicmd->cmnd[4] << 40) |
((u64)scsicmd->cmnd[5] << 32) |
- ((u64)scsicmd->cmnd[6] << 24) |
+ ((u64)scsicmd->cmnd[6] << 24) |
(scsicmd->cmnd[7] << 16) |
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
@@ -1712,8 +1739,8 @@ static void synchronize_callback(void *context, struct fib *fibptr)
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- min(sizeof(dev->fsa_dev[cid].sense_data),
- sizeof(cmd->sense_buffer)));
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
}
aac_fib_complete(fibptr);
@@ -1798,7 +1825,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
if (active)
return SCSI_MLQUEUE_DEVICE_BUSY;
- aac = (struct aac_dev *)scsicmd->device->host->hostdata;
+ aac = (struct aac_dev *)sdev->host->hostdata;
if (aac->in_reset)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1850,14 +1877,14 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
* Emulate a SCSI command and queue the required request for the
* aacraid firmware.
*/
-
+
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
u32 cid;
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
-
+
if (fsa_dev_ptr == NULL)
return -1;
/*
@@ -1898,7 +1925,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
}
} else { /* check for physical non-dasd devices */
- if ((dev->nondasd_support == 1) || expose_physicals) {
+ if (dev->nondasd_support || expose_physicals ||
+ dev->jbod) {
if (dev->in_reset)
return -1;
return aac_send_srb_fib(scsicmd);
@@ -1913,7 +1941,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* else Command for the controller itself
*/
else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
- (scsicmd->cmnd[0] != TEST_UNIT_READY))
+ (scsicmd->cmnd[0] != TEST_UNIT_READY))
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
@@ -1922,9 +1950,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
- ? sizeof(scsicmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
return 0;
}
@@ -1939,7 +1966,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
memset(&inq_data, 0, sizeof (struct inquiry_data));
- if (scsicmd->cmnd[1] & 0x1 ) {
+ if (scsicmd->cmnd[1] & 0x1) {
char *arr = (char *)&inq_data;
/* EVPD bit set */
@@ -1974,10 +2001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
ASENCODE_NO_SENSE, 0, 7, 2, 0);
memcpy(scsicmd->sense_buffer,
&dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) >
- sizeof(scsicmd->sense_buffer))
- ? sizeof(scsicmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
}
scsicmd->scsi_done(scsicmd);
return 0;
@@ -2092,7 +2118,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
mode_buf[2] = 0; /* Device-specific param,
bit 8: 0/1 = write enabled/protected
bit 4: 0/1 = FUA enabled */
- if (dev->raw_io_interface)
+ if (dev->raw_io_interface && ((aac_cache & 5) != 1))
mode_buf[2] = 0x10;
mode_buf[3] = 0; /* Block descriptor length */
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
@@ -2100,7 +2126,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
mode_buf[0] = 6;
mode_buf[4] = 8;
mode_buf[5] = 1;
- mode_buf[6] = 0x04; /* WCE */
+ mode_buf[6] = ((aac_cache & 6) == 2)
+ ? 0 : 0x04; /* WCE */
mode_buf_length = 7;
if (mode_buf_length > scsicmd->cmnd[4])
mode_buf_length = scsicmd->cmnd[4];
@@ -2123,7 +2150,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
mode_buf[3] = 0; /* Device-specific param,
bit 8: 0/1 = write enabled/protected
bit 4: 0/1 = FUA enabled */
- if (dev->raw_io_interface)
+ if (dev->raw_io_interface && ((aac_cache & 5) != 1))
mode_buf[3] = 0x10;
mode_buf[4] = 0; /* reserved */
mode_buf[5] = 0; /* reserved */
@@ -2134,7 +2161,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
mode_buf[1] = 9;
mode_buf[8] = 8;
mode_buf[9] = 1;
- mode_buf[10] = 0x04; /* WCE */
+ mode_buf[10] = ((aac_cache & 6) == 2)
+ ? 0 : 0x04; /* WCE */
mode_buf_length = 11;
if (mode_buf_length > scsicmd->cmnd[8])
mode_buf_length = scsicmd->cmnd[8];
@@ -2179,7 +2207,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
return 0;
}
- switch (scsicmd->cmnd[0])
+ switch (scsicmd->cmnd[0])
{
case READ_6:
case READ_10:
@@ -2192,11 +2220,11 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* corresponds to a container. Needed to convert
* containers to /dev/sd device names
*/
-
+
if (scsicmd->request->rq_disk)
strlcpy(fsa_dev_ptr[cid].devname,
scsicmd->request->rq_disk->disk_name,
- min(sizeof(fsa_dev_ptr[cid].devname),
+ min(sizeof(fsa_dev_ptr[cid].devname),
sizeof(scsicmd->request->rq_disk->disk_name) + 1));
return aac_read(scsicmd);
@@ -2210,9 +2238,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
return aac_write(scsicmd);
case SYNCHRONIZE_CACHE:
+ if (((aac_cache & 6) == 6) && dev->cache_protected) {
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
/* Issue FIB to tell Firmware to flush it's cache */
- return aac_synchronize(scsicmd);
-
+ if ((aac_cache & 6) != 2)
+ return aac_synchronize(scsicmd);
+ /* FALLTHRU */
default:
/*
* Unhandled commands
@@ -2223,9 +2258,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
- ? sizeof(scsicmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
return 0;
}
@@ -2243,7 +2278,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
return -EFAULT;
if (qd.cnum == -1)
qd.cnum = qd.id;
- else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
+ else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
{
if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
return -EINVAL;
@@ -2370,7 +2405,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
/*
- * Calculate resid for sg
+ * Calculate resid for sg
*/
scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
@@ -2385,10 +2420,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
if (le32_to_cpu(srbreply->status) != ST_OK){
int len;
printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
- len = (le32_to_cpu(srbreply->sense_data_size) >
- sizeof(scsicmd->sense_buffer)) ?
- sizeof(scsicmd->sense_buffer) :
- le32_to_cpu(srbreply->sense_data_size);
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
}
@@ -2412,7 +2445,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case WRITE_12:
case READ_16:
case WRITE_16:
- if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
+ if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
} else {
printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
@@ -2481,26 +2514,23 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
le32_to_cpu(srbreply->srb_status) & 0x3F,
aac_get_status_string(
- le32_to_cpu(srbreply->srb_status) & 0x3F),
- scsicmd->cmnd[0],
+ le32_to_cpu(srbreply->srb_status) & 0x3F),
+ scsicmd->cmnd[0],
le32_to_cpu(srbreply->scsi_status));
#endif
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
}
- if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
+ if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
int len;
scsicmd->result |= SAM_STAT_CHECK_CONDITION;
- len = (le32_to_cpu(srbreply->sense_data_size) >
- sizeof(scsicmd->sense_buffer)) ?
- sizeof(scsicmd->sense_buffer) :
- le32_to_cpu(srbreply->sense_data_size);
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
#ifdef AAC_DETAILED_STATUS_INFO
printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
le32_to_cpu(srbreply->status), len);
#endif
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
-
}
/*
* OR in the scsi status (already shifted up a bit)
@@ -2517,7 +2547,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
* aac_send_scb_fib
* @scsicmd: the scsi command block
*
- * This routine will form a FIB and fill in the aac_srb from the
+ * This routine will form a FIB and fill in the aac_srb from the
* scsicmd passed in.
*/
@@ -2731,7 +2761,7 @@ static struct aac_srb_status_info srb_status_info[] = {
{ SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
{ SRB_STATUS_NOT_STARTED, "Not Started"},
{ SRB_STATUS_NOT_IN_USE, "Not In Use"},
- { SRB_STATUS_FORCE_ABORT, "Force Abort"},
+ { SRB_STATUS_FORCE_ABORT, "Force Abort"},
{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
{ 0xff, "Unknown Error"}
};
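The new "cache" module parameter introduced above is consulted as a small bitmask in several places in this file (the WRITE FUA flag, the mode-page WCE bit, and the SYNCHRONIZE_CACHE path). The helper below is only a readability sketch that restates the FUA condition from the aac_write_raw_io() hunk above; the name fua_flag() is hypothetical and not part of the driver:

#include <stdbool.h>

/* Sketch: restates the condition used in aac_write_raw_io().
 * With bit 0 set alone (aac_cache & 5 == 1) FUA is never issued; with
 * bits 0 and 2 both set (== 5) it is suppressed only while
 * cache_protected is true. */
static bool fua_flag(bool fua_requested, int aac_cache, bool cache_protected)
{
	return fua_requested &&
	       ((aac_cache & 5) != 1) &&
	       (((aac_cache & 5) != 5) || !cache_protected);
}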
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9abba8b90f70..3195d29f2177 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,4 +1,4 @@
-#if (!defined(dprintk))
+#ifndef dprintk
# define dprintk(x)
#endif
/* eg: if (nblank(dprintk(x))) */
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2449
+# define AAC_DRIVER_BUILD 2455
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -50,9 +50,9 @@ struct diskparm
/*
* Firmware constants
*/
-
+
#define CT_NONE 0
-#define CT_OK 218
+#define CT_OK 218
#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */
@@ -107,12 +107,12 @@ struct user_sgentryraw {
struct sgmap {
__le32 count;
- struct sgentry sg[1];
+ struct sgentry sg[1];
};
struct user_sgmap {
u32 count;
- struct user_sgentry sg[1];
+ struct user_sgentry sg[1];
};
struct sgmap64 {
@@ -137,18 +137,18 @@ struct user_sgmapraw {
struct creation_info
{
- u8 buildnum; /* e.g., 588 */
- u8 usec; /* e.g., 588 */
- u8 via; /* e.g., 1 = FSU,
- * 2 = API
+ u8 buildnum; /* e.g., 588 */
+ u8 usec; /* e.g., 588 */
+ u8 via; /* e.g., 1 = FSU,
+ * 2 = API
*/
- u8 year; /* e.g., 1997 = 97 */
+ u8 year; /* e.g., 1997 = 97 */
__le32 date; /*
- * unsigned Month :4; // 1 - 12
- * unsigned Day :6; // 1 - 32
- * unsigned Hour :6; // 0 - 23
- * unsigned Minute :6; // 0 - 60
- * unsigned Second :6; // 0 - 60
+ * unsigned Month :4; // 1 - 12
+ * unsigned Day :6; // 1 - 32
+ * unsigned Hour :6; // 0 - 23
+ * unsigned Minute :6; // 0 - 60
+ * unsigned Second :6; // 0 - 60
*/
__le32 serial[2]; /* e.g., 0x1DEADB0BFAFAF001 */
};
@@ -184,7 +184,7 @@ struct creation_info
/*
* Set the queues on a 16 byte alignment
*/
-
+
#define QUEUE_ALIGNMENT 16
/*
@@ -203,9 +203,9 @@ struct aac_entry {
* The adapter assumes the ProducerIndex and ConsumerIndex are grouped
* adjacently and in that order.
*/
-
+
struct aac_qhdr {
- __le64 header_addr;/* Address to hand the adapter to access
+ __le64 header_addr;/* Address to hand the adapter to access
to this queue head */
__le32 *producer; /* The producer index for this queue (host address) */
__le32 *consumer; /* The consumer index for this queue (host address) */
@@ -215,7 +215,7 @@ struct aac_qhdr {
* Define all the events which the adapter would like to notify
* the host of.
*/
-
+
#define HostNormCmdQue 1 /* Change in host normal priority command queue */
#define HostHighCmdQue 2 /* Change in host high priority command queue */
#define HostNormRespQue 3 /* Change in host normal priority response queue */
@@ -286,17 +286,17 @@ struct aac_fibhdr {
u8 StructType; /* Type FIB */
u8 Flags; /* Flags for FIB */
__le16 Size; /* Size of this FIB in bytes */
- __le16 SenderSize; /* Size of the FIB in the sender
+ __le16 SenderSize; /* Size of the FIB in the sender
(for response sizing) */
__le32 SenderFibAddress; /* Host defined data in the FIB */
- __le32 ReceiverFibAddress;/* Logical address of this FIB for
+ __le32 ReceiverFibAddress;/* Logical address of this FIB for
the adapter */
u32 SenderData; /* Place holder for the sender to store data */
union {
struct {
- __le32 _ReceiverTimeStart; /* Timestamp for
+ __le32 _ReceiverTimeStart; /* Timestamp for
receipt of fib */
- __le32 _ReceiverTimeDone; /* Timestamp for
+ __le32 _ReceiverTimeDone; /* Timestamp for
completion of fib */
} _s;
} _u;
@@ -311,7 +311,7 @@ struct hw_fib {
* FIB commands
*/
-#define TestCommandResponse 1
+#define TestCommandResponse 1
#define TestAdapterCommand 2
/*
* Lowlevel and comm commands
@@ -350,10 +350,6 @@ struct hw_fib {
#define ContainerCommand64 501
#define ContainerRawIo 502
/*
- * Cluster Commands
- */
-#define ClusterCommand 550
-/*
* Scsi Port commands (scsi passthrough)
*/
#define ScsiPortCommand 600
@@ -375,19 +371,19 @@ struct hw_fib {
*/
enum fib_xfer_state {
- HostOwned = (1<<0),
- AdapterOwned = (1<<1),
- FibInitialized = (1<<2),
- FibEmpty = (1<<3),
- AllocatedFromPool = (1<<4),
- SentFromHost = (1<<5),
- SentFromAdapter = (1<<6),
- ResponseExpected = (1<<7),
- NoResponseExpected = (1<<8),
- AdapterProcessed = (1<<9),
- HostProcessed = (1<<10),
- HighPriority = (1<<11),
- NormalPriority = (1<<12),
+ HostOwned = (1<<0),
+ AdapterOwned = (1<<1),
+ FibInitialized = (1<<2),
+ FibEmpty = (1<<3),
+ AllocatedFromPool = (1<<4),
+ SentFromHost = (1<<5),
+ SentFromAdapter = (1<<6),
+ ResponseExpected = (1<<7),
+ NoResponseExpected = (1<<8),
+ AdapterProcessed = (1<<9),
+ HostProcessed = (1<<10),
+ HighPriority = (1<<11),
+ NormalPriority = (1<<12),
Async = (1<<13),
AsyncIo = (1<<13), // rpbfix: remove with new regime
PageFileIo = (1<<14), // rpbfix: remove with new regime
@@ -420,7 +416,7 @@ struct aac_init
__le32 AdapterFibAlign;
__le32 printfbuf;
__le32 printfbufsiz;
- __le32 HostPhysMemPages; /* number of 4k pages of host
+ __le32 HostPhysMemPages; /* number of 4k pages of host
physical memory */
__le32 HostElapsedSeconds; /* number of seconds since 1970. */
/*
@@ -481,7 +477,7 @@ struct adapter_ops
struct aac_driver_ident
{
- int (*init)(struct aac_dev *dev);
+ int (*init)(struct aac_dev *dev);
char * name;
char * vname;
char * model;
@@ -489,7 +485,7 @@ struct aac_driver_ident
int quirks;
};
/*
- * Some adapter firmware needs communication memory
+ * Some adapter firmware needs communication memory
* below 2gig. This tells the init function to set the
* dma mask such that fib memory will be allocated where the
* adapter firmware can get to it.
@@ -521,33 +517,39 @@ struct aac_driver_ident
#define AAC_QUIRK_17SG 0x0010
/*
+ * Some adapter firmware does not support 64 bit scsi passthrough
+ * commands.
+ */
+#define AAC_QUIRK_SCSI_32 0x0020
+
+/*
* The adapter interface specs all queues to be located in the same
* physically contigous block. The host structure that defines the
* commuication queues will assume they are each a separate physically
* contigous memory region that will support them all being one big
- * contigous block.
+ * contigous block.
* There is a command and response queue for each level and direction of
* commuication. These regions are accessed by both the host and adapter.
*/
-
+
struct aac_queue {
- u64 logical; /*address we give the adapter */
+ u64 logical; /*address we give the adapter */
struct aac_entry *base; /*system virtual address */
- struct aac_qhdr headers; /*producer,consumer q headers*/
- u32 entries; /*Number of queue entries */
+ struct aac_qhdr headers; /*producer,consumer q headers*/
+ u32 entries; /*Number of queue entries */
wait_queue_head_t qfull; /*Event to wait on if q full */
wait_queue_head_t cmdready; /*Cmd ready from the adapter */
- /* This is only valid for adapter to host command queues. */
- spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
+ /* This is only valid for adapter to host command queues. */
+ spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
- struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
- /* only valid for command queues which receive entries from the adapter. */
+ struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
+ /* only valid for command queues which receive entries from the adapter. */
u32 numpending; /* Number of entries on outstanding queue. */
struct aac_dev * dev; /* Back pointer to adapter structure */
};
/*
- * Message queues. The order here is important, see also the
+ * Message queues. The order here is important, see also the
* queue type ordering
*/
@@ -559,12 +561,12 @@ struct aac_queue_block
/*
* SaP1 Message Unit Registers
*/
-
+
struct sa_drawbridge_CSR {
- /* Offset | Name */
+ /* Offset | Name */
__le32 reserved[10]; /* 00h-27h | Reserved */
u8 LUT_Offset; /* 28h | Lookup Table Offset */
- u8 reserved1[3]; /* 29h-2bh | Reserved */
+ u8 reserved1[3]; /* 29h-2bh | Reserved */
__le32 LUT_Data; /* 2ch | Looup Table Data */
__le32 reserved2[26]; /* 30h-97h | Reserved */
__le16 PRICLEARIRQ; /* 98h | Primary Clear Irq */
@@ -583,8 +585,8 @@ struct sa_drawbridge_CSR {
__le32 MAILBOX5; /* bch | Scratchpad 5 */
__le32 MAILBOX6; /* c0h | Scratchpad 6 */
__le32 MAILBOX7; /* c4h | Scratchpad 7 */
- __le32 ROM_Setup_Data; /* c8h | Rom Setup and Data */
- __le32 ROM_Control_Addr;/* cch | Rom Control and Address */
+ __le32 ROM_Setup_Data; /* c8h | Rom Setup and Data */
+ __le32 ROM_Control_Addr;/* cch | Rom Control and Address */
__le32 reserved3[12]; /* d0h-ffh | reserved */
__le32 LUT[64]; /* 100h-1ffh | Lookup Table Entries */
};
@@ -597,7 +599,7 @@ struct sa_drawbridge_CSR {
#define Mailbox5 SaDbCSR.MAILBOX5
#define Mailbox6 SaDbCSR.MAILBOX6
#define Mailbox7 SaDbCSR.MAILBOX7
-
+
#define DoorbellReg_p SaDbCSR.PRISETIRQ
#define DoorbellReg_s SaDbCSR.SECSETIRQ
#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
@@ -611,19 +613,19 @@ struct sa_drawbridge_CSR {
#define DOORBELL_5 0x0020
#define DOORBELL_6 0x0040
-
+
#define PrintfReady DOORBELL_5
#define PrintfDone DOORBELL_5
-
+
struct sa_registers {
struct sa_drawbridge_CSR SaDbCSR; /* 98h - c4h */
};
-
+
#define Sa_MINIPORT_REVISION 1
#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
-#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
+#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
#define sa_writew(AEP, CSR, value) writew(value, &((AEP)->regs.sa->CSR))
#define sa_writel(AEP, CSR, value) writel(value, &((AEP)->regs.sa->CSR))
@@ -640,21 +642,21 @@ struct rx_mu_registers {
__le32 IMRx[2]; /* 1310h | 10h | Inbound Message Registers */
__le32 OMRx[2]; /* 1318h | 18h | Outbound Message Registers */
__le32 IDR; /* 1320h | 20h | Inbound Doorbell Register */
- __le32 IISR; /* 1324h | 24h | Inbound Interrupt
+ __le32 IISR; /* 1324h | 24h | Inbound Interrupt
Status Register */
- __le32 IIMR; /* 1328h | 28h | Inbound Interrupt
- Mask Register */
+ __le32 IIMR; /* 1328h | 28h | Inbound Interrupt
+ Mask Register */
__le32 ODR; /* 132Ch | 2Ch | Outbound Doorbell Register */
- __le32 OISR; /* 1330h | 30h | Outbound Interrupt
+ __le32 OISR; /* 1330h | 30h | Outbound Interrupt
Status Register */
- __le32 OIMR; /* 1334h | 34h | Outbound Interrupt
+ __le32 OIMR; /* 1334h | 34h | Outbound Interrupt
Mask Register */
__le32 reserved2; /* 1338h | 38h | Reserved */
__le32 reserved3; /* 133Ch | 3Ch | Reserved */
__le32 InboundQueue;/* 1340h | 40h | Inbound Queue Port relative to firmware */
__le32 OutboundQueue;/*1344h | 44h | Outbound Queue Port relative to firmware */
- /* * Must access through ATU Inbound
- Translation Window */
+ /* * Must access through ATU Inbound
+ Translation Window */
};
struct rx_inbound {
@@ -710,12 +712,12 @@ struct rkt_registers {
typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
struct aac_fib_context {
- s16 type; // used for verification of structure
- s16 size;
+ s16 type; // used for verification of structure
+ s16 size;
u32 unique; // unique value representing this context
ulong jiffies; // used for cleanup - dmb changed to ulong
struct list_head next; // used to link context's into a linked list
- struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
+ struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
int wait; // Set to true when thread is in WaitForSingleObject
unsigned long count; // total number of FIBs on FibList
struct list_head fib_list; // this holds fibs and their attachd hw_fibs
@@ -734,9 +736,9 @@ struct sense_data {
u8 EOM:1; /* End Of Medium - reserved for random access devices */
u8 filemark:1; /* Filemark - reserved for random access devices */
- u8 information[4]; /* for direct-access devices, contains the unsigned
- * logical block address or residue associated with
- * the sense key
+ u8 information[4]; /* for direct-access devices, contains the unsigned
+ * logical block address or residue associated with
+ * the sense key
*/
u8 add_sense_len; /* number of additional sense bytes to follow this field */
u8 cmnd_info[4]; /* not used */
@@ -746,7 +748,7 @@ struct sense_data {
u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
* was in error
*/
- u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
+ u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
* the bit_ptr field has valid value
*/
u8 reserved2:2;
@@ -780,24 +782,24 @@ struct fib {
/*
* The Adapter that this I/O is destined for.
*/
- struct aac_dev *dev;
+ struct aac_dev *dev;
/*
* This is the event the sendfib routine will wait on if the
* caller did not pass one and this is synch io.
*/
- struct semaphore event_wait;
+ struct semaphore event_wait;
spinlock_t event_lock;
u32 done; /* gets set to 1 when fib is complete */
- fib_callback callback;
- void *callback_data;
+ fib_callback callback;
+ void *callback_data;
u32 flags; // u32 dmb was ulong
/*
* And for the internal issue/reply queues (we may be able
* to merge these two)
*/
struct list_head fiblink;
- void *data;
+ void *data;
struct hw_fib *hw_fib_va; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
@@ -807,7 +809,7 @@ struct fib {
*
* This is returned by the RequestAdapterInfo block
*/
-
+
struct aac_adapter_info
{
__le32 platform;
@@ -826,7 +828,7 @@ struct aac_adapter_info
__le32 biosrev;
__le32 biosbuild;
__le32 cluster;
- __le32 clusterchannelmask;
+ __le32 clusterchannelmask;
__le32 serial[2];
__le32 battery;
__le32 options;
@@ -863,9 +865,10 @@ struct aac_supplement_adapter_info
__le32 SupportedOptions2;
__le32 ReservedGrowth[1];
};
-#define AAC_FEATURE_FALCON 0x00000010
-#define AAC_OPTION_MU_RESET 0x00000001
-#define AAC_OPTION_IGNORE_RESET 0x00000002
+#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
+#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
+#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
+#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -916,13 +919,13 @@ struct aac_bus_info_response {
#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4)
#define AAC_OPT_RAID50 cpu_to_le32(1<<5)
#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6)
-#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
+#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8)
-#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
+#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10)
#define AAC_OPT_ALARM cpu_to_le32(1<<11)
#define AAC_OPT_NONDASD cpu_to_le32(1<<12)
-#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13)
+#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13)
#define AAC_OPT_RAID_SCSI_MODE cpu_to_le32(1<<14)
#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16)
#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
@@ -942,7 +945,7 @@ struct aac_dev
/*
* Map for 128 fib objects (64k)
- */
+ */
dma_addr_t hw_fib_pa;
struct hw_fib *hw_fib_va;
struct hw_fib *aif_base_va;
@@ -953,24 +956,24 @@ struct aac_dev
struct fib *free_fib;
spinlock_t fib_lock;
-
+
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
* FIBs from the adapter. The following list is used to keep
* track of all the threads that have requested these FIBs. The
- * mutex is used to synchronize access to all data associated
+ * mutex is used to synchronize access to all data associated
* with the adapter fibs.
*/
struct list_head fib_list;
struct adapter_ops a_ops;
unsigned long fsrev; /* Main driver's revision number */
-
+
unsigned base_size; /* Size of mapped in region */
struct aac_init *init; /* Holds initialization info to communicate with adapter */
- dma_addr_t init_pa; /* Holds physical address of the init struct */
-
+ dma_addr_t init_pa; /* Holds physical address of the init struct */
+
struct pci_dev *pdev; /* Our PCI interface */
void * printfbuf; /* pointer to buffer used for printf's from the adapter */
void * comm_addr; /* Base address of Comm area */
@@ -984,11 +987,11 @@ struct aac_dev
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
int cardtype;
-
+
/*
* The following is the device specific extension.
*/
-#if (!defined(AAC_MIN_FOOTPRINT_SIZE))
+#ifndef AAC_MIN_FOOTPRINT_SIZE
# define AAC_MIN_FOOTPRINT_SIZE 8192
#endif
union
@@ -1009,7 +1012,9 @@ struct aac_dev
/* These are in adapter info but they are in the io flow so
* lets break them out so we don't have to do an AND to check them
*/
- u8 nondasd_support;
+ u8 nondasd_support;
+ u8 jbod;
+ u8 cache_protected;
u8 dac_support;
u8 raid_scsi_mode;
u8 comm_interface;
@@ -1066,18 +1071,19 @@ struct aac_dev
(dev)->a_ops.adapter_comm(dev, comm)
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
+#define FIB_CONTEXT_FLAG (0x00000002)
/*
* Define the command values
*/
-
+
#define Null 0
-#define GetAttributes 1
-#define SetAttributes 2
-#define Lookup 3
-#define ReadLink 4
-#define Read 5
-#define Write 6
+#define GetAttributes 1
+#define SetAttributes 2
+#define Lookup 3
+#define ReadLink 4
+#define Read 5
+#define Write 6
#define Create 7
#define MakeDirectory 8
#define SymbolicLink 9
@@ -1173,19 +1179,19 @@ struct aac_dev
struct aac_read
{
- __le32 command;
- __le32 cid;
- __le32 block;
- __le32 count;
+ __le32 command;
+ __le32 cid;
+ __le32 block;
+ __le32 count;
struct sgmap sg; // Must be last in struct because it is variable
};
struct aac_read64
{
- __le32 command;
- __le16 cid;
- __le16 sector_count;
- __le32 block;
+ __le32 command;
+ __le16 cid;
+ __le16 sector_count;
+ __le32 block;
__le16 pad;
__le16 flags;
struct sgmap64 sg; // Must be last in struct because it is variable
@@ -1193,26 +1199,26 @@ struct aac_read64
struct aac_read_reply
{
- __le32 status;
- __le32 count;
+ __le32 status;
+ __le32 count;
};
struct aac_write
{
__le32 command;
- __le32 cid;
- __le32 block;
- __le32 count;
- __le32 stable; // Not used
+ __le32 cid;
+ __le32 block;
+ __le32 count;
+ __le32 stable; // Not used
struct sgmap sg; // Must be last in struct because it is variable
};
struct aac_write64
{
- __le32 command;
- __le16 cid;
- __le16 sector_count;
- __le32 block;
+ __le32 command;
+ __le16 cid;
+ __le16 sector_count;
+ __le32 block;
__le16 pad;
__le16 flags;
#define IO_TYPE_WRITE 0x00000000
@@ -1223,7 +1229,7 @@ struct aac_write64
struct aac_write_reply
{
__le32 status;
- __le32 count;
+ __le32 count;
__le32 committed;
};
@@ -1326,10 +1332,10 @@ struct aac_srb_reply
#define SRB_NoDataXfer 0x0000
#define SRB_DisableDisconnect 0x0004
#define SRB_DisableSynchTransfer 0x0008
-#define SRB_BypassFrozenQueue 0x0010
+#define SRB_BypassFrozenQueue 0x0010
#define SRB_DisableAutosense 0x0020
#define SRB_DataIn 0x0040
-#define SRB_DataOut 0x0080
+#define SRB_DataOut 0x0080
/*
* SRB Functions - set in aac_srb->function
@@ -1352,7 +1358,7 @@ struct aac_srb_reply
#define SRBF_RemoveDevice 0x0016
#define SRBF_DomainValidation 0x0017
-/*
+/*
* SRB SCSI Status - set in aac_srb->scsi_status
*/
#define SRB_STATUS_PENDING 0x00
@@ -1511,17 +1517,17 @@ struct aac_get_container_count_resp {
*/
struct aac_mntent {
- __le32 oid;
+ __le32 oid;
u8 name[16]; /* if applicable */
struct creation_info create_info; /* if applicable */
__le32 capacity;
- __le32 vol; /* substrate structure */
- __le32 obj; /* FT_FILESYS, etc. */
- __le32 state; /* unready for mounting,
+ __le32 vol; /* substrate structure */
+ __le32 obj; /* FT_FILESYS, etc. */
+ __le32 state; /* unready for mounting,
readonly, etc. */
- union aac_contentinfo fileinfo; /* Info specific to content
+ union aac_contentinfo fileinfo; /* Info specific to content
manager (eg, filesystem) */
- __le32 altoid; /* != oid <==> snapshot or
+ __le32 altoid; /* != oid <==> snapshot or
broken mirror exists */
__le32 capacityhigh;
};
@@ -1538,7 +1544,7 @@ struct aac_query_mount {
struct aac_mount {
__le32 status;
- __le32 type; /* should be same as that requested */
+ __le32 type; /* should be same as that requested */
__le32 count;
struct aac_mntent mnt[1];
};
@@ -1608,7 +1614,7 @@ struct aac_delete_disk {
u32 disknum;
u32 cnum;
};
-
+
struct fib_ioctl
{
u32 fibctx;
@@ -1622,10 +1628,10 @@ struct revision
__le32 version;
__le32 build;
};
-
+
/*
- * Ugly - non Linux like ioctl coding for back compat.
+ * Ugly - non Linux like ioctl coding for back compat.
*/
#define CTL_CODE(function, method) ( \
@@ -1633,7 +1639,7 @@ struct revision
)
/*
- * Define the method codes for how buffers are passed for I/O and FS
+ * Define the method codes for how buffers are passed for I/O and FS
* controls
*/
@@ -1644,15 +1650,15 @@ struct revision
* Filesystem ioctls
*/
-#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED)
-#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED)
+#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED)
+#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED)
#define FSACTL_DELETE_DISK 0x163
#define FSACTL_QUERY_DISK 0x173
#define FSACTL_OPEN_GET_ADAPTER_FIB CTL_CODE(2100, METHOD_BUFFERED)
#define FSACTL_GET_NEXT_ADAPTER_FIB CTL_CODE(2101, METHOD_BUFFERED)
#define FSACTL_CLOSE_GET_ADAPTER_FIB CTL_CODE(2102, METHOD_BUFFERED)
#define FSACTL_MINIPORT_REV_CHECK CTL_CODE(2107, METHOD_BUFFERED)
-#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED)
+#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED)
#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER)
#define FSACTL_GET_CONTAINERS 2131
#define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED)
@@ -1661,7 +1667,7 @@ struct revision
struct aac_common
{
/*
- * If this value is set to 1 then interrupt moderation will occur
+ * If this value is set to 1 then interrupt moderation will occur
* in the base commuication support.
*/
u32 irq_mod;
@@ -1690,11 +1696,11 @@ extern struct aac_common aac_config;
* The following macro is used when sending and receiving FIBs. It is
* only used for debugging.
*/
-
+
#ifdef DBG
#define FIB_COUNTER_INCREMENT(counter) (counter)++
#else
-#define FIB_COUNTER_INCREMENT(counter)
+#define FIB_COUNTER_INCREMENT(counter)
#endif
/*
@@ -1726,17 +1732,17 @@ extern struct aac_common aac_config;
*
* The adapter reports is present state through the phase. Only
* a single phase should be ever be set. Each phase can have multiple
- * phase status bits to provide more detailed information about the
- * state of the board. Care should be taken to ensure that any phase
+ * phase status bits to provide more detailed information about the
+ * state of the board. Care should be taken to ensure that any phase
* status bits that are set when changing the phase are also valid
* for the new phase or be cleared out. Adapter software (monitor,
- * iflash, kernel) is responsible for properly maintining the phase
+ * iflash, kernel) is responsible for properly maintining the phase
* status mailbox when it is running.
- *
- * MONKER_API Phases
*
- * Phases are bit oriented. It is NOT valid to have multiple bits set
- */
+ * MONKER_API Phases
+ *
+ * Phases are bit oriented. It is NOT valid to have multiple bits set
+ */
#define SELF_TEST_FAILED 0x00000004
#define MONITOR_PANIC 0x00000020
@@ -1759,16 +1765,22 @@ extern struct aac_common aac_config;
* For FIB communication, we need all of the following things
* to send back to the user.
*/
-
-#define AifCmdEventNotify 1 /* Notify of event */
+
+#define AifCmdEventNotify 1 /* Notify of event */
#define AifEnConfigChange 3 /* Adapter configuration change */
#define AifEnContainerChange 4 /* Container configuration change */
#define AifEnDeviceFailure 5 /* SCSI device failed */
+#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */
+#define EM_DRIVE_INSERTION 31
+#define EM_DRIVE_REMOVAL 32
+#define AifEnBatteryEvent 14 /* Change in Battery State */
#define AifEnAddContainer 15 /* A new array was created */
#define AifEnDeleteContainer 16 /* A container was deleted */
#define AifEnExpEvent 23 /* Firmware Event Log */
#define AifExeFirmwarePanic 3 /* Firmware Event Panic */
#define AifHighPriority 3 /* Highest Priority Event */
+#define AifEnAddJBOD 30 /* JBOD created */
+#define AifEnDeleteJBOD 31 /* JBOD deleted */
#define AifCmdJobProgress 2 /* Progress report */
#define AifJobCtrZero 101 /* Array Zero progress */
@@ -1780,11 +1792,11 @@ extern struct aac_common aac_config;
#define AifDenVolumeExtendComplete 201 /* A volume extend completed */
#define AifReqJobList 100 /* Gets back complete job list */
#define AifReqJobsForCtr 101 /* Gets back jobs for specific container */
-#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */
-#define AifReqJobReport 103 /* Gets back a specific job report or list of them */
+#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */
+#define AifReqJobReport 103 /* Gets back a specific job report or list of them */
#define AifReqTerminateJob 104 /* Terminates job */
#define AifReqSuspendJob 105 /* Suspends a job */
-#define AifReqResumeJob 106 /* Resumes a job */
+#define AifReqResumeJob 106 /* Resumes a job */
#define AifReqSendAPIReport 107 /* API generic report requests */
#define AifReqAPIJobStart 108 /* Start a job from the API */
#define AifReqAPIJobUpdate 109 /* Update a job report from the API */
@@ -1803,8 +1815,8 @@ struct aac_aifcmd {
};
/**
- * Convert capacity to cylinders
- * accounting for the fact capacity could be a 64 bit value
+ * Convert capacity to cylinders
+ * accounting for the fact capacity could be a 64 bit value
*
*/
static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
@@ -1861,6 +1873,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
+char * get_container_type(unsigned type);
extern int numacb;
extern int acbsize;
extern char aac_driver_version[];
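A note on the endianness change above: AAC_FEATURE_* and AAC_OPTION_* now carry cpu_to_le32(), so they can be masked directly against the __le32 fields they describe (e.g. SupportedOptions2) without a per-use conversion. A minimal sketch of the resulting usage, assuming only the structures shown in this header; supports_jbod() is an illustrative helper, not a driver symbol:

	static inline int supports_jbod(const struct aac_supplement_adapter_info *sinfo)
	{
		/* both operands are __le32, so no byte swapping is needed here */
		return (sinfo->SupportedOptions2 & AAC_FEATURE_JBOD) != 0;
	}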
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 1e6d7a9c75bf..851a7e599c50 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -48,13 +48,13 @@
* ioctl_send_fib - send a FIB from userspace
* @dev: adapter is being processed
* @arg: arguments to the ioctl call
- *
+ *
* This routine sends a fib to the adapter on behalf of a user level
* program.
*/
# define AAC_DEBUG_PREAMBLE KERN_INFO
# define AAC_DEBUG_POSTAMBLE
-
+
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
struct hw_fib * kfib;
@@ -71,7 +71,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
if(fibptr == NULL) {
return -ENOMEM;
}
-
+
kfib = fibptr->hw_fib_va;
/*
* First copy in the header so that we can check the size field.
@@ -109,7 +109,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
- * Since we didn't really send a fib, zero out the state to allow
+ * Since we didn't really send a fib, zero out the state to allow
* cleanup code not to assert.
*/
kfib->header.XferState = 0;
@@ -169,7 +169,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
fibctx->size = sizeof(struct aac_fib_context);
- /*
+ /*
* Yes yes, I know this could be an index, but we have a
* better guarantee of uniqueness for the locked loop below.
* Without the aid of a persistent history, this also helps
@@ -189,7 +189,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
INIT_LIST_HEAD(&fibctx->fib_list);
fibctx->jiffies = jiffies/HZ;
/*
- * Now add this context onto the adapter's
+ * Now add this context onto the adapter's
* AdapterFibContext list.
*/
spin_lock_irqsave(&dev->fib_lock, flags);
@@ -207,12 +207,12 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
}
list_add_tail(&fibctx->next, &dev->fib_list);
spin_unlock_irqrestore(&dev->fib_lock, flags);
- if (copy_to_user(arg, &fibctx->unique,
+ if (copy_to_user(arg, &fibctx->unique,
sizeof(fibctx->unique))) {
status = -EFAULT;
} else {
status = 0;
- }
+ }
}
return status;
}
@@ -221,8 +221,8 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
* next_getadapter_fib - get the next fib
* @dev: adapter to use
* @arg: ioctl argument
- *
- * This routine will get the next Fib, if available, from the AdapterFibContext
+ *
+ * This routine will get the next Fib, if available, from the AdapterFibContext
* passed in from the user.
*/
@@ -234,7 +234,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
int status;
struct list_head * entry;
unsigned long flags;
-
+
if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
return -EFAULT;
/*
@@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
+ spin_lock_irqsave(&dev->fib_lock, flags);
entry = dev->fib_list.next;
fibctx = NULL;
@@ -251,37 +252,37 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the AdapterFibContext from the Input parameters.
*/
- if (fibctx->unique == f.fibctx) { /* We found a winner */
+ if (fibctx->unique == f.fibctx) { /* We found a winner */
break;
}
entry = entry->next;
fibctx = NULL;
}
if (!fibctx) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context not found\n"));
return -EINVAL;
}
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context))) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context corrupt?\n"));
return -EINVAL;
}
status = 0;
- spin_lock_irqsave(&dev->fib_lock, flags);
/*
* If there are no fibs to send back, then either wait or return
* -EAGAIN
*/
return_fib:
if (!list_empty(&fibctx->fib_list)) {
- struct list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fib_list.next;
list_del(entry);
-
+
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--;
spin_unlock_irqrestore(&dev->fib_lock, flags);
@@ -289,7 +290,7 @@ return_fib:
kfree(fib->hw_fib_va);
kfree(fib);
return -EFAULT;
- }
+ }
/*
* Free the space occupied by this copy of the fib.
*/
@@ -318,7 +319,7 @@ return_fib:
}
} else {
status = -EAGAIN;
- }
+ }
}
fibctx->jiffies = jiffies/HZ;
return status;
@@ -327,7 +328,9 @@ return_fib:
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
struct fib *fib;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->fib_lock, flags);
/*
* First free any FIBs that have not been consumed.
*/
@@ -350,6 +353,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
* Remove the Context from the AdapterFibContext List
*/
list_del(&fibctx->next);
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
/*
* Invalidate context
*/
@@ -368,7 +372,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
*
* This routine will close down the fibctx passed in from the user.
*/
-
+
static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
struct aac_fib_context *fibctx;
@@ -415,8 +419,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
* @arg: ioctl arguments
*
* This routine returns the driver version.
- * Under Linux, there have been no version incompatibilities, so this is
- * simple!
+ * Under Linux, there have been no version incompatibilities, so this is
+ * simple!
*/
static int check_revision(struct aac_dev *dev, void __user *arg)
@@ -426,12 +430,12 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
u32 version;
response.compat = 1;
- version = (simple_strtol(driver_version,
+ version = (simple_strtol(driver_version,
&driver_version, 10) << 24) | 0x00000400;
version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
version += simple_strtol(driver_version + 1, NULL, 10);
response.version = cpu_to_le32(version);
-# if (defined(AAC_DRIVER_BUILD))
+# ifdef AAC_DRIVER_BUILD
response.build = cpu_to_le32(AAC_DRIVER_BUILD);
# else
response.build = cpu_to_le32(9999);
@@ -464,7 +468,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
- u32 sg_indx = 0;
+ u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
@@ -475,7 +479,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
return -EBUSY;
}
if (!capable(CAP_SYS_ADMIN)){
- dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
+ dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
return -EPERM;
}
/*
@@ -490,7 +494,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
- dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
rcode = -EFAULT;
goto cleanup;
}
@@ -507,7 +511,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
if(copy_from_user(user_srbcmd, user_srb,fibsize)){
- dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
rcode = -EFAULT;
goto cleanup;
}
@@ -518,15 +522,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
// Fix up srb for endian and force some values
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
- srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
+ srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
- srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
- srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
- srbcmd->flags = cpu_to_le32(flags);
+ srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
+ srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
+ srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
-
+
switch (flags & (SRB_DataIn | SRB_DataOut)) {
case SRB_DataOut:
data_dir = DMA_TO_DEVICE;
@@ -582,7 +586,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
void* p;
/* Does this really need to be GFP_DMA? */
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
- if(p == 0) {
+ if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count,i,upsg->count));
rcode = -ENOMEM;
@@ -594,7 +598,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
- if( flags & SRB_DataOut ){
+ if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
@@ -626,7 +630,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
void* p;
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
- if(p == 0) {
+ if(!p) {
kfree (usg);
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
@@ -637,7 +641,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
- if( flags & SRB_DataOut ){
+ if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
kfree (usg);
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
@@ -668,7 +672,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
void* p;
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
- if(p == 0) {
+ if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
rcode = -ENOMEM;
@@ -680,7 +684,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
- if( flags & SRB_DataOut ){
+ if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
@@ -698,7 +702,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
dma_addr_t addr;
void* p;
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
- if(p == 0) {
+ if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count, i, upsg->count));
rcode = -ENOMEM;
@@ -708,7 +712,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
- if( flags & SRB_DataOut ){
+ if (flags & SRB_DataOut) {
if(copy_from_user(p, sg_user[i],
upsg->sg[i].count)) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
@@ -734,19 +738,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
if (status != 0){
- dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
rcode = -ENXIO;
goto cleanup;
}
- if( flags & SRB_DataIn ) {
+ if (flags & SRB_DataIn) {
for(i = 0 ; i <= sg_indx; i++){
byte_count = le32_to_cpu(
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
: srbcmd->sg.sg[i].count);
if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
- dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
rcode = -EFAULT;
goto cleanup;
@@ -756,7 +760,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
reply = (struct aac_srb_reply *) fib_data(srbfib);
if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
- dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
rcode = -EFAULT;
goto cleanup;
}
@@ -775,34 +779,34 @@ cleanup:
}
struct aac_pci_info {
- u32 bus;
- u32 slot;
+ u32 bus;
+ u32 slot;
};
static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
- struct aac_pci_info pci_info;
+ struct aac_pci_info pci_info;
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
- if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
- dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
- return -EFAULT;
+ if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+ dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
+ return -EFAULT;
}
- return 0;
+ return 0;
}
-
+
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
int status;
-
+
/*
* HBA gets first crack
*/
-
+
status = aac_dev_ioctl(dev, cmd, arg);
if(status != -ENOTTY)
return status;
@@ -832,7 +836,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
break;
default:
status = -ENOTTY;
- break;
+ break;
}
return status;
}
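The next_getadapter_fib/aac_close_fib_context hunks above move the fib_lock acquisition in front of the fib_list walk, so a context cannot be removed while it is being looked up. A minimal sketch of that lookup-under-lock pattern, assuming the aac_dev and aac_fib_context fields visible in this patch; fib_context_is_registered() is a hypothetical helper used only for illustration:

	static int fib_context_is_registered(struct aac_dev *dev, u32 unique)
	{
		struct aac_fib_context *fibctx;
		unsigned long flags;
		int found = 0;

		spin_lock_irqsave(&dev->fib_lock, flags);
		list_for_each_entry(fibctx, &dev->fib_list, next) {
			if (fibctx->unique == unique) {
				found = 1;
				break;
			}
		}
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return found;
	}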
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 8736813a0296..89cc8b7b42a2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -301,10 +301,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
(status[0] == 0x00000001)) {
- if (status[1] & AAC_OPT_NEW_COMM_64)
+ if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
dev->raw_io_64 = 1;
if (dev->a_ops.adapter_comm &&
- (status[1] & AAC_OPT_NEW_COMM))
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)))
dev->comm_interface = AAC_COMM_MESSAGE;
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
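The comminit.c hunk follows from the header change: AAC_OPT_NEW_COMM and AAC_OPT_NEW_COMM_64 are now little-endian constants, while the status words returned by aac_adapter_sync_cmd() are host-order u32, so one side has to be converted before masking. A sketch of the check, with new_comm_supported() as an illustrative name only:

	static int new_comm_supported(u32 status)
	{
		/* status is host order; the option constant carries cpu_to_le32() */
		return (status & le32_to_cpu(AAC_OPT_NEW_COMM)) != 0;
	}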
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index abce48ccc85b..81b36923e0ef 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -56,7 +56,7 @@
* Allocate and map the shared PCI space for the FIB blocks used to
* talk to the Adaptec firmware.
*/
-
+
static int fib_map_alloc(struct aac_dev *dev)
{
dprintk((KERN_INFO
@@ -109,14 +109,16 @@ int aac_fib_setup(struct aac_dev * dev)
}
if (i<0)
return -ENOMEM;
-
+
hw_fib = dev->hw_fib_va;
hw_fib_pa = dev->hw_fib_pa;
memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
/*
* Initialise the fibs
*/
- for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++)
{
fibptr->dev = dev;
fibptr->hw_fib_va = hw_fib;
@@ -148,13 +150,13 @@ int aac_fib_setup(struct aac_dev * dev)
* Allocate a fib from the adapter fib pool. If the pool is empty we
* return NULL.
*/
-
+
struct fib *aac_fib_alloc(struct aac_dev *dev)
{
struct fib * fibptr;
unsigned long flags;
spin_lock_irqsave(&dev->fib_lock, flags);
- fibptr = dev->free_fib;
+ fibptr = dev->free_fib;
if(!fibptr){
spin_unlock_irqrestore(&dev->fib_lock, flags);
return fibptr;
@@ -171,6 +173,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
* each I/O
*/
fibptr->hw_fib_va->header.XferState = 0;
+ fibptr->flags = 0;
fibptr->callback = NULL;
fibptr->callback_data = NULL;
@@ -183,7 +186,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
*
* Frees up a fib and places it on the appropriate queue
*/
-
+
void aac_fib_free(struct fib *fibptr)
{
unsigned long flags;
@@ -204,10 +207,10 @@ void aac_fib_free(struct fib *fibptr)
/**
* aac_fib_init - initialise a fib
* @fibptr: The fib to initialize
- *
+ *
* Set up the generic fib fields ready for use
*/
-
+
void aac_fib_init(struct fib *fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib_va;
@@ -227,12 +230,12 @@ void aac_fib_init(struct fib *fibptr)
* Will deallocate and return to the free pool the FIB pointed to by the
* caller.
*/
-
+
static void fib_dealloc(struct fib * fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib_va;
BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
- hw_fib->header.XferState = 0;
+ hw_fib->header.XferState = 0;
}
/*
@@ -241,7 +244,7 @@ static void fib_dealloc(struct fib * fibptr)
* these routines and are the only routines which have a knowledge of the
* how these queues are implemented.
*/
-
+
/**
* aac_get_entry - get a queue entry
* @dev: Adapter
@@ -254,7 +257,7 @@ static void fib_dealloc(struct fib * fibptr)
* is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
* returned.
*/
-
+
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
struct aac_queue * q;
@@ -279,26 +282,27 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
idx = ADAP_NORM_RESP_ENTRIES;
}
if (idx != le32_to_cpu(*(q->headers.consumer)))
- *nonotify = 1;
+ *nonotify = 1;
}
if (qid == AdapNormCmdQueue) {
- if (*index >= ADAP_NORM_CMD_ENTRIES)
+ if (*index >= ADAP_NORM_CMD_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
} else {
- if (*index >= ADAP_NORM_RESP_ENTRIES)
+ if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
- if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
+ /* Queue is full */
+ if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
qid, q->numpending);
return 0;
} else {
- *entry = q->base + *index;
+ *entry = q->base + *index;
return 1;
}
-}
+}
/**
* aac_queue_get - get the next free QE
@@ -320,31 +324,29 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
{
struct aac_entry * entry = NULL;
int map = 0;
-
+
if (qid == AdapNormCmdQueue) {
/* if no entries wait for some if caller wants to */
- while (!aac_get_entry(dev, qid, &entry, index, nonotify))
- {
+ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
printk(KERN_ERR "GetEntries failed\n");
}
- /*
- * Setup queue entry with a command, status and fib mapped
- */
- entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
- map = 1;
+ /*
+ * Setup queue entry with a command, status and fib mapped
+ */
+ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+ map = 1;
} else {
- while(!aac_get_entry(dev, qid, &entry, index, nonotify))
- {
+ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
/* if no entries wait for some if caller wants to */
}
- /*
- * Setup queue entry with command, status and fib mapped
- */
- entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
- entry->addr = hw_fib->header.SenderFibAddress;
- /* Restore adapters pointer to the FIB */
+ /*
+ * Setup queue entry with command, status and fib mapped
+ */
+ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+ entry->addr = hw_fib->header.SenderFibAddress;
+			/* Restore the adapter's pointer to the FIB */
hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
- map = 0;
+ map = 0;
}
/*
* If MapFib is true than we need to map the Fib and put pointers
@@ -356,8 +358,8 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
}
/*
- * Define the highest level of host to adapter communication routines.
- * These routines will support host to adapter FS commuication. These
+ * Define the highest level of host to adapter communication routines.
+ * These routines will support host to adapter FS commuication. These
* routines have no knowledge of the commuication method used. This level
* sends and receives FIBs. This level has no knowledge of how these FIBs
* get passed back and forth.
@@ -379,7 +381,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
* an event to wait on must be supplied. This event will be set when a
* response FIB is received from the adapter.
*/
-
+
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
int priority, int wait, int reply, fib_callback callback,
void *callback_data)
@@ -392,16 +394,17 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
/*
- * There are 5 cases with the wait and reponse requested flags.
+ * There are 5 cases with the wait and reponse requested flags.
* The only invalid cases are if the caller requests to wait and
* does not request a response and if the caller does not want a
* response and the Fib is not allocated from pool. If a response
* is not requesed the Fib will just be deallocaed by the DPC
* routine when the response comes back from the adapter. No
- * further processing will be done besides deleting the Fib. We
+ * further processing will be done besides deleting the Fib. We
* will have a debug mode where the adapter can notify the host
* it had a problem and the host can log that fact.
*/
+ fibptr->flags = 0;
if (wait && !reply) {
return -EINVAL;
} else if (!wait && reply) {
@@ -413,7 +416,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
} else if (wait && reply) {
hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NormalSent);
- }
+ }
/*
* Map the fib into 32bits by using the fib number
*/
@@ -436,7 +439,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
return -EMSGSIZE;
- }
+ }
/*
* Get a queue entry connect the FIB to it and send an notify
* the adapter a command is ready.
@@ -450,10 +453,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
if (!wait) {
fibptr->callback = callback;
fibptr->callback_data = callback_data;
+ fibptr->flags = FIB_CONTEXT_FLAG;
}
fibptr->done = 0;
- fibptr->flags = 0;
FIB_COUNTER_INCREMENT(aac_config.FibsSent);
@@ -473,9 +476,9 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
aac_adapter_deliver(fibptr);
/*
- * If the caller wanted us to wait for response wait now.
+ * If the caller wanted us to wait for response wait now.
*/
-
+
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
/* Only set for first known interruptable command */
@@ -522,7 +525,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
spin_unlock_irqrestore(&fibptr->event_lock, flags);
BUG_ON(fibptr->done == 0);
-
+
if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
return -ETIMEDOUT;
return 0;
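The aac_fib_send() hunks above clear fibptr->flags on entry and set the new FIB_CONTEXT_FLAG only on the asynchronous (callback) path, which appears intended to let later code tell callback-driven fibs from synchronous ones. A minimal sketch of that marking under this assumption; mark_fib_async() and fib_is_async() are illustrative helpers, not driver symbols:

	static void mark_fib_async(struct fib *fibptr, fib_callback callback, void *data)
	{
		fibptr->callback = callback;
		fibptr->callback_data = data;
		fibptr->flags = FIB_CONTEXT_FLAG;	/* has a completion callback */
	}

	static int fib_is_async(const struct fib *fibptr)
	{
		return (fibptr->flags & FIB_CONTEXT_FLAG) != 0;
	}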
@@ -537,15 +540,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
return 0;
}
-/**
+/**
* aac_consumer_get - get the top of the queue
* @dev: Adapter
* @q: Queue
* @entry: Return entry
*
* Will return a pointer to the entry on the top of the queue requested that
- * we are a consumer of, and return the address of the queue entry. It does
- * not change the state of the queue.
+ * we are a consumer of, and return the address of the queue entry. It does
+ * not change the state of the queue.
*/
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
@@ -560,10 +563,10 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
* the end of the queue, else we just use the entry
* pointed to by the header index
*/
- if (le32_to_cpu(*q->headers.consumer) >= q->entries)
- index = 0;
+ if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+ index = 0;
else
- index = le32_to_cpu(*q->headers.consumer);
+ index = le32_to_cpu(*q->headers.consumer);
*entry = q->base + index;
status = 1;
}
@@ -587,12 +590,12 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
wasfull = 1;
-
+
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
*q->headers.consumer = cpu_to_le32(1);
else
*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
-
+
if (wasfull) {
switch (qid) {
@@ -608,7 +611,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
}
aac_adapter_notify(dev, notify);
}
-}
+}
/**
* aac_fib_adapter_complete - complete adapter issued fib
@@ -630,32 +633,32 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
if (hw_fib->header.XferState == 0) {
if (dev->comm_interface == AAC_COMM_MESSAGE)
kfree (hw_fib);
- return 0;
+ return 0;
}
/*
* If we plan to do anything check the structure type first.
- */
- if ( hw_fib->header.StructType != FIB_MAGIC ) {
+ */
+ if (hw_fib->header.StructType != FIB_MAGIC) {
if (dev->comm_interface == AAC_COMM_MESSAGE)
kfree (hw_fib);
- return -EINVAL;
+ return -EINVAL;
}
/*
* This block handles the case where the adapter had sent us a
* command and we have finished processing the command. We
- * call completeFib when we are done processing the command
- * and want to send a response back to the adapter. This will
+ * call completeFib when we are done processing the command
+ * and want to send a response back to the adapter. This will
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
if (dev->comm_interface == AAC_COMM_MESSAGE) {
kfree (hw_fib);
} else {
- u32 index;
- hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
+ u32 index;
+ hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
if (size) {
size += sizeof(struct aac_fibhdr);
- if (size > le16_to_cpu(hw_fib->header.SenderSize))
+ if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
}
@@ -667,12 +670,11 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
if (!(nointr & (int)aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormRespQueue);
}
+ } else {
+ printk(KERN_WARNING "aac_fib_adapter_complete: "
+ "Unknown xferstate detected.\n");
+ BUG();
}
- else
- {
- printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
- BUG();
- }
return 0;
}
@@ -682,7 +684,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
*
* Will do all necessary work to complete a FIB.
*/
-
+
int aac_fib_complete(struct fib *fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
@@ -692,15 +694,15 @@ int aac_fib_complete(struct fib *fibptr)
*/
if (hw_fib->header.XferState == 0)
- return 0;
+ return 0;
/*
* If we plan to do anything check the structure type first.
- */
+ */
if (hw_fib->header.StructType != FIB_MAGIC)
- return -EINVAL;
+ return -EINVAL;
/*
- * This block completes a cdb which orginated on the host and we
+ * This block completes a cdb which orginated on the host and we
* just need to deallocate the cdb or reinit it. At this point the
* command is complete that we had sent to the adapter and this
* cdb could be reused.
@@ -721,7 +723,7 @@ int aac_fib_complete(struct fib *fibptr)
fib_dealloc(fibptr);
} else {
BUG();
- }
+ }
return 0;
}
@@ -741,7 +743,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
{
int length = val & 0xffff;
int level = (val >> 16) & 0xffff;
-
+
/*
* The size of the printfbuf is set in port.c
* There is no variable or define for it
@@ -755,7 +757,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
else
printk(KERN_INFO "%s:%s", dev->name, cp);
}
- memset(cp, 0, 256);
+ memset(cp, 0, 256);
}
@@ -773,20 +775,20 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib_va;
struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
- u32 container;
+ u32 channel, id, lun, container;
struct scsi_device *device;
enum {
NOTHING,
DELETE,
ADD,
CHANGE
- } device_config_needed;
+ } device_config_needed = NOTHING;
/* Sniff for container changes */
if (!dev || !dev->fsa_dev)
return;
- container = (u32)-1;
+ container = channel = id = lun = (u32)-1;
/*
* We have set this up to try and minimize the number of
@@ -796,13 +798,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
*/
switch (le32_to_cpu(aifcmd->command)) {
case AifCmdDriverNotify:
- switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
/*
* Morph or Expand complete
*/
case AifDenMorphComplete:
case AifDenVolumeExtendComplete:
- container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
@@ -814,9 +816,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
*/
if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
- device = scsi_device_lookup(dev->scsi_host_ptr,
- CONTAINER_TO_CHANNEL(container),
- CONTAINER_TO_ID(container),
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
CONTAINER_TO_LUN(container));
if (device) {
dev->fsa_dev[container].config_needed = CHANGE;
@@ -835,25 +837,29 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (container >= dev->maximum_num_containers)
break;
if ((dev->fsa_dev[container].config_waiting_on ==
- le32_to_cpu(*(u32 *)aifcmd->data)) &&
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if ((dev->fsa_dev[container].config_waiting_on ==
- le32_to_cpu(*(u32 *)aifcmd->data)) &&
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
}
break;
case AifCmdEventNotify:
- switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+ case AifEnBatteryEvent:
+ dev->cache_protected =
+ (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
+ break;
/*
* Add an Array.
*/
case AifEnAddContainer:
- container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
dev->fsa_dev[container].config_needed = ADD;
@@ -866,7 +872,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
* Delete an Array.
*/
case AifEnDeleteContainer:
- container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
dev->fsa_dev[container].config_needed = DELETE;
@@ -880,7 +886,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
* waiting on something else, setup to wait on a Config Change.
*/
case AifEnContainerChange:
- container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on &&
@@ -895,6 +901,60 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
case AifEnConfigChange:
break;
+ case AifEnAddJBOD:
+ case AifEnDeleteJBOD:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if ((container >> 28))
+ break;
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels)
+ break;
+ id = container & 0xFFFF;
+ if (id >= dev->maximum_num_physicals)
+ break;
+ lun = (container >> 16) & 0xFF;
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[0] ==
+ cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
+ break;
+
+ case AifEnEnclosureManagement:
+ /*
+			 * If in JBOD mode, automatic exposure of a new
+			 * physical target is suppressed until it is configured.
+ */
+ if (dev->jbod)
+ break;
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
+ case EM_DRIVE_INSERTION:
+ case EM_DRIVE_REMOVAL:
+ container = le32_to_cpu(
+ ((__le32 *)aifcmd->data)[2]);
+ if ((container >> 28))
+ break;
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels)
+ break;
+ id = container & 0xFFFF;
+ lun = (container >> 16) & 0xFF;
+ if (id >= dev->maximum_num_physicals) {
+ /* legacy dev_t ? */
+ if ((0x2000 <= id) || lun || channel ||
+ ((channel = (id >> 7) & 0x3F) >=
+ dev->maximum_num_channels))
+ break;
+ lun = (id >> 4) & 7;
+ id &= 0xF;
+ }
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_DRIVE_INSERTION)) ?
+ ADD : DELETE;
+ break;
+ }
+ break;
}
/*
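For reference, the AifEnAddJBOD/AifEnDeleteJBOD and EM_DRIVE_* handling above unpacks a single 32-bit AIF word into a bus/target/LUN address (high nibble reserved, channel in bits 27:24, LUN in bits 23:16, target id in bits 15:0). A sketch of that decoding, assuming only the layout used in the hunk; the struct and helper names are illustrative:

	struct aif_phys_addr {
		u32 channel;	/* bits 27:24 */
		u32 lun;	/* bits 23:16 */
		u32 id;		/* bits 15:0  */
	};

	static int decode_aif_phys_addr(u32 word, struct aif_phys_addr *a)
	{
		if (word >> 28)		/* reserved bits must be clear */
			return -1;
		a->channel = (word >> 24) & 0xF;
		a->lun = (word >> 16) & 0xFF;
		a->id = word & 0xFFFF;
		return 0;
	}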
@@ -905,13 +965,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
if (container >= dev->maximum_num_containers)
break;
if ((dev->fsa_dev[container].config_waiting_on ==
- le32_to_cpu(*(u32 *)aifcmd->data)) &&
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if ((dev->fsa_dev[container].config_waiting_on ==
- le32_to_cpu(*(u32 *)aifcmd->data)) &&
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
dev->fsa_dev[container].config_waiting_on = 0;
}
@@ -926,9 +986,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
* wait for a container change.
*/
- if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
- && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
- || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
+ if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+ (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
+ ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
for (container = 0;
container < dev->maximum_num_containers;
++container) {
@@ -943,9 +1003,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
jiffies;
}
}
- if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
- && (((u32 *)aifcmd->data)[6] == 0)
- && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
+ if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+ ((__le32 *)aifcmd->data)[6] == 0 &&
+ ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
for (container = 0;
container < dev->maximum_num_containers;
++container) {
@@ -963,7 +1023,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
break;
}
- device_config_needed = NOTHING;
+ if (device_config_needed == NOTHING)
for (container = 0; container < dev->maximum_num_containers;
++container) {
if ((dev->fsa_dev[container].config_waiting_on == 0) &&
@@ -972,6 +1032,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
device_config_needed =
dev->fsa_dev[container].config_needed;
dev->fsa_dev[container].config_needed = NOTHING;
+ channel = CONTAINER_TO_CHANNEL(container);
+ id = CONTAINER_TO_ID(container);
+ lun = CONTAINER_TO_LUN(container);
break;
}
}
@@ -995,34 +1058,56 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
/*
* force reload of disk info via aac_probe_container
*/
- if ((device_config_needed == CHANGE)
- && (dev->fsa_dev[container].valid == 1))
- dev->fsa_dev[container].valid = 2;
- if ((device_config_needed == CHANGE) ||
- (device_config_needed == ADD))
+ if ((channel == CONTAINER_CHANNEL) &&
+ (device_config_needed != NOTHING)) {
+ if (dev->fsa_dev[container].valid == 1)
+ dev->fsa_dev[container].valid = 2;
aac_probe_container(dev, container);
- device = scsi_device_lookup(dev->scsi_host_ptr,
- CONTAINER_TO_CHANNEL(container),
- CONTAINER_TO_ID(container),
- CONTAINER_TO_LUN(container));
+ }
+ device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
if (device) {
switch (device_config_needed) {
case DELETE:
+ if (scsi_device_online(device)) {
+ scsi_device_set_state(device, SDEV_OFFLINE);
+ sdev_printk(KERN_INFO, device,
+ "Device offlined - %s\n",
+ (channel == CONTAINER_CHANNEL) ?
+ "array deleted" :
+ "enclosure services event");
+ }
+ break;
+ case ADD:
+ if (!scsi_device_online(device)) {
+ sdev_printk(KERN_INFO, device,
+ "Device online - %s\n",
+ (channel == CONTAINER_CHANNEL) ?
+ "array created" :
+ "enclosure services event");
+ scsi_device_set_state(device, SDEV_RUNNING);
+ }
+ /* FALLTHRU */
case CHANGE:
+ if ((channel == CONTAINER_CHANNEL)
+ && (!dev->fsa_dev[container].valid)) {
+ if (!scsi_device_online(device))
+ break;
+ scsi_device_set_state(device, SDEV_OFFLINE);
+ sdev_printk(KERN_INFO, device,
+ "Device offlined - %s\n",
+ "array failed");
+ break;
+ }
scsi_rescan_device(&device->sdev_gendev);
default:
break;
}
scsi_device_put(device);
+ device_config_needed = NOTHING;
}
- if (device_config_needed == ADD) {
- scsi_add_device(dev->scsi_host_ptr,
- CONTAINER_TO_CHANNEL(container),
- CONTAINER_TO_ID(container),
- CONTAINER_TO_LUN(container));
- }
-
+ if (device_config_needed == ADD)
+ scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
}
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
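
The rewritten tail of aac_handle_aif above now resolves the target once as (channel, id, lun) and then offlines, onlines or rescans the scsi_device according to the event. A minimal kernel-context sketch of that lookup pattern follows; the wrapper function and its parameters are illustrative, not the driver's:

	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>

	/* Sketch: apply an add/delete event to an existing or new device. */
	static void example_apply_event(struct Scsi_Host *host,
					int channel, int id, int lun, int add)
	{
		struct scsi_device *sdev = scsi_device_lookup(host, channel, id, lun);

		if (sdev) {
			if (add && !scsi_device_online(sdev))
				scsi_device_set_state(sdev, SDEV_RUNNING);
			else if (!add && scsi_device_online(sdev))
				scsi_device_set_state(sdev, SDEV_OFFLINE);
			scsi_device_put(sdev);		/* drop the lookup reference */
		} else if (add) {
			scsi_add_device(host, channel, id, lun);	/* not known yet */
		}
	}
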
@@ -1099,7 +1184,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
free_irq(aac->pdev->irq, aac);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
- if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
+ quirks = aac_get_driver_ident(index)->quirks;
+ if (quirks & AAC_QUIRK_31BIT) {
if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
goto out;
@@ -1110,7 +1196,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
}
if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
goto out;
- if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
+ if (quirks & AAC_QUIRK_31BIT)
if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
goto out;
if (jafo) {
@@ -1121,15 +1207,14 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
}
}
(void)aac_get_adapter_info(aac);
- quirks = aac_get_driver_ident(index)->quirks;
if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
- host->sg_tablesize = 34;
- host->max_sectors = (host->sg_tablesize * 8) + 112;
- }
- if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
- host->sg_tablesize = 17;
- host->max_sectors = (host->sg_tablesize * 8) + 112;
- }
+ host->sg_tablesize = 34;
+ host->max_sectors = (host->sg_tablesize * 8) + 112;
+ }
+ if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
+ host->sg_tablesize = 17;
+ host->max_sectors = (host->sg_tablesize * 8) + 112;
+ }
aac_get_config_status(aac, 1);
aac_get_containers(aac);
/*
@@ -1217,12 +1302,13 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
}
/* Quiesce build, flush cache, write through mode */
- aac_send_shutdown(aac);
+ if (forced < 2)
+ aac_send_shutdown(aac);
spin_lock_irqsave(host->host_lock, flagv);
- retval = _aac_reset_adapter(aac, forced);
+ retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
spin_unlock_irqrestore(host->host_lock, flagv);
- if (retval == -ENODEV) {
+ if ((forced < 2) && (retval == -ENODEV)) {
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
struct fib * fibctx = aac_fib_alloc(aac);
if (fibctx) {
@@ -1338,11 +1424,11 @@ int aac_check_health(struct aac_dev * aac)
fib->data = hw_fib->data;
aif = (struct aac_aifcmd *)hw_fib->data;
aif->command = cpu_to_le32(AifCmdEventNotify);
- aif->seqnum = cpu_to_le32(0xFFFFFFFF);
- aif->data[0] = AifEnExpEvent;
- aif->data[1] = AifExeFirmwarePanic;
- aif->data[2] = AifHighPriority;
- aif->data[3] = BlinkLED;
+ aif->seqnum = cpu_to_le32(0xFFFFFFFF);
+ ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
+ ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
+ ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
+ ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
/*
* Put the FIB onto the
@@ -1372,14 +1458,14 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
- if (!aac_check_reset ||
+ if (!aac_check_reset || ((aac_check_reset != 1) &&
(aac->supplement_adapter_info.SupportedOptions2 &
- le32_to_cpu(AAC_OPTION_IGNORE_RESET)))
+ AAC_OPTION_IGNORE_RESET)))
goto out;
host = aac->scsi_host_ptr;
if (aac->thread->pid != current->pid)
spin_lock_irqsave(host->host_lock, flagv);
- BlinkLED = _aac_reset_adapter(aac, 0);
+ BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
if (aac->thread->pid != current->pid)
spin_unlock_irqrestore(host->host_lock, flagv);
return BlinkLED;
@@ -1399,7 +1485,7 @@ out:
* until the queue is empty. When the queue is empty it will wait for
* more FIBs.
*/
-
+
int aac_command_thread(void *data)
{
struct aac_dev *dev = data;
@@ -1425,30 +1511,29 @@ int aac_command_thread(void *data)
add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
dprintk ((KERN_INFO "aac_command_thread start\n"));
- while(1)
- {
+ while (1) {
spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
-
+
entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
-
+
spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct fib, fiblink);
/*
- * We will process the FIB here or pass it to a
- * worker thread that is TBD. We Really can't
+ * We will process the FIB here or pass it to a
+ * worker thread that is TBD. We Really can't
* do anything at this point since we don't have
* anything defined for this thread to do.
*/
hw_fib = fib->hw_fib_va;
memset(fib, 0, sizeof(struct fib));
fib->type = FSAFS_NTC_FIB_CONTEXT;
- fib->size = sizeof( struct fib );
+ fib->size = sizeof(struct fib);
fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
@@ -1462,20 +1547,19 @@ int aac_command_thread(void *data)
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
aac_fib_adapter_complete(fib, (u16)sizeof(u32));
} else {
- struct list_head *entry;
/* The u32 here is important and intended. We are using
32bit wrapping time to fit the adapter field */
-
+
u32 time_now, time_last;
unsigned long flagv;
unsigned num;
struct hw_fib ** hw_fib_pool, ** hw_fib_p;
struct fib ** fib_pool, ** fib_p;
-
+
/* Sniff events */
- if ((aifcmd->command ==
+ if ((aifcmd->command ==
cpu_to_le32(AifCmdEventNotify)) ||
- (aifcmd->command ==
+ (aifcmd->command ==
cpu_to_le32(AifCmdJobProgress))) {
aac_handle_aif(dev, fib);
}
@@ -1527,7 +1611,7 @@ int aac_command_thread(void *data)
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
/*
- * For each Context that is on the
+ * For each Context that is on the
* fibctxList, make a copy of the
* fib, and then set the event to wake up the
* thread that is waiting for it.
@@ -1552,7 +1636,7 @@ int aac_command_thread(void *data)
*/
time_last = fibctx->jiffies;
/*
- * Has it been > 2 minutes
+ * Has it been > 2 minutes
* since the last read off
* the queue?
*/
@@ -1583,7 +1667,7 @@ int aac_command_thread(void *data)
*/
list_add_tail(&newfib->fiblink, &fibctx->fib_list);
fibctx->count++;
- /*
+ /*
* Set the event to wake up the
* thread that is waiting.
*/
@@ -1655,11 +1739,11 @@ int aac_command_thread(void *data)
struct fib *fibptr;
if ((fibptr = aac_fib_alloc(dev))) {
- u32 * info;
+ __le32 *info;
aac_fib_init(fibptr);
- info = (u32 *) fib_data(fibptr);
+ info = (__le32 *) fib_data(fibptr);
if (now.tv_usec > 500000)
++now.tv_sec;
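
The AifEnAddJBOD/AifEnDeleteJBOD case earlier in this file's diff unpacks a single little-endian 32-bit event word into channel, LUN and target id. A standalone sketch of that bit layout, assuming the field positions shown in the hunk (the struct and function names here are hypothetical):

	#include <stdint.h>

	struct jbod_addr {
		unsigned channel;	/* bits 27:24 of the event word */
		unsigned lun;		/* bits 23:16 */
		unsigned id;		/* bits 15:0  */
	};

	/* Returns 0 on success, -1 if the reserved top nibble is set. */
	static int decode_jbod_word(uint32_t word, struct jbod_addr *out)
	{
		if (word >> 28)
			return -1;
		out->channel = (word >> 24) & 0xF;
		out->lun     = (word >> 16) & 0xFF;
		out->id      =  word        & 0xFFFF;
		return 0;
	}
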
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index e6032ffc66a6..d1163ded132b 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -120,6 +120,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
+ fib->flags = 0;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
@@ -229,11 +230,9 @@ unsigned int aac_command_normal(struct aac_queue *q)
* all QE there are and wake up all the waiters before exiting.
*/
-unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
+unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
{
- u32 index = le32_to_cpu(Index);
-
- dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index));
+ dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
if ((index & 0x00000002L)) {
struct hw_fib * hw_fib;
struct fib * fib;
@@ -301,7 +300,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
{
- u32 *pstatus = (u32 *)hwfib->data;
+ __le32 *pstatus = (__le32 *)hwfib->data;
if (*pstatus & cpu_to_le32(0xffff0000))
*pstatus = cpu_to_le32(ST_OK);
}
@@ -315,6 +314,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
+ fib->flags = 0;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
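
Several hunks above replace bare u32 casts on FIB data with __le32 so that sparse can check every byte-order conversion. The rule they enforce, in a short kernel-context sketch (EXAMPLE_ST_OK stands in for the driver's ST_OK):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define EXAMPLE_ST_OK 0			/* stand-in for the driver's ST_OK */

	/* Adapter data stays __le32: compare against cpu_to_le32() constants,
	 * and call le32_to_cpu() only where host-order arithmetic is needed. */
	static int example_status_ok(const __le32 *fib_data)
	{
		return fib_data[0] == cpu_to_le32(EXAMPLE_ST_OK);
	}
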
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9dd331bc29b0..61be22774e99 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -159,27 +159,27 @@ static struct pci_device_id aac_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
/*
- * dmb - For now we add the number of channels to this structure.
+ * dmb - For now we add the number of channels to this structure.
* In the future we should add a fib that reports the number of channels
* for the card. At that time we can remove the channels from here
*/
static struct aac_driver_ident aac_drivers[] = {
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 2/Si (Iguana/PERC2Si) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Opal/PERC3Di) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Si (SlimFast/PERC3Si */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Viper/PERC3DiV) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Lexus/PERC3DiL) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Dagger/PERC3DiD) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Boxster/PERC3DiB) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* catapult */
- { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* tomcat */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan-2m) */
- { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S220 (Legend Crusader) */
- { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S230 (Legend Vulcan) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
+ { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
+ { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
@@ -224,8 +224,8 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
- { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
- { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
+ { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
+ { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
@@ -239,7 +239,7 @@ static struct aac_driver_ident aac_drivers[] = {
* Queues a command for execution by the associated Host Adapter.
*
* TODO: unify with aac_scsi_cmd().
- */
+ */
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
@@ -258,7 +258,7 @@ static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
}
cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
return (aac_scsi_cmd(cmd) ? FAILED : 0);
-}
+}
/**
* aac_info - Returns the host adapter name
@@ -292,21 +292,21 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
* @capacity: the sector capacity of the disk
* @geom: geometry block to fill in
*
- * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
- * The default disk geometry is 64 heads, 32 sectors, and the appropriate
- * number of cylinders so as not to exceed drive capacity. In order for
+ * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
+ * The default disk geometry is 64 heads, 32 sectors, and the appropriate
+ * number of cylinders so as not to exceed drive capacity. In order for
* disks equal to or larger than 1 GB to be addressable by the BIOS
- * without exceeding the BIOS limitation of 1024 cylinders, Extended
- * Translation should be enabled. With Extended Translation enabled,
- * drives between 1 GB inclusive and 2 GB exclusive are given a disk
- * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
- * are given a disk geometry of 255 heads and 63 sectors. However, if
- * the BIOS detects that the Extended Translation setting does not match
- * the geometry in the partition table, then the translation inferred
- * from the partition table will be used by the BIOS, and a warning may
+ * without exceeding the BIOS limitation of 1024 cylinders, Extended
+ * Translation should be enabled. With Extended Translation enabled,
+ * drives between 1 GB inclusive and 2 GB exclusive are given a disk
+ * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
+ * are given a disk geometry of 255 heads and 63 sectors. However, if
+ * the BIOS detects that the Extended Translation setting does not match
+ * the geometry in the partition table, then the translation inferred
+ * from the partition table will be used by the BIOS, and a warning may
* be displayed.
*/
-
+
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int *geom)
{
@@ -333,10 +333,10 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
- /*
+ /*
* Read the first 1024 bytes from the disk device, if the boot
* sector partition table is valid, search for a partition table
- * entry whose end_head matches one of the standard geometry
+ * entry whose end_head matches one of the standard geometry
* translations ( 64/32, 128/32, 255/63 ).
*/
buf = scsi_bios_ptable(bdev);
@@ -401,30 +401,44 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
static int aac_slave_configure(struct scsi_device *sdev)
{
+ struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
if ((sdev->type == TYPE_DISK) &&
- (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
+ (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
+ (!aac->jbod || sdev->inq_periph_qual) &&
+ (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
if (expose_physicals == 0)
return -ENXIO;
- if (expose_physicals < 0) {
- struct aac_dev *aac =
- (struct aac_dev *)sdev->host->hostdata;
- if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
- sdev->no_uld_attach = 1;
- }
+ if (expose_physicals < 0)
+ sdev->no_uld_attach = 1;
}
if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
- (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
+ (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
+ !sdev->no_uld_attach) {
struct scsi_device * dev;
struct Scsi_Host *host = sdev->host;
unsigned num_lsu = 0;
unsigned num_one = 0;
unsigned depth;
+ unsigned cid;
+ /*
+ * Firmware has an individual device recovery time typically
+ * of 35 seconds, give us a margin.
+ */
+ if (sdev->timeout < (45 * HZ))
+ sdev->timeout = 45 * HZ;
+ for (cid = 0; cid < aac->maximum_num_containers; ++cid)
+ if (aac->fsa_dev[cid].valid)
+ ++num_lsu;
__shost_for_each_device(dev, host) {
if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
- (sdev_channel(dev) == CONTAINER_CHANNEL))
- ++num_lsu;
- else
+ (!aac->raid_scsi_mode ||
+ (sdev_channel(sdev) != 2)) &&
+ !dev->no_uld_attach) {
+ if ((sdev_channel(dev) != CONTAINER_CHANNEL)
+ || !aac->fsa_dev[sdev_id(dev)].valid)
+ ++num_lsu;
+ } else
++num_one;
}
if (num_lsu == 0)
@@ -481,9 +495,35 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
return sdev->queue_depth;
}
+static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device * sdev = to_scsi_device(dev);
+ if (sdev_channel(sdev) != CONTAINER_CHANNEL)
+ return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
+ ? "Hidden\n" : "JBOD");
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ get_container_type(((struct aac_dev *)(sdev->host->hostdata))
+ ->fsa_dev[sdev_id(sdev)].type));
+}
+
+static struct device_attribute aac_raid_level_attr = {
+ .attr = {
+ .name = "level",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_raid_level
+};
+
+static struct device_attribute *aac_dev_attrs[] = {
+ &aac_raid_level_attr,
+ NULL,
+};
+
static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
return aac_do_ioctl(dev, cmd, arg);
}
@@ -506,17 +546,33 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
break;
case INQUIRY:
case READ_CAPACITY:
- case TEST_UNIT_READY:
/* Mark associated FIB to not complete, eh handler does this */
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
struct fib * fib = &aac->fibs[count];
if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
(fib->callback_data == cmd)) {
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
ret = SUCCESS;
}
}
+ break;
+ case TEST_UNIT_READY:
+ /* Mark associated FIB to not complete, eh handler does this */
+ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct scsi_cmnd * command;
+ struct fib * fib = &aac->fibs[count];
+ if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ ((command = fib->callback_data)) &&
+ (command->device == cmd->device)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ if (command == cmd)
+ ret = SUCCESS;
+ }
+ }
}
return ret;
}
@@ -539,12 +595,13 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
struct fib * fib = &aac->fibs[count];
if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
(fib->callback_data == cmd)) {
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
}
}
- printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
+ printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
if ((count = aac_check_health(aac)))
@@ -584,8 +641,11 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
* support a register, instead of a commanded, reset.
*/
if ((aac->supplement_adapter_info.SupportedOptions2 &
- le32_to_cpu(AAC_OPTION_MU_RESET|AAC_OPTION_IGNORE_RESET)) ==
- le32_to_cpu(AAC_OPTION_MU_RESET))
+ AAC_OPTION_MU_RESET) &&
+ aac_check_reset &&
+ ((aac_check_reset != 1) ||
+ (aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET)))
aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
}
@@ -632,8 +692,8 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
* Bugs: Needs locking against parallel ioctls lower down
* Bugs: Needs to handle hot plugging
*/
-
-static int aac_cfg_ioctl(struct inode *inode, struct file *file,
+
+static int aac_cfg_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
if (!capable(CAP_SYS_RAWIO))
@@ -646,7 +706,7 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
{
long ret;
lock_kernel();
- switch (cmd) {
+ switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
case FSACTL_SENDFIB:
case FSACTL_OPEN_GET_ADAPTER_FIB:
@@ -656,14 +716,14 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
case FSACTL_QUERY_DISK:
case FSACTL_DELETE_DISK:
case FSACTL_FORCE_DELETE_DISK:
- case FSACTL_GET_CONTAINERS:
+ case FSACTL_GET_CONTAINERS:
case FSACTL_SEND_LARGE_FIB:
ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
break;
case FSACTL_GET_NEXT_ADAPTER_FIB: {
struct fib_ioctl __user *f;
-
+
f = compat_alloc_user_space(sizeof(*f));
ret = 0;
if (clear_user(f, sizeof(*f)))
@@ -676,9 +736,9 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
}
default:
- ret = -ENOIOCTLCMD;
+ ret = -ENOIOCTLCMD;
break;
- }
+ }
unlock_kernel();
return ret;
}
@@ -735,6 +795,25 @@ static ssize_t aac_show_vendor(struct class_device *class_dev,
return len;
}
+static ssize_t aac_show_flags(struct class_device *class_dev, char *buf)
+{
+ int len = 0;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+
+ if (nblank(dprintk(x)))
+ len = snprintf(buf, PAGE_SIZE, "dprintk\n");
+#ifdef AAC_DETAILED_STATUS_INFO
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "AAC_DETAILED_STATUS_INFO\n");
+#endif
+ if (dev->raw_io_interface && dev->raw_io_64)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "SAI_READ_CAPACITY_16\n");
+ if (dev->jbod)
+ len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+ return len;
+}
+
static ssize_t aac_show_kernel_version(struct class_device *class_dev,
char *buf)
{
@@ -742,7 +821,7 @@ static ssize_t aac_show_kernel_version(struct class_device *class_dev,
int len, tmp;
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
- len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
le32_to_cpu(dev->adapter_info.kernelbuild));
return len;
@@ -755,7 +834,7 @@ static ssize_t aac_show_monitor_version(struct class_device *class_dev,
int len, tmp;
tmp = le32_to_cpu(dev->adapter_info.monitorrev);
- len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
le32_to_cpu(dev->adapter_info.monitorbuild));
return len;
@@ -768,7 +847,7 @@ static ssize_t aac_show_bios_version(struct class_device *class_dev,
int len, tmp;
tmp = le32_to_cpu(dev->adapter_info.biosrev);
- len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
le32_to_cpu(dev->adapter_info.biosbuild));
return len;
@@ -844,6 +923,13 @@ static struct class_device_attribute aac_vendor = {
},
.show = aac_show_vendor,
};
+static struct class_device_attribute aac_flags = {
+ .attr = {
+ .name = "flags",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_flags,
+};
static struct class_device_attribute aac_kernel_version = {
.attr = {
.name = "hba_kernel_version",
@@ -898,6 +984,7 @@ static struct class_device_attribute aac_reset = {
static struct class_device_attribute *aac_attrs[] = {
&aac_model,
&aac_vendor,
+ &aac_flags,
&aac_kernel_version,
&aac_monitor_version,
&aac_bios_version,
@@ -928,21 +1015,22 @@ static struct scsi_host_template aac_driver_template = {
.compat_ioctl = aac_compat_ioctl,
#endif
.queuecommand = aac_queuecommand,
- .bios_param = aac_biosparm,
+ .bios_param = aac_biosparm,
.shost_attrs = aac_attrs,
.slave_configure = aac_slave_configure,
.change_queue_depth = aac_change_queue_depth,
+ .sdev_attrs = aac_dev_attrs,
.eh_abort_handler = aac_eh_abort,
.eh_host_reset_handler = aac_eh_reset,
- .can_queue = AAC_NUM_IO_FIB,
+ .can_queue = AAC_NUM_IO_FIB,
.this_id = MAXIMUM_NUM_CONTAINERS,
.sg_tablesize = 16,
.max_sectors = 128,
#if (AAC_NUM_IO_FIB > 256)
.cmd_per_lun = 256,
-#else
- .cmd_per_lun = AAC_NUM_IO_FIB,
-#endif
+#else
+ .cmd_per_lun = AAC_NUM_IO_FIB,
+#endif
.use_clustering = ENABLE_CLUSTERING,
.use_sg_chaining = ENABLE_SG_CHAINING,
.emulated = 1,
@@ -979,18 +1067,18 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
goto out;
error = -ENODEV;
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
goto out_disable_pdev;
/*
* If the quirk31 bit is set, the adapter needs its adapter-to-driver
* communication memory to be allocated below 2 GB
*/
- if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
if (pci_set_dma_mask(pdev, DMA_31BIT_MASK) ||
pci_set_consistent_dma_mask(pdev, DMA_31BIT_MASK))
goto out_disable_pdev;
-
+
pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
@@ -1003,7 +1091,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
shost->max_cmd_len = 16;
aac = (struct aac_dev *)shost->hostdata;
- aac->scsi_host_ptr = shost;
+ aac->scsi_host_ptr = shost;
aac->pdev = pdev;
aac->name = aac_driver_template.name;
aac->id = shost->unique_id;
@@ -1040,7 +1128,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
goto out_deinit;
-
+
aac->maximum_num_channels = aac_drivers[index].channels;
error = aac_get_adapter_info(aac);
if (error < 0)
@@ -1049,7 +1137,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
/*
* Lets override negotiations and drop the maximum SG limit to 34
*/
- if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
+ if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
(aac->scsi_host_ptr->sg_tablesize > 34)) {
aac->scsi_host_ptr->sg_tablesize = 34;
aac->scsi_host_ptr->max_sectors
@@ -1066,17 +1154,17 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
/*
* Firmware printf works only with older firmware.
*/
- if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
+ if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
aac->printf_enabled = 1;
else
aac->printf_enabled = 0;
-
+
/*
* max channel will be the physical channels plus 1 virtual channel
* all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
* physical channels are addressed by their actual physical number+1
*/
- if ((aac->nondasd_support == 1) || expose_physicals)
+ if (aac->nondasd_support || expose_physicals || aac->jbod)
shost->max_channel = aac->maximum_num_channels;
else
shost->max_channel = 0;
@@ -1148,10 +1236,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
kfree(aac->queues);
aac_adapter_ioremap(aac, 0);
-
+
kfree(aac->fibs);
kfree(aac->fsa_dev);
-
+
list_del(&aac->entry);
scsi_host_put(shost);
pci_disable_device(pdev);
@@ -1172,7 +1260,7 @@ static struct pci_driver aac_pci_driver = {
static int __init aac_init(void)
{
int error;
-
+
printk(KERN_INFO "Adaptec %s driver %s\n",
AAC_DRIVERNAME, aac_driver_version);
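
The linit.c hunks above add two read-only sysfs attributes, "flags" on the host and "level" on each disk, by pairing a show routine with an attribute descriptor and listing it in the template. A minimal sketch of that 2.6.24-era pattern; the attribute below is invented purely for illustration:

	#include <linux/device.h>

	static ssize_t example_show_state(struct class_device *class_dev, char *buf)
	{
		/* show routines fill at most PAGE_SIZE bytes and return the length */
		return snprintf(buf, PAGE_SIZE, "ok\n");
	}

	static struct class_device_attribute example_state_attr = {
		.attr = {
			.name = "state",
			.mode = S_IRUGO,
		},
		.show = example_show_state,
	};

	/* NULL-terminated list, hooked up via the host template's .shost_attrs */
	static struct class_device_attribute *example_attrs[] = {
		&example_state_attr,
		NULL,
	};
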
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 73eef3dc5dc6..a08bbf1fd76c 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -465,7 +465,7 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
u32 var;
if (!(dev->supplement_adapter_info.SupportedOptions2 &
- le32_to_cpu(AAC_OPTION_MU_RESET)) || (bled >= 0) || (bled == -2)) {
+ AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
if (bled)
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
dev->name, dev->id, bled);
@@ -549,7 +549,9 @@ int _aac_rx_init(struct aac_dev *dev)
dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
!aac_rx_restart_adapter(dev, 0))
- ++restart;
+ /* Make sure the Hardware FIFO is empty */
+ while ((++restart < 512) &&
+ (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
/*
* Check to see if the board panic'd while booting.
*/
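
The _aac_rx_init hunk above no longer bumps "restart" once; it drains stale entries from the adapter's outbound queue, bounded to 512 reads, until the 0xFFFFFFFF "empty" sentinel comes back. The idiom as a standalone sketch (read_fifo is a hypothetical stand-in for rx_readl(dev, MUnit.OutboundQueue)):

	#include <stdint.h>

	#define FIFO_EMPTY		0xFFFFFFFFu
	#define FIFO_DRAIN_LIMIT	512

	/* Pop and discard entries until the FIFO reports empty, but never spin
	 * more than FIFO_DRAIN_LIMIT times; returns how many entries were read. */
	static int drain_fifo(uint32_t (*read_fifo)(void *ctx), void *ctx)
	{
		int reads = 0;

		while (reads < FIFO_DRAIN_LIMIT && read_fifo(ctx) != FIFO_EMPTY)
			++reads;		/* discard the stale entry and keep going */
		return reads;
	}
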
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 38a1ee2eacd8..374ed025dc5a 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -8233,7 +8233,7 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
- sizeof(scp->sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
/*
* Note: The 'status_byte()' macro used by
* target drivers defined in scsi.h shifts the
@@ -9136,7 +9136,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var);
dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
- sizeof(scp->sense_buffer), DMA_FROM_DEVICE);
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/*
* 'qdonep' contains the command's ending status.
*/
@@ -9166,7 +9166,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) {
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
- sizeof(scp->sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
/*
* Note: The 'status_byte()' macro used by
* target drivers defined in scsi.h shifts the
@@ -9881,9 +9881,9 @@ static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
{
struct asc_board *board = shost_priv(scp->device->host);
scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
- sizeof(scp->sense_buffer), DMA_FROM_DEVICE);
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
dma_cache_sync(board->dev, scp->sense_buffer,
- sizeof(scp->sense_buffer), DMA_FROM_DEVICE);
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
return cpu_to_le32(scp->SCp.dma_handle);
}
@@ -9914,7 +9914,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
asc_scsi_q->q2.target_ix =
ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp);
- asc_scsi_q->q1.sense_len = sizeof(scp->sense_buffer);
+ asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;
/*
* If there are any outstanding requests for the current target,
@@ -10173,7 +10173,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
scsiqp->target_lun = scp->device->lun;
scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
- scsiqp->sense_len = sizeof(scp->sense_buffer);
+ scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
/* Build ADV_SCSI_REQ_Q */
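
Throughout the advansys.c (and later aha1542/aha1740/aic7xxx) hunks, sizeof(scp->sense_buffer) becomes SCSI_SENSE_BUFFERSIZE. With the dynamically allocated sense buffer introduced in this series, sense_buffer is now a pointer, so sizeof() would silently yield the pointer size instead of the buffer length. A standalone sketch of the pitfall (both structs are hypothetical):

	#include <stdio.h>

	#define SCSI_SENSE_BUFFERSIZE 96

	struct cmd_old { unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; };
	struct cmd_new { unsigned char *sense_buffer; };	/* now allocated separately */

	int main(void)
	{
		/* prints "96 vs 8" on a 64-bit host: the second value is just the
		 * pointer size, not the amount of sense data available */
		printf("%zu vs %zu\n",
		       sizeof(((struct cmd_old *)0)->sense_buffer),
		       sizeof(((struct cmd_new *)0)->sense_buffer));
		return 0;
	}
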
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index ea8c69947644..6ccdc96cc480 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -260,6 +260,7 @@
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
+#include <scsi/scsi_eh.h>
#include "aha152x.h"
static LIST_HEAD(aha152x_host_list);
@@ -558,9 +559,7 @@ struct aha152x_hostdata {
struct aha152x_scdata {
Scsi_Cmnd *next; /* next sc in queue */
struct completion *done;/* semaphore to block on */
- unsigned char aha_orig_cmd_len;
- unsigned char aha_orig_cmnd[MAX_COMMAND_SIZE];
- int aha_orig_resid;
+ struct scsi_eh_save ses;
};
/* access macros for hostdata */
@@ -1017,16 +1016,10 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
SCp.buffers_residual : left buffers in list
SCp.phase : current state of the command */
- if ((phase & (check_condition|resetting)) || !scsi_sglist(SCpnt)) {
- if (phase & check_condition) {
- SCpnt->SCp.ptr = SCpnt->sense_buffer;
- SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer);
- scsi_set_resid(SCpnt, sizeof(SCpnt->sense_buffer));
- } else {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
- scsi_set_resid(SCpnt, 0);
- }
+ if ((phase & resetting) || !scsi_sglist(SCpnt)) {
+ SCpnt->SCp.ptr = NULL;
+ SCpnt->SCp.this_residual = 0;
+ scsi_set_resid(SCpnt, 0);
SCpnt->SCp.buffer = NULL;
SCpnt->SCp.buffers_residual = 0;
} else {
@@ -1561,10 +1554,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
}
#endif
- /* restore old command */
- memcpy(cmd->cmnd, sc->aha_orig_cmnd, sizeof(cmd->cmnd));
- cmd->cmd_len = sc->aha_orig_cmd_len;
- scsi_set_resid(cmd, sc->aha_orig_resid);
+ scsi_eh_restore_cmnd(cmd, &sc->ses);
cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
@@ -1587,22 +1577,10 @@ static void busfree_run(struct Scsi_Host *shpnt)
DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
#endif
- /* save old command */
sc = SCDATA(ptr);
/* It was allocated in aha152x_internal_queue? */
BUG_ON(!sc);
- memcpy(sc->aha_orig_cmnd, ptr->cmnd,
- sizeof(ptr->cmnd));
- sc->aha_orig_cmd_len = ptr->cmd_len;
- sc->aha_orig_resid = scsi_get_resid(ptr);
-
- ptr->cmnd[0] = REQUEST_SENSE;
- ptr->cmnd[1] = 0;
- ptr->cmnd[2] = 0;
- ptr->cmnd[3] = 0;
- ptr->cmnd[4] = sizeof(ptr->sense_buffer);
- ptr->cmnd[5] = 0;
- ptr->cmd_len = 6;
+ scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0);
DO_UNLOCK(flags);
aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
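
The aha152x hunks above retire the driver's private copy of the original CDB and use the generic scsi_eh_save helpers instead: scsi_eh_prep_cmnd() swaps in a REQUEST SENSE targeting the command's sense buffer, and scsi_eh_restore_cmnd() puts the original CDB, buffers and residual back. A short kernel-context sketch of the pairing (the wrapper function is illustrative):

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_eh.h>

	static void example_autosense(struct scsi_cmnd *scmd, struct scsi_eh_save *ses)
	{
		/* NULL/0 asks the helper to build the REQUEST SENSE CDB itself,
		 * ~0 requests as much sense data as the buffer holds */
		scsi_eh_prep_cmnd(scmd, ses, NULL, 0, ~0);

		/* ... requeue scmd here and let the REQUEST SENSE complete ... */

		scsi_eh_restore_cmnd(scmd, ses);
	}
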
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index bbcc2c52d79f..190568ebea3c 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -51,15 +51,6 @@
#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
-static void BAD_DMA(void *address, unsigned int length)
-{
- printk(KERN_CRIT "buf vaddress %p paddress 0x%lx length %d\n",
- address,
- SCSI_BUF_PA(address),
- length);
- panic("Buffer at physical address > 16Mb used for aha1542");
-}
-
static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
struct scatterlist *sgp,
int nseg,
@@ -545,7 +536,7 @@ static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id)
we will still have it in the cdb when we come back */
if (ccb[mbo].tarstat == 2)
memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
- sizeof(SCtmp->sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
/* is there mail :-) */
@@ -597,8 +588,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
unchar target = SCpnt->device->id;
unchar lun = SCpnt->device->lun;
unsigned long flags;
- void *buff = SCpnt->request_buffer;
- int bufflen = SCpnt->request_bufflen;
+ int bufflen = scsi_bufflen(SCpnt);
int mbo;
struct mailbox *mb;
struct ccb *ccb;
@@ -619,7 +609,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
#if 0
/* scsi_request_sense() provides a buffer of size 256,
so there is no reason to expect equality */
- if (bufflen != sizeof(SCpnt->sense_buffer))
+ if (bufflen != SCSI_SENSE_BUFFERSIZE)
printk(KERN_CRIT "aha1542: Wrong buffer length supplied "
"for request sense (%d)\n", bufflen);
#endif
@@ -689,42 +679,29 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
- if (SCpnt->use_sg) {
+ if (bufflen) {
struct scatterlist *sg;
struct chain *cptr;
#ifdef DEBUG
unsigned char *ptr;
#endif
- int i;
+ int i, sg_count = scsi_sg_count(SCpnt);
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
- SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ SCpnt->host_scribble = kmalloc(sizeof(*cptr)*sg_count,
+ GFP_KERNEL | GFP_DMA);
cptr = (struct chain *) SCpnt->host_scribble;
if (cptr == NULL) {
/* free the claimed mailbox slot */
HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
return SCSI_MLQUEUE_HOST_BUSY;
}
- scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
- if (sg->length == 0 || SCpnt->use_sg > 16 ||
- (((int) sg->offset) & 1) || (sg->length & 1)) {
- unsigned char *ptr;
- printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
- scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
- printk(KERN_CRIT "%d: %p %d\n", i,
- sg_virt(sg), sg->length);
- };
- printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
- ptr = (unsigned char *) &cptr[i];
- for (i = 0; i < 18; i++)
- printk("%02x ", ptr[i]);
- panic("Foooooooood fight!");
- };
+ scsi_for_each_sg(SCpnt, sg, sg_count, i) {
any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
- BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i);
+ BAD_SG_DMA(SCpnt, scsi_sglist(SCpnt), sg_count, i);
any2scsi(cptr[i].datalen, sg->length);
};
- any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
+ any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
#ifdef DEBUG
printk("cptr %x: ", cptr);
@@ -735,10 +712,8 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
} else {
ccb[mbo].op = 0; /* SCSI Initiator Command */
SCpnt->host_scribble = NULL;
- any2scsi(ccb[mbo].datalen, bufflen);
- if (buff && SCSI_BUF_PA(buff + bufflen - 1) > ISA_DMA_THRESHOLD)
- BAD_DMA(buff, bufflen);
- any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(buff));
+ any2scsi(ccb[mbo].datalen, 0);
+ any2scsi(ccb[mbo].dataptr, 0);
};
ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */
ccb[mbo].rsalen = 16;
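
The aha1542 hunks above stop peeking at SCpnt->use_sg and request_buffer directly; they size the chain allocation from scsi_sg_count() and walk the list with scsi_for_each_sg(). A kernel-context sketch of that accessor-based walk (map_one_segment is a made-up stand-in for the driver's any2scsi/SCSI_SG_PA bookkeeping):

	#include <linux/scatterlist.h>
	#include <scsi/scsi_cmnd.h>

	static int example_map_sg(struct scsi_cmnd *cmd,
				  void (*map_one_segment)(struct scatterlist *sg))
	{
		struct scatterlist *sg;
		int i, sg_count = scsi_sg_count(cmd);	/* 0 for no-data commands */

		scsi_for_each_sg(cmd, sg, sg_count, i)
			map_one_segment(sg);		/* address + length per segment */
		return sg_count;
	}
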
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index f6722fd46008..be58a0b097c7 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -286,7 +286,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
cdb when we come back */
if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
memcpy(SCtmp->sense_buffer, ecbptr->sense,
- sizeof(SCtmp->sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
} else
errstatus = 0;
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index 9a6ce19a4030..e4f70c563bc2 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -33,11 +33,10 @@ aic79xx-y += aic79xx_osm.o \
aic79xx_proc.o \
aic79xx_osm_pci.o
-EXTRA_CFLAGS += -Idrivers/scsi
+ccflags-y += -Idrivers/scsi
ifdef WARNINGS_BECOME_ERRORS
-EXTRA_CFLAGS += -Werror
+ccflags-y += -Werror
endif
-#EXTRA_CFLAGS += -g
# Files generated that shall be removed upon make clean
clean-files := aic7xxx_seq.h aic7xxx_reg.h aic7xxx_reg_print.c
@@ -46,53 +45,45 @@ clean-files += aic79xx_seq.h aic79xx_reg.h aic79xx_reg_print.c
# Dependencies for generated files need to be listed explicitly
$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_seq.h
+$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_reg.h
$(obj)/aic79xx_core.o: $(obj)/aic79xx_seq.h
-$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
-$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
+$(obj)/aic79xx_core.o: $(obj)/aic79xx_reg.h
-$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_reg.h
-$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_reg.h
+$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_seq.h
+$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_seq.h
-aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_seq.h \
- $(obj)/aic7xxx_reg.h
+aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_reg.h
aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c
aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
-p $(obj)/aic7xxx_reg_print.c -i aic7xxx_osm.h
ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-aic7xxx_seq.h: aic7xxx_reg.h
-ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
-aic7xxx_reg.h: aic7xxx_reg_print.c
-endif
-$(aic7xxx-gen-y): $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
+$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
$(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \
$(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
$(src)/aic7xxx.seq
+
+$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
+else
+$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
endif
-aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_seq.h \
- $(obj)/aic79xx_reg.h
+aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_reg.h
aic79xx-gen-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += $(obj)/aic79xx_reg_print.c
aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
-p $(obj)/aic79xx_reg_print.c -i aic79xx_osm.h
ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-aic79xx_seq.h: aic79xx_reg.h
-ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
-aic79xx_reg.h: aic79xx_reg_print.c
-endif
-$(aic79xx-gen-y): $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
+$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
$(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \
$(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
$(src)/aic79xx.seq
+
+$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
+else
+$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
endif
$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 2d020405480c..0e4708fd43c8 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1784,7 +1784,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
if (scb->flags & SCB_SENSE) {
sense_size = min(sizeof(struct scsi_sense_data)
- ahd_get_sense_residual(scb),
- (u_long)sizeof(cmd->sense_buffer));
+ (u_long)SCSI_SENSE_BUFFERSIZE);
sense_offset = 0;
} else {
/*
@@ -1795,11 +1795,11 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
scb->sense_data;
sense_size = min_t(size_t,
scsi_4btoul(siu->sense_length),
- sizeof(cmd->sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
sense_offset = SIU_SENSE_OFFSET(siu);
}
- memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->sense_buffer,
ahd_get_sense_buf(ahd, scb)
+ sense_offset, sense_size);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 390b0fc991c5..e310e414067f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1801,12 +1801,12 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
sense_size = min(sizeof(struct scsi_sense_data)
- ahc_get_sense_residual(scb),
- (u_long)sizeof(cmd->sense_buffer));
+ (u_long)SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->sense_buffer,
ahc_get_sense_buf(ahc, scb), sense_size);
- if (sense_size < sizeof(cmd->sense_buffer))
+ if (sense_size < SCSI_SENSE_BUFFERSIZE)
memset(&cmd->sense_buffer[sense_size], 0,
- sizeof(cmd->sense_buffer) - sense_size);
+ SCSI_SENSE_BUFFERSIZE - sense_size);
cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHC_DEBUG
if (ahc_debug & AHC_SHOW_SENSE) {
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 8f8db5f0aef7..bcb0b870320c 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2696,7 +2696,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
pci_unmap_single(p->pdev,
le32_to_cpu(scb->sg_list[0].address),
- sizeof(cmd->sense_buffer),
+ SCSI_SENSE_BUFFERSIZE,
PCI_DMA_FROMDEVICE);
}
if (scb->flags & SCB_RECOVERY_SCB)
@@ -4267,13 +4267,13 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
sizeof(generic_sense));
scb->sense_cmd[1] = (cmd->device->lun << 5);
- scb->sense_cmd[4] = sizeof(cmd->sense_buffer);
+ scb->sense_cmd[4] = SCSI_SENSE_BUFFERSIZE;
scb->sg_list[0].length =
- cpu_to_le32(sizeof(cmd->sense_buffer));
+ cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
scb->sg_list[0].address =
cpu_to_le32(pci_map_single(p->pdev, cmd->sense_buffer,
- sizeof(cmd->sense_buffer),
+ SCSI_SENSE_BUFFERSIZE,
PCI_DMA_FROMDEVICE));
/*
@@ -4296,7 +4296,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
hscb->residual_data_count[2] = 0;
scb->sg_count = hscb->SG_segment_count = 1;
- scb->sg_length = sizeof(cmd->sense_buffer);
+ scb->sg_length = SCSI_SENSE_BUFFERSIZE;
scb->tag_action = 0;
scb->flags |= SCB_SENSE;
/*
@@ -10293,7 +10293,6 @@ static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
aic7xxx_position(cmd) = scb->hscb->tag;
cmd->scsi_done = fn;
cmd->result = DID_OK;
- memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
aic7xxx_error(cmd) = DID_OK;
aic7xxx_status(cmd) = 0;
cmd->host_scribble = NULL;
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 3dce618bf414..72042cae7768 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -165,7 +165,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
if (dev->port->oob_mode != SATA_OOB_MODE) {
flags |= OPEN_REQUIRED;
if ((dev->dev_type == SATA_DEV) ||
- (dev->tproto & SAS_PROTO_STP)) {
+ (dev->tproto & SAS_PROTOCOL_STP)) {
struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
if (rps_resp->frame_type == SMP_RESPONSE &&
rps_resp->function == SMP_REPORT_PHY_SATA &&
@@ -193,7 +193,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
flags = 0;
- if (dev->tproto & SAS_PROTO_STP)
+ if (dev->tproto & SAS_PROTOCOL_STP)
flags |= STP_CL_POL_NO_TX;
asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
@@ -201,7 +201,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
- if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) {
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
i = asd_init_sata(dev);
if (i < 0) {
asd_free_ddb(asd_ha, ddb);
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
index 6bd8e3059d27..3d8c4ff1f2ef 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.c
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -903,11 +903,11 @@ void asd_dump_frame_rcvd(struct asd_phy *phy,
int i;
switch ((dl->status_block[1] & 0x70) >> 3) {
- case SAS_PROTO_STP:
+ case SAS_PROTOCOL_STP:
ASD_DPRINTK("STP proto device-to-host FIS:\n");
break;
default:
- case SAS_PROTO_SSP:
+ case SAS_PROTOCOL_SSP:
ASD_DPRINTK("SAS proto IDENTIFY:\n");
break;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 0cd7eed9196c..098b5f39cd31 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -91,7 +91,7 @@ static int asd_init_phy(struct asd_phy *phy)
sas_phy->enabled = 1;
sas_phy->class = SAS;
- sas_phy->iproto = SAS_PROTO_ALL;
+ sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
index 491e5d8a98bc..150f6706d23f 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.h
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -72,6 +72,7 @@ struct flash_struct {
u8 manuf;
u8 dev_id;
u8 sec_prot;
+ u8 method;
u32 dir_offs;
};
@@ -216,6 +217,8 @@ struct asd_ha_struct {
struct dma_pool *scb_pool;
struct asd_seq_data seq; /* sequencer related */
+ u32 bios_status;
+ const struct firmware *bios_image;
};
/* ---------- Common macros ---------- */
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index b70d6e7f96e9..5d761eb67442 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/firmware.h>
#include <scsi/scsi_host.h>
@@ -36,6 +37,7 @@
#include "aic94xx_reg.h"
#include "aic94xx_hwi.h"
#include "aic94xx_seq.h"
+#include "aic94xx_sds.h"
/* The format is "version.release.patchlevel" */
#define ASD_DRIVER_VERSION "1.0.3"
@@ -134,7 +136,7 @@ Err:
return err;
}
-static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha)
+static void asd_unmap_memio(struct asd_ha_struct *asd_ha)
{
struct asd_ha_addrspace *io_handle;
@@ -171,7 +173,7 @@ static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
return err;
}
-static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha)
+static void asd_unmap_ioport(struct asd_ha_struct *asd_ha)
{
pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
}
@@ -208,7 +210,7 @@ Err:
return err;
}
-static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha)
+static void asd_unmap_ha(struct asd_ha_struct *asd_ha)
{
if (asd_ha->iospace)
asd_unmap_ioport(asd_ha);
@@ -313,6 +315,181 @@ static ssize_t asd_show_dev_pcba_sn(struct device *dev,
}
static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
+#define FLASH_CMD_NONE 0x00
+#define FLASH_CMD_UPDATE 0x01
+#define FLASH_CMD_VERIFY 0x02
+
+struct flash_command {
+ u8 command[8];
+ int code;
+};
+
+static struct flash_command flash_command_table[] =
+{
+ {"verify", FLASH_CMD_VERIFY},
+ {"update", FLASH_CMD_UPDATE},
+ {"", FLASH_CMD_NONE} /* Last entry should be NULL. */
+};
+
+struct error_bios {
+ char *reason;
+ int err_code;
+};
+
+static struct error_bios flash_error_table[] =
+{
+ {"Failed to open bios image file", FAIL_OPEN_BIOS_FILE},
+ {"PCI ID mismatch", FAIL_CHECK_PCI_ID},
+ {"Checksum mismatch", FAIL_CHECK_SUM},
+ {"Unknown Error", FAIL_UNKNOWN},
+ {"Failed to verify.", FAIL_VERIFY},
+ {"Failed to reset flash chip.", FAIL_RESET_FLASH},
+ {"Failed to find flash chip type.", FAIL_FIND_FLASH_ID},
+ {"Failed to erash flash chip.", FAIL_ERASE_FLASH},
+ {"Failed to program flash chip.", FAIL_WRITE_FLASH},
+ {"Flash in progress", FLASH_IN_PROGRESS},
+ {"Image file size Error", FAIL_FILE_SIZE},
+ {"Input parameter error", FAIL_PARAMETERS},
+ {"Out of memory", FAIL_OUT_MEMORY},
+ {"OK", 0} /* Last entry err_code = 0. */
+};
+
+static ssize_t asd_store_update_bios(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
+ char *cmd_ptr, *filename_ptr;
+ struct bios_file_header header, *hdr_ptr;
+ int res, i;
+ u32 csum = 0;
+ int flash_command = FLASH_CMD_NONE;
+ int err = 0;
+
+ cmd_ptr = kzalloc(count*2, GFP_KERNEL);
+
+ if (!cmd_ptr) {
+ err = FAIL_OUT_MEMORY;
+ goto out;
+ }
+
+ filename_ptr = cmd_ptr + count;
+ res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
+ if (res != 2) {
+ err = FAIL_PARAMETERS;
+ goto out1;
+ }
+
+ for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
+ if (!memcmp(flash_command_table[i].command,
+ cmd_ptr, strlen(cmd_ptr))) {
+ flash_command = flash_command_table[i].code;
+ break;
+ }
+ }
+ if (flash_command == FLASH_CMD_NONE) {
+ err = FAIL_PARAMETERS;
+ goto out1;
+ }
+
+ if (asd_ha->bios_status == FLASH_IN_PROGRESS) {
+ err = FLASH_IN_PROGRESS;
+ goto out1;
+ }
+ err = request_firmware(&asd_ha->bios_image,
+ filename_ptr,
+ &asd_ha->pcidev->dev);
+ if (err) {
+ asd_printk("Failed to load bios image file %s, error %d\n",
+ filename_ptr, err);
+ err = FAIL_OPEN_BIOS_FILE;
+ goto out1;
+ }
+
+ hdr_ptr = (struct bios_file_header *)asd_ha->bios_image->data;
+
+ if ((hdr_ptr->contrl_id.vendor != asd_ha->pcidev->vendor ||
+ hdr_ptr->contrl_id.device != asd_ha->pcidev->device) &&
+ (hdr_ptr->contrl_id.sub_vendor != asd_ha->pcidev->vendor ||
+ hdr_ptr->contrl_id.sub_device != asd_ha->pcidev->device)) {
+
+ ASD_DPRINTK("The PCI vendor or device id does not match\n");
+ ASD_DPRINTK("vendor=%x dev=%x sub_vendor=%x sub_dev=%x"
+ " pci vendor=%x pci dev=%x\n",
+ hdr_ptr->contrl_id.vendor,
+ hdr_ptr->contrl_id.device,
+ hdr_ptr->contrl_id.sub_vendor,
+ hdr_ptr->contrl_id.sub_device,
+ asd_ha->pcidev->vendor,
+ asd_ha->pcidev->device);
+ err = FAIL_CHECK_PCI_ID;
+ goto out2;
+ }
+
+ if (hdr_ptr->filelen != asd_ha->bios_image->size) {
+ err = FAIL_FILE_SIZE;
+ goto out2;
+ }
+
+ /* calculate checksum */
+ for (i = 0; i < hdr_ptr->filelen; i++)
+ csum += asd_ha->bios_image->data[i];
+
+ if ((csum & 0x0000ffff) != hdr_ptr->checksum) {
+ ASD_DPRINTK("BIOS file checksum mismatch\n");
+ err = FAIL_CHECK_SUM;
+ goto out2;
+ }
+ if (flash_command == FLASH_CMD_UPDATE) {
+ asd_ha->bios_status = FLASH_IN_PROGRESS;
+ err = asd_write_flash_seg(asd_ha,
+ &asd_ha->bios_image->data[sizeof(*hdr_ptr)],
+ 0, hdr_ptr->filelen-sizeof(*hdr_ptr));
+ if (!err)
+ err = asd_verify_flash_seg(asd_ha,
+ &asd_ha->bios_image->data[sizeof(*hdr_ptr)],
+ 0, hdr_ptr->filelen-sizeof(*hdr_ptr));
+ } else {
+ asd_ha->bios_status = FLASH_IN_PROGRESS;
+ err = asd_verify_flash_seg(asd_ha,
+ &asd_ha->bios_image->data[sizeof(header)],
+ 0, hdr_ptr->filelen-sizeof(header));
+ }
+
+out2:
+ release_firmware(asd_ha->bios_image);
+out1:
+ kfree(cmd_ptr);
+out:
+ asd_ha->bios_status = err;
+
+ if (!err)
+ return count;
+ else
+ return -err;
+}
+
+static ssize_t asd_show_update_bios(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
+
+ for (i = 0; flash_error_table[i].err_code != 0; i++) {
+ if (flash_error_table[i].err_code == asd_ha->bios_status)
+ break;
+ }
+ if (asd_ha->bios_status != FLASH_IN_PROGRESS)
+ asd_ha->bios_status = FLASH_OK;
+
+ return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
+ flash_error_table[i].err_code,
+ flash_error_table[i].reason);
+}
+
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
+ asd_show_update_bios, asd_store_update_bios);
+
static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
{
int err;
@@ -328,9 +505,14 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
if (err)
goto err_biosb;
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
+ if (err)
+ goto err_update_bios;
return 0;
+err_update_bios:
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
err_biosb:
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
err_rev:
@@ -343,6 +525,7 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
}
/* The first entry, 0, is used for dynamic ids, the rest for devices
@@ -589,6 +772,7 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
asd_ha->sas_ha.dev = &asd_ha->pcidev->dev;
asd_ha->sas_ha.lldd_ha = asd_ha;
+ asd_ha->bios_status = FLASH_OK;
asd_ha->name = asd_dev->name;
asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index db6ab1a3b81e..0febad4dd75f 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -788,12 +788,12 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
/* initiator port settings are in the hi nibble */
if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
- control_phy->port_type = SAS_PROTO_ALL << 4;
+ control_phy->port_type = SAS_PROTOCOL_ALL << 4;
else if (phy->sas_phy.role == PHY_ROLE_TARGET)
- control_phy->port_type = SAS_PROTO_ALL;
+ control_phy->port_type = SAS_PROTOCOL_ALL;
else
control_phy->port_type =
- (SAS_PROTO_ALL << 4) | SAS_PROTO_ALL;
+ (SAS_PROTOCOL_ALL << 4) | SAS_PROTOCOL_ALL;
/* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10;
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 06509bff71f7..2a4c933eb89c 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -30,6 +30,7 @@
#include "aic94xx.h"
#include "aic94xx_reg.h"
+#include "aic94xx_sds.h"
/* ---------- OCM stuff ---------- */
@@ -1083,3 +1084,391 @@ out:
kfree(flash_dir);
return err;
}
+
+/**
+ * asd_verify_flash_seg - verify data with flash memory
+ * @asd_ha: pointer to the host adapter structure
+ * @src: pointer to the source data to be verified
+ * @dest_offset: offset into flash memory
+ * @bytes_to_verify: total bytes to verify
+ */
+int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
+ void *src, u32 dest_offset, u32 bytes_to_verify)
+{
+ u8 *src_buf;
+ u8 flash_char;
+ int err;
+ u32 nv_offset, reg, i;
+
+ reg = asd_ha->hw_prof.flash.bar;
+ src_buf = NULL;
+
+ err = FLASH_OK;
+ nv_offset = dest_offset;
+ src_buf = (u8 *)src;
+ for (i = 0; i < bytes_to_verify; i++) {
+ flash_char = asd_read_reg_byte(asd_ha, reg + nv_offset + i);
+ if (flash_char != src_buf[i]) {
+ err = FAIL_VERIFY;
+ break;
+ }
+ }
+ return err;
+}
+
+/**
+ * asd_write_flash_seg - write data into flash memory
+ * @asd_ha: pointer to the host adapter structure
+ * @src: pointer to the source data to be written
+ * @dest_offset: offset into flash memory
+ * @bytes_to_write: total bytes to write
+ */
+int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
+ void *src, u32 dest_offset, u32 bytes_to_write)
+{
+ u8 *src_buf;
+ u32 nv_offset, reg, i;
+ int err;
+
+ reg = asd_ha->hw_prof.flash.bar;
+ src_buf = NULL;
+
+ err = asd_check_flash_type(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't find the type of flash. err=%d\n", err);
+ return err;
+ }
+
+ nv_offset = dest_offset;
+ err = asd_erase_nv_sector(asd_ha, nv_offset, bytes_to_write);
+ if (err) {
+ ASD_DPRINTK("Erase failed at offset:0x%x\n",
+ nv_offset);
+ return err;
+ }
+
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ src_buf = (u8 *)src;
+ for (i = 0; i < bytes_to_write; i++) {
+ /* Setup program command sequence */
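+ /*
+ * The 0xAA/0x55 writes below are the standard JEDEC unlock cycles
+ * and 0xA0 is the byte-program command; methods A and B differ
+ * only in the unlock address layout used by the part.
+ */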
+ switch (asd_ha->hw_prof.flash.method) {
+ case FLASH_METHOD_A:
+ {
+ asd_write_reg_byte(asd_ha,
+ (reg + 0xAAA), 0xAA);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x555), 0x55);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0xAAA), 0xA0);
+ asd_write_reg_byte(asd_ha,
+ (reg + nv_offset + i),
+ (*(src_buf + i)));
+ break;
+ }
+ case FLASH_METHOD_B:
+ {
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha,
+ (reg + 0x555), 0xA0);
+ asd_write_reg_byte(asd_ha,
+ (reg + nv_offset + i),
+ (*(src_buf + i)));
+ break;
+ }
+ default:
+ break;
+ }
+ if (asd_chk_write_status(asd_ha,
+ (nv_offset + i), 0) != 0) {
+ ASD_DPRINTK("aicx: Write failed at offset:0x%x\n",
+ reg + nv_offset + i);
+ return FAIL_WRITE_FLASH;
+ }
+ }
+
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+int asd_chk_write_status(struct asd_ha_struct *asd_ha,
+ u32 sector_addr, u8 erase_flag)
+{
+ u32 reg;
+ u32 loop_cnt;
+ u8 nv_data1, nv_data2;
+ u8 toggle_bit1;
+
+ /*
+ * Reading DQ2 requires the sector address,
+ * while the address is a don't-care for DQ6.
+ */
+ reg = asd_ha->hw_prof.flash.bar;
+
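+ /*
+ * Poll the DQ6 toggle bit: successive reads toggle while a program
+ * or erase is in progress and match once it completes; DQ5 set
+ * means the operation exceeded the device's internal time limit.
+ */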
+ for (loop_cnt = 0; loop_cnt < 50000; loop_cnt++) {
+ nv_data1 = asd_read_reg_byte(asd_ha, reg);
+ nv_data2 = asd_read_reg_byte(asd_ha, reg);
+
+ toggle_bit1 = ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
+ ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
+
+ if (toggle_bit1 == 0) {
+ return 0;
+ } else {
+ if (nv_data2 & FLASH_STATUS_BIT_MASK_DQ5) {
+ nv_data1 = asd_read_reg_byte(asd_ha,
+ reg);
+ nv_data2 = asd_read_reg_byte(asd_ha,
+ reg);
+ toggle_bit1 =
+ ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
+ ^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
+
+ if (toggle_bit1 == 0)
+ return 0;
+ }
+ }
+
+ /*
+ * ERASE is a sector-by-sector operation and takes longer to
+ * finish, while WRITE is a byte-by-byte operation and completes
+ * more quickly.
+ *
+ * A reduced ERASE delay behaves differently across different
+ * spirit boards, so we settle on 50us for ERASE, which works
+ * well across all boards.
+ */
+ if (erase_flag) {
+ udelay(FLASH_STATUS_ERASE_DELAY_COUNT);
+ } else {
+ udelay(FLASH_STATUS_WRITE_DELAY_COUNT);
+ }
+ }
+ return -1;
+}
+
+/**
+ * asd_erase_nv_sector - erase flash memory sectors
+ * @asd_ha: pointer to the host adapter structure
+ * @flash_addr: offset into flash memory
+ * @size: total bytes to erase
+ */
+int asd_erase_nv_sector(struct asd_ha_struct *asd_ha, u32 flash_addr, u32 size)
+{
+ u32 reg;
+ u32 sector_addr;
+
+ reg = asd_ha->hw_prof.flash.bar;
+
+ /* sector starting address */
+ sector_addr = flash_addr & FLASH_SECTOR_SIZE_MASK;
+
+ /*
+ * Erasing a flash sector takes six consecutive write cycles:
+ * two unlock cycles, the erase setup command, two more unlock
+ * cycles, and the sector-erase command issued to the sector
+ * address.
+ */
+ while (sector_addr < flash_addr+size) {
+ switch (asd_ha->hw_prof.flash.method) {
+ case FLASH_METHOD_A:
+ asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0x80);
+ asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
+ break;
+ case FLASH_METHOD_B:
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x80);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
+ break;
+ default:
+ break;
+ }
+
+ if (asd_chk_write_status(asd_ha, sector_addr, 1) != 0)
+ return FAIL_ERASE_FLASH;
+
+ sector_addr += FLASH_SECTOR_SIZE;
+ }
+
+ return 0;
+}
+
+int asd_check_flash_type(struct asd_ha_struct *asd_ha)
+{
+ u8 manuf_id;
+ u8 dev_id;
+ u8 sec_prot;
+ u32 inc;
+ u32 reg;
+ int err;
+
+ /* get Flash memory base address */
+ reg = asd_ha->hw_prof.flash.bar;
+
+ /* Determine flash info */
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_UNKNOWN;
+ asd_ha->hw_prof.flash.manuf = FLASH_MANUF_ID_UNKNOWN;
+ asd_ha->hw_prof.flash.dev_id = FLASH_DEV_ID_UNKNOWN;
+
+ /* Get flash info. This would most likely be AMD Am29LV family flash.
+ * First try the sequence for word mode. It is the same as for
+ * 008B (byte mode only), 160B (word mode) and 800D (word mode).
+ */
+ inc = asd_ha->hw_prof.flash.wide ? 2 : 1;
+ asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA);
+ asd_write_reg_byte(asd_ha, reg + 0x555, 0x55);
+ asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90);
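+ /*
+ * In autoselect mode the first location returns the manufacturer
+ * ID, the next the device ID, and the one after that the sector
+ * protection status.
+ */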
+ manuf_id = asd_read_reg_byte(asd_ha, reg);
+ dev_id = asd_read_reg_byte(asd_ha, reg + inc);
+ sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
+ /* Get out of autoselect mode. */
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+ ASD_DPRINTK("Flash MethodA manuf_id(0x%x) dev_id(0x%x) "
+ "sec_prot(0x%x)\n", manuf_id, dev_id, sec_prot);
+ err = asd_reset_flash(asd_ha);
+ if (err != 0)
+ return err;
+
+ switch (manuf_id) {
+ case FLASH_MANUF_ID_AMD:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_AM29LV800DT:
+ case FLASH_DEV_ID_AM29LV640MT:
+ case FLASH_DEV_ID_AM29F800B:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_ST:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_STM29W800DT:
+ case FLASH_DEV_ID_STM29LV640:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_FUJITSU:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_MBM29LV800TE:
+ case FLASH_DEV_ID_MBM29DL800TA:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_MACRONIX:
+ switch (sec_prot) {
+ case FLASH_DEV_ID_MX29LV800BT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
+ break;
+ }
+ break;
+ }
+
+ if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) {
+ err = asd_reset_flash(asd_ha);
+ if (err) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ /* Issue Unlock sequence for AM29LV008BT */
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
+ asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
+ asd_write_reg_byte(asd_ha, (reg + 0x555), 0x90);
+ manuf_id = asd_read_reg_byte(asd_ha, reg);
+ dev_id = asd_read_reg_byte(asd_ha, reg + inc);
+ sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
+
+ ASD_DPRINTK("Flash MethodB manuf_id(0x%x) dev_id(0x%x) sec_prot"
+ "(0x%x)\n", manuf_id, dev_id, sec_prot);
+
+ err = asd_reset_flash(asd_ha);
+ if (err != 0) {
+ ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
+ return err;
+ }
+
+ switch (manuf_id) {
+ case FLASH_MANUF_ID_AMD:
+ switch (dev_id) {
+ case FLASH_DEV_ID_AM29LV008BT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_ST:
+ switch (dev_id) {
+ case FLASH_DEV_ID_STM29008:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ default:
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_FUJITSU:
+ switch (dev_id) {
+ case FLASH_DEV_ID_MBM29LV008TA:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_INTEL:
+ switch (dev_id) {
+ case FLASH_DEV_ID_I28LV00TAT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ }
+ break;
+ case FLASH_MANUF_ID_MACRONIX:
+ switch (dev_id) {
+ case FLASH_DEV_ID_I28LV00TAT:
+ asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
+ break;
+ }
+ break;
+ default:
+ return FAIL_FIND_FLASH_ID;
+ }
+ }
+
+ if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN)
+ return FAIL_FIND_FLASH_ID;
+
+ asd_ha->hw_prof.flash.manuf = manuf_id;
+ asd_ha->hw_prof.flash.dev_id = dev_id;
+ asd_ha->hw_prof.flash.sec_prot = sec_prot;
+ return 0;
+}
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.h b/drivers/scsi/aic94xx/aic94xx_sds.h
new file mode 100644
index 000000000000..bb9795a04dc3
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sds.h
@@ -0,0 +1,121 @@
+/*
+ * Aic94xx SAS/SATA driver hardware interface header file.
+ *
+ * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
+ * Copyright (C) 2005 Gilbert Wu <gilbert_wu@adaptec.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This file is part of the aic94xx driver.
+ *
+ * The aic94xx driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * The aic94xx driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the aic94xx driver; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#ifndef _AIC94XX_SDS_H_
+#define _AIC94XX_SDS_H_
+
+enum {
+ FLASH_METHOD_UNKNOWN,
+ FLASH_METHOD_A,
+ FLASH_METHOD_B
+};
+
+#define FLASH_MANUF_ID_AMD 0x01
+#define FLASH_MANUF_ID_ST 0x20
+#define FLASH_MANUF_ID_FUJITSU 0x04
+#define FLASH_MANUF_ID_MACRONIX 0xC2
+#define FLASH_MANUF_ID_INTEL 0x89
+#define FLASH_MANUF_ID_UNKNOWN 0xFF
+
+#define FLASH_DEV_ID_AM29LV008BT 0x3E
+#define FLASH_DEV_ID_AM29LV800DT 0xDA
+#define FLASH_DEV_ID_STM29W800DT 0xD7
+#define FLASH_DEV_ID_STM29LV640 0xDE
+#define FLASH_DEV_ID_STM29008 0xEA
+#define FLASH_DEV_ID_MBM29LV800TE 0xDA
+#define FLASH_DEV_ID_MBM29DL800TA 0x4A
+#define FLASH_DEV_ID_MBM29LV008TA 0x3E
+#define FLASH_DEV_ID_AM29LV640MT 0x7E
+#define FLASH_DEV_ID_AM29F800B 0xD6
+#define FLASH_DEV_ID_MX29LV800BT 0xDA
+#define FLASH_DEV_ID_MX29LV008CT 0xDA
+#define FLASH_DEV_ID_I28LV00TAT 0x3E
+#define FLASH_DEV_ID_UNKNOWN 0xFF
+
+/* status bit mask values */
+#define FLASH_STATUS_BIT_MASK_DQ6 0x40
+#define FLASH_STATUS_BIT_MASK_DQ5 0x20
+#define FLASH_STATUS_BIT_MASK_DQ2 0x04
+
+/* minimum value in microseconds needed for checking status */
+#define FLASH_STATUS_ERASE_DELAY_COUNT 50
+#define FLASH_STATUS_WRITE_DELAY_COUNT 25
+
+#define FLASH_SECTOR_SIZE 0x010000
+#define FLASH_SECTOR_SIZE_MASK 0xffff0000
+
+#define FLASH_OK 0x000000
+#define FAIL_OPEN_BIOS_FILE 0x000100
+#define FAIL_CHECK_PCI_ID 0x000200
+#define FAIL_CHECK_SUM 0x000300
+#define FAIL_UNKNOWN 0x000400
+#define FAIL_VERIFY 0x000500
+#define FAIL_RESET_FLASH 0x000600
+#define FAIL_FIND_FLASH_ID 0x000700
+#define FAIL_ERASE_FLASH 0x000800
+#define FAIL_WRITE_FLASH 0x000900
+#define FAIL_FILE_SIZE 0x000a00
+#define FAIL_PARAMETERS 0x000b00
+#define FAIL_OUT_MEMORY 0x000c00
+#define FLASH_IN_PROGRESS 0x001000
+
+struct controller_id {
+ u32 vendor; /* PCI Vendor ID */
+ u32 device; /* PCI Device ID */
+ u32 sub_vendor; /* PCI Subvendor ID */
+ u32 sub_device; /* PCI Subdevice ID */
+};
+
+struct image_info {
+ u32 ImageId; /* Identifies the image */
+ u32 ImageOffset; /* Offset from the beginning of the file */
+ u32 ImageLength; /* length of the image */
+ u32 ImageChecksum; /* Image checksum */
+ u32 ImageVersion; /* Version of the image, could be build number */
+};
+
+struct bios_file_header {
+ u8 signature[32]; /* Signature/Cookie to identify the file */
+ u32 checksum; /* Entire file checksum with this field zero */
+ u32 antidote; /* Entire file checksum with this field 0xFFFFFFFF */
+ struct controller_id contrl_id; /* PCI id to identify the controller */
+ u32 filelen; /* Length of the entire file */
+ u32 chunk_num; /* The chunk/part number for multiple image files */
+ u32 total_chunks; /* Total number of chunks/parts in the image file */
+ u32 num_images; /* Number of images in the file */
+ u32 build_num; /* Build number of this image */
+ struct image_info image_header;
+};
+
+int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
+ void *src, u32 dest_offset, u32 bytes_to_verify);
+int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
+ void *src, u32 dest_offset, u32 bytes_to_write);
+int asd_chk_write_status(struct asd_ha_struct *asd_ha,
+ u32 sector_addr, u8 erase_flag);
+int asd_check_flash_type(struct asd_ha_struct *asd_ha);
+int asd_erase_nv_sector(struct asd_ha_struct *asd_ha,
+ u32 flash_addr, u32 size);
+#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index ee0a98bffcd4..965d4bb999d9 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -187,29 +187,13 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
ts->buf_valid_size = 0;
edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
r = edb->vaddr;
- if (task->task_proto == SAS_PROTO_SSP) {
+ if (task->task_proto == SAS_PROTOCOL_SSP) {
struct ssp_response_iu *iu =
r + 16 + sizeof(struct ssp_frame_hdr);
ts->residual = le32_to_cpu(*(__le32 *)r);
- ts->resp = SAS_TASK_COMPLETE;
- if (iu->datapres == 0)
- ts->stat = iu->status;
- else if (iu->datapres == 1)
- ts->stat = iu->resp_data[3];
- else if (iu->datapres == 2) {
- ts->stat = SAM_CHECK_COND;
- ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE,
- be32_to_cpu(iu->sense_data_len));
- memcpy(ts->buf, iu->sense_data, ts->buf_valid_size);
- if (iu->status != SAM_CHECK_COND) {
- ASD_DPRINTK("device %llx sent sense data, but "
- "stat(0x%x) is not CHECK_CONDITION"
- "\n",
- SAS_ADDR(task->dev->sas_addr),
- iu->status);
- }
- }
+
+ sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
} else {
struct ata_task_resp *resp = (void *) &ts->buf[0];
@@ -341,14 +325,14 @@ Again:
}
switch (task->task_proto) {
- case SATA_PROTO:
- case SAS_PROTO_STP:
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
asd_unbuild_ata_ascb(ascb);
break;
- case SAS_PROTO_SMP:
+ case SAS_PROTOCOL_SMP:
asd_unbuild_smp_ascb(ascb);
break;
- case SAS_PROTO_SSP:
+ case SAS_PROTOCOL_SSP:
asd_unbuild_ssp_ascb(ascb);
default:
break;
@@ -586,17 +570,17 @@ int asd_execute_task(struct sas_task *task, const int num,
list_for_each_entry(a, &alist, list) {
t = a->uldd_task;
a->uldd_timer = 1;
- if (t->task_proto & SAS_PROTO_STP)
- t->task_proto = SAS_PROTO_STP;
+ if (t->task_proto & SAS_PROTOCOL_STP)
+ t->task_proto = SAS_PROTOCOL_STP;
switch (t->task_proto) {
- case SATA_PROTO:
- case SAS_PROTO_STP:
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
res = asd_build_ata_ascb(a, t, gfp_flags);
break;
- case SAS_PROTO_SMP:
+ case SAS_PROTOCOL_SMP:
res = asd_build_smp_ascb(a, t, gfp_flags);
break;
- case SAS_PROTO_SSP:
+ case SAS_PROTOCOL_SSP:
res = asd_build_ssp_ascb(a, t, gfp_flags);
break;
default:
@@ -633,14 +617,14 @@ out_err_unmap:
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&t->task_state_lock, flags);
switch (t->task_proto) {
- case SATA_PROTO:
- case SAS_PROTO_STP:
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
asd_unbuild_ata_ascb(a);
break;
- case SAS_PROTO_SMP:
+ case SAS_PROTOCOL_SMP:
asd_unbuild_smp_ascb(a);
break;
- case SAS_PROTO_SSP:
+ case SAS_PROTOCOL_SSP:
asd_unbuild_ssp_ascb(a);
default:
break;
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index c0d0b7d7a8ce..87b2f6e6adfe 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -372,21 +372,21 @@ int asd_abort_task(struct sas_task *task)
scb->header.opcode = ABORT_TASK;
switch (task->task_proto) {
- case SATA_PROTO:
- case SAS_PROTO_STP:
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
break;
- case SAS_PROTO_SSP:
+ case SAS_PROTOCOL_SSP:
scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
scb->abort_task.proto_conn_rate |= task->dev->linkrate;
break;
- case SAS_PROTO_SMP:
+ case SAS_PROTOCOL_SMP:
break;
default:
break;
}
- if (task->task_proto == SAS_PROTO_SSP) {
+ if (task->task_proto == SAS_PROTOCOL_SSP) {
scb->abort_task.ssp_frame.frame_type = SSP_TASK;
memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
@@ -512,7 +512,7 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
int res = 1;
struct scb *scb;
- if (!(dev->tproto & SAS_PROTO_SSP))
+ if (!(dev->tproto & SAS_PROTOCOL_SSP))
return TMF_RESP_FUNC_ESUPP;
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index d466a2dac1db..d80dba913a75 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -634,9 +634,9 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
pcmd->result = DID_OK << 16;
if (sensebuffer) {
int sense_data_length =
- sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer)
- ? sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer);
- memset(sensebuffer, 0, sizeof(pcmd->sense_buffer));
+ sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
+ ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
+ memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index a9680b5e8ac6..93b61f148653 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -511,9 +511,9 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
* various queues are valid.
*/
- if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
- cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
/* ++roman: Try to merge some scatter-buffers if they are at
@@ -523,8 +523,8 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
} else {
cmd->SCp.buffer = NULL;
cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = (char *)cmd->request_buffer;
- cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
}
}
@@ -936,21 +936,21 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
}
# endif
# ifdef NCR5380_STAT_LIMIT
- if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
+ if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
switch (cmd->cmnd[0]) {
case WRITE:
case WRITE_6:
case WRITE_10:
hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
- hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;
+ hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingw++;
break;
case READ:
case READ_6:
case READ_10:
hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
- hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;
+ hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingr++;
break;
}
@@ -1352,21 +1352,21 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
{
# ifdef NCR5380_STAT_LIMIT
- if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
+ if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
switch (cmd->cmnd[0]) {
case WRITE:
case WRITE_6:
case WRITE_10:
hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
- /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/
+ /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
hostdata->pendingw--;
break;
case READ:
case READ_6:
case READ_10:
hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
- /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/
+ /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
hostdata->pendingr--;
break;
}
@@ -1868,7 +1868,7 @@ static int do_abort(struct Scsi_Host *host)
* the target sees, so we just handshake.
*/
- while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ)
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
;
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index fec58cc47f1c..db6de5e6afb3 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -471,18 +471,8 @@ go_42:
/*
* Complete the command
*/
- if (workreq->use_sg) {
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)workreq->request_buffer,
- workreq->use_sg,
- workreq->sc_data_direction);
- } else if (workreq->request_bufflen &&
- workreq->sc_data_direction != DMA_NONE) {
- pci_unmap_single(dev->pdev,
- workreq->SCp.dma_handle,
- workreq->request_bufflen,
- workreq->sc_data_direction);
- }
+ scsi_dma_unmap(workreq);
+
spin_lock_irqsave(dev->host->host_lock, flags);
(*workreq->scsi_done) (workreq);
#ifdef ED_DBGP
@@ -624,7 +614,7 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p,
c = scmd_channel(req_p);
req_p->sense_buffer[0]=0;
- req_p->resid = 0;
+ scsi_set_resid(req_p, 0);
if (scmd_channel(req_p) > 1) {
req_p->result = 0x00040000;
done(req_p);
@@ -722,7 +712,6 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
unsigned short int tmpcip, w;
unsigned long l, bttl = 0;
unsigned int workport;
- struct scatterlist *sgpnt;
unsigned long sg_count;
if (dev->in_snd[c] != 0) {
@@ -793,6 +782,8 @@ oktosend:
}
printk("\n");
#endif
+ l = scsi_bufflen(workreq);
+
if (dev->dev_id == ATP885_DEVID) {
j = inb(dev->baseport + 0x29) & 0xfe;
outb(j, dev->baseport + 0x29);
@@ -800,12 +791,11 @@ oktosend:
}
if (workreq->cmnd[0] == READ_CAPACITY) {
- if (workreq->request_bufflen > 8) {
- workreq->request_bufflen = 0x08;
- }
+ if (l > 8)
+ l = 8;
}
if (workreq->cmnd[0] == 0x00) {
- workreq->request_bufflen = 0;
+ l = 0;
}
tmport = workport + 0x1b;
@@ -852,40 +842,8 @@ oktosend:
#ifdef ED_DBGP
printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp);
#endif
- /*
- * Figure out the transfer size
- */
- if (workreq->use_sg) {
-#ifdef ED_DBGP
- printk("Using SGL\n");
-#endif
- l = 0;
-
- sgpnt = (struct scatterlist *) workreq->request_buffer;
- sg_count = pci_map_sg(dev->pdev, sgpnt, workreq->use_sg,
- workreq->sc_data_direction);
-
- for (i = 0; i < workreq->use_sg; i++) {
- if (sgpnt[i].length == 0 || workreq->use_sg > ATP870U_SCATTER) {
- panic("Foooooooood fight!");
- }
- l += sgpnt[i].length;
- }
-#ifdef ED_DBGP
- printk( "send_s870: workreq->use_sg %d, sg_count %d l %8ld\n", workreq->use_sg, sg_count, l);
-#endif
- } else if(workreq->request_bufflen && workreq->sc_data_direction != PCI_DMA_NONE) {
-#ifdef ED_DBGP
- printk("Not using SGL\n");
-#endif
- workreq->SCp.dma_handle = pci_map_single(dev->pdev, workreq->request_buffer,
- workreq->request_bufflen,
- workreq->sc_data_direction);
- l = workreq->request_bufflen;
-#ifdef ED_DBGP
- printk( "send_s870: workreq->use_sg %d, l %8ld\n", workreq->use_sg, l);
-#endif
- } else l = 0;
+
+ sg_count = scsi_dma_map(workreq);
/*
* Write transfer size
*/
@@ -938,16 +896,16 @@ oktosend:
* a linear chain.
*/
- if (workreq->use_sg) {
- sgpnt = (struct scatterlist *) workreq->request_buffer;
+ if (l) {
+ struct scatterlist *sgpnt;
i = 0;
- for (j = 0; j < workreq->use_sg; j++) {
- bttl = sg_dma_address(&sgpnt[j]);
- l=sg_dma_len(&sgpnt[j]);
+ scsi_for_each_sg(workreq, sgpnt, sg_count, j) {
+ bttl = sg_dma_address(sgpnt);
+ l=sg_dma_len(sgpnt);
#ifdef ED_DBGP
- printk("1. bttl %x, l %x\n",bttl, l);
+ printk("1. bttl %x, l %x\n",bttl, l);
#endif
- while (l > 0x10000) {
+ while (l > 0x10000) {
(((u16 *) (prd))[i + 3]) = 0x0000;
(((u16 *) (prd))[i + 2]) = 0x0000;
(((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
@@ -965,32 +923,6 @@ oktosend:
printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
printk("2. bttl %x, l %x\n",bttl, l);
#endif
- } else {
- /*
- * For a linear request write a chain of blocks
- */
- bttl = workreq->SCp.dma_handle;
- l = workreq->request_bufflen;
- i = 0;
-#ifdef ED_DBGP
- printk("3. bttl %x, l %x\n",bttl, l);
-#endif
- while (l > 0x10000) {
- (((u16 *) (prd))[i + 3]) = 0x0000;
- (((u16 *) (prd))[i + 2]) = 0x0000;
- (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
- l -= 0x10000;
- bttl += 0x10000;
- i += 0x04;
- }
- (((u16 *) (prd))[i + 3]) = cpu_to_le16(0x8000);
- (((u16 *) (prd))[i + 2]) = cpu_to_le16(l);
- (((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
-#ifdef ED_DBGP
- printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
- printk("4. bttl %x, l %x\n",bttl, l);
-#endif
-
}
tmpcip += 4;
#ifdef ED_DBGP
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 2311019304c0..7aad15436d24 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -21,6 +21,7 @@
#include <linux/compat.h>
#include <linux/chio.h> /* here are all the ioctls */
#include <linux/mutex.h>
+#include <linux/idr.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -33,6 +34,7 @@
#define CH_DT_MAX 16
#define CH_TYPES 8
+#define CH_MAX_DEVS 128
MODULE_DESCRIPTION("device driver for scsi media changer devices");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>");
@@ -88,17 +90,6 @@ static const char * vendor_labels[CH_TYPES-4] = {
#define MAX_RETRIES 1
-static int ch_probe(struct device *);
-static int ch_remove(struct device *);
-static int ch_open(struct inode * inode, struct file * filp);
-static int ch_release(struct inode * inode, struct file * filp);
-static int ch_ioctl(struct inode * inode, struct file * filp,
- unsigned int cmd, unsigned long arg);
-#ifdef CONFIG_COMPAT
-static long ch_ioctl_compat(struct file * filp,
- unsigned int cmd, unsigned long arg);
-#endif
-
static struct class * ch_sysfs_class;
typedef struct {
@@ -114,30 +105,8 @@ typedef struct {
struct mutex lock;
} scsi_changer;
-static LIST_HEAD(ch_devlist);
-static DEFINE_SPINLOCK(ch_devlist_lock);
-static int ch_devcount;
-
-static struct scsi_driver ch_template =
-{
- .owner = THIS_MODULE,
- .gendrv = {
- .name = "ch",
- .probe = ch_probe,
- .remove = ch_remove,
- },
-};
-
-static const struct file_operations changer_fops =
-{
- .owner = THIS_MODULE,
- .open = ch_open,
- .release = ch_release,
- .ioctl = ch_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ch_ioctl_compat,
-#endif
-};
+static DEFINE_IDR(ch_index_idr);
+static DEFINE_SPINLOCK(ch_index_lock);
static const struct {
unsigned char sense;
@@ -207,7 +176,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
{
int errno, retries = 0, timeout, result;
struct scsi_sense_hdr sshdr;
-
+
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move;
@@ -245,7 +214,7 @@ static int
ch_elem_to_typecode(scsi_changer *ch, u_int elem)
{
int i;
-
+
for (i = 0; i < CH_TYPES; i++) {
if (elem >= ch->firsts[i] &&
elem < ch->firsts[i] +
@@ -261,15 +230,15 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
u_char cmd[12];
u_char *buffer;
int result;
-
+
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if(!buffer)
return -ENOMEM;
-
+
retry:
memset(cmd,0,sizeof(cmd));
cmd[0] = READ_ELEMENT_STATUS;
- cmd[1] = (ch->device->lun << 5) |
+ cmd[1] = (ch->device->lun << 5) |
(ch->voltags ? 0x10 : 0) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
@@ -296,7 +265,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
return result;
}
-static int
+static int
ch_init_elem(scsi_changer *ch)
{
int err;
@@ -322,7 +291,7 @@ ch_readconfig(scsi_changer *ch)
buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
return -ENOMEM;
-
+
memset(cmd,0,sizeof(cmd));
cmd[0] = MODE_SENSE;
cmd[1] = ch->device->lun << 5;
@@ -365,7 +334,7 @@ ch_readconfig(scsi_changer *ch)
} else {
vprintk("reading element address assigment page failed!\n");
}
-
+
/* vendor specific element types */
for (i = 0; i < 4; i++) {
if (0 == vendor_counts[i])
@@ -443,7 +412,7 @@ static int
ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
{
u_char cmd[10];
-
+
dprintk("position: 0x%x\n",elem);
if (0 == trans)
trans = ch->firsts[CHET_MT];
@@ -462,7 +431,7 @@ static int
ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
{
u_char cmd[12];
-
+
dprintk("move: 0x%x => 0x%x\n",src,dest);
if (0 == trans)
trans = ch->firsts[CHET_MT];
@@ -484,7 +453,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
u_int dest1, u_int dest2, int rotate1, int rotate2)
{
u_char cmd[12];
-
+
dprintk("exchange: 0x%x => 0x%x => 0x%x\n",
src,dest1,dest2);
if (0 == trans)
@@ -501,7 +470,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
cmd[8] = (dest2 >> 8) & 0xff;
cmd[9] = dest2 & 0xff;
cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);
-
+
return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE);
}
@@ -539,14 +508,14 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
elem, tag);
memset(cmd,0,sizeof(cmd));
cmd[0] = SEND_VOLUME_TAG;
- cmd[1] = (ch->device->lun << 5) |
+ cmd[1] = (ch->device->lun << 5) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
cmd[3] = elem & 0xff;
cmd[5] = clear
? (alternate ? 0x0d : 0x0c)
: (alternate ? 0x0b : 0x0a);
-
+
cmd[9] = 255;
memcpy(buffer,tag,32);
@@ -562,7 +531,7 @@ static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
int retval = 0;
u_char data[16];
unsigned int i;
-
+
mutex_lock(&ch->lock);
for (i = 0; i < ch->counts[type]; i++) {
if (0 != ch_read_element_status
@@ -599,20 +568,17 @@ ch_release(struct inode *inode, struct file *file)
static int
ch_open(struct inode *inode, struct file *file)
{
- scsi_changer *tmp, *ch;
+ scsi_changer *ch;
int minor = iminor(inode);
- spin_lock(&ch_devlist_lock);
- ch = NULL;
- list_for_each_entry(tmp,&ch_devlist,list) {
- if (tmp->minor == minor)
- ch = tmp;
- }
+ spin_lock(&ch_index_lock);
+ ch = idr_find(&ch_index_idr, minor);
+
if (NULL == ch || scsi_device_get(ch->device)) {
- spin_unlock(&ch_devlist_lock);
+ spin_unlock(&ch_index_lock);
return -ENXIO;
}
- spin_unlock(&ch_devlist_lock);
+ spin_unlock(&ch_index_lock);
file->private_data = ch;
return 0;
@@ -626,24 +592,24 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
return 0;
}
-static int ch_ioctl(struct inode * inode, struct file * file,
+static long ch_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
scsi_changer *ch = file->private_data;
int retval;
void __user *argp = (void __user *)arg;
-
+
switch (cmd) {
case CHIOGPARAMS:
{
struct changer_params params;
-
+
params.cp_curpicker = 0;
params.cp_npickers = ch->counts[CHET_MT];
params.cp_nslots = ch->counts[CHET_ST];
params.cp_nportals = ch->counts[CHET_IE];
params.cp_ndrives = ch->counts[CHET_DT];
-
+
if (copy_to_user(argp, &params, sizeof(params)))
return -EFAULT;
return 0;
@@ -673,11 +639,11 @@ static int ch_ioctl(struct inode * inode, struct file * file,
return -EFAULT;
return 0;
}
-
+
case CHIOPOSITION:
{
struct changer_position pos;
-
+
if (copy_from_user(&pos, argp, sizeof (pos)))
return -EFAULT;
@@ -692,7 +658,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
mutex_unlock(&ch->lock);
return retval;
}
-
+
case CHIOMOVE:
{
struct changer_move mv;
@@ -705,7 +671,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
dprintk("CHIOMOVE: invalid parameter\n");
return -EBADSLT;
}
-
+
mutex_lock(&ch->lock);
retval = ch_move(ch,0,
ch->firsts[mv.cm_fromtype] + mv.cm_fromunit,
@@ -718,7 +684,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
case CHIOEXCHANGE:
{
struct changer_exchange mv;
-
+
if (copy_from_user(&mv, argp, sizeof (mv)))
return -EFAULT;
@@ -728,7 +694,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
dprintk("CHIOEXCHANGE: invalid parameter\n");
return -EBADSLT;
}
-
+
mutex_lock(&ch->lock);
retval = ch_exchange
(ch,0,
@@ -743,7 +709,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
case CHIOGSTATUS:
{
struct changer_element_status ces;
-
+
if (copy_from_user(&ces, argp, sizeof (ces)))
return -EFAULT;
if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES)
@@ -759,19 +725,19 @@ static int ch_ioctl(struct inode * inode, struct file * file,
u_char *buffer;
unsigned int elem;
int result,i;
-
+
if (copy_from_user(&cge, argp, sizeof (cge)))
return -EFAULT;
if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit))
return -EINVAL;
elem = ch->firsts[cge.cge_type] + cge.cge_unit;
-
+
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
return -ENOMEM;
mutex_lock(&ch->lock);
-
+
voltag_retry:
memset(cmd,0,sizeof(cmd));
cmd[0] = READ_ELEMENT_STATUS;
@@ -782,7 +748,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
cmd[3] = elem & 0xff;
cmd[5] = 1;
cmd[9] = 255;
-
+
if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) {
cge.cge_status = buffer[18];
cge.cge_flags = 0;
@@ -822,7 +788,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
}
kfree(buffer);
mutex_unlock(&ch->lock);
-
+
if (copy_to_user(argp, &cge, sizeof (cge)))
return -EFAULT;
return result;
@@ -835,7 +801,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
mutex_unlock(&ch->lock);
return retval;
}
-
+
case CHIOSVOLTAG:
{
struct changer_set_voltag csv;
@@ -876,7 +842,7 @@ static long ch_ioctl_compat(struct file * file,
unsigned int cmd, unsigned long arg)
{
scsi_changer *ch = file->private_data;
-
+
switch (cmd) {
case CHIOGPARAMS:
case CHIOGVPARAMS:
@@ -887,13 +853,12 @@ static long ch_ioctl_compat(struct file * file,
case CHIOINITELEM:
case CHIOSVOLTAG:
/* compatible */
- return ch_ioctl(NULL /* inode, unused */,
- file, cmd, arg);
+ return ch_ioctl(file, cmd, arg);
case CHIOGSTATUS32:
{
struct changer_element_status32 ces32;
unsigned char __user *data;
-
+
if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
return -EFAULT;
if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
@@ -915,63 +880,100 @@ static long ch_ioctl_compat(struct file * file,
static int ch_probe(struct device *dev)
{
struct scsi_device *sd = to_scsi_device(dev);
+ struct class_device *class_dev;
+ int minor, ret = -ENOMEM;
scsi_changer *ch;
-
+
if (sd->type != TYPE_MEDIUM_CHANGER)
return -ENODEV;
-
+
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (NULL == ch)
return -ENOMEM;
- ch->minor = ch_devcount;
+ if (!idr_pre_get(&ch_index_idr, GFP_KERNEL))
+ goto free_ch;
+
+ spin_lock(&ch_index_lock);
+ ret = idr_get_new(&ch_index_idr, ch, &minor);
+ spin_unlock(&ch_index_lock);
+
+ if (ret)
+ goto free_ch;
+
+ if (minor > CH_MAX_DEVS) {
+ ret = -ENODEV;
+ goto remove_idr;
+ }
+
+ ch->minor = minor;
sprintf(ch->name,"ch%d",ch->minor);
+
+ class_dev = class_device_create(ch_sysfs_class, NULL,
+ MKDEV(SCSI_CHANGER_MAJOR, ch->minor),
+ dev, "s%s", ch->name);
+ if (IS_ERR(class_dev)) {
+ printk(KERN_WARNING "ch%d: class_device_create failed\n",
+ ch->minor);
+ ret = PTR_ERR(class_dev);
+ goto remove_idr;
+ }
+
mutex_init(&ch->lock);
ch->device = sd;
ch_readconfig(ch);
if (init)
ch_init_elem(ch);
- class_device_create(ch_sysfs_class, NULL,
- MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
- dev, "s%s", ch->name);
-
+ dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
-
- spin_lock(&ch_devlist_lock);
- list_add_tail(&ch->list,&ch_devlist);
- ch_devcount++;
- spin_unlock(&ch_devlist_lock);
+
return 0;
+remove_idr:
+ idr_remove(&ch_index_idr, minor);
+free_ch:
+ kfree(ch);
+ return ret;
}
static int ch_remove(struct device *dev)
{
- struct scsi_device *sd = to_scsi_device(dev);
- scsi_changer *tmp, *ch;
+ scsi_changer *ch = dev_get_drvdata(dev);
- spin_lock(&ch_devlist_lock);
- ch = NULL;
- list_for_each_entry(tmp,&ch_devlist,list) {
- if (tmp->device == sd)
- ch = tmp;
- }
- BUG_ON(NULL == ch);
- list_del(&ch->list);
- spin_unlock(&ch_devlist_lock);
+ spin_lock(&ch_index_lock);
+ idr_remove(&ch_index_idr, ch->minor);
+ spin_unlock(&ch_index_lock);
class_device_destroy(ch_sysfs_class,
MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
kfree(ch->dt);
kfree(ch);
- ch_devcount--;
return 0;
}
+static struct scsi_driver ch_template = {
+ .owner = THIS_MODULE,
+ .gendrv = {
+ .name = "ch",
+ .probe = ch_probe,
+ .remove = ch_remove,
+ },
+};
+
+static const struct file_operations changer_fops = {
+ .owner = THIS_MODULE,
+ .open = ch_open,
+ .release = ch_release,
+ .unlocked_ioctl = ch_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ch_ioctl_compat,
+#endif
+};
+
static int __init init_ch_module(void)
{
int rc;
-
+
printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
ch_sysfs_class = class_create(THIS_MODULE, "scsi_changer");
if (IS_ERR(ch_sysfs_class)) {
@@ -996,11 +998,12 @@ static int __init init_ch_module(void)
return rc;
}
-static void __exit exit_ch_module(void)
+static void __exit exit_ch_module(void)
{
scsi_unregister_driver(&ch_template.gendrv);
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
class_destroy(ch_sysfs_class);
+ idr_destroy(&ch_index_idr);
}
module_init(init_ch_module);
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 024553f9c247..403a7f2d8f9b 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -362,7 +362,6 @@ void scsi_print_command(struct scsi_cmnd *cmd)
EXPORT_SYMBOL(scsi_print_command);
/**
- *
* scsi_print_status - print scsi status description
* @scsi_status: scsi status value
*
@@ -1369,7 +1368,7 @@ EXPORT_SYMBOL(scsi_print_sense);
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
-"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"};
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
static const char * const driverbyte_table[]={
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index a9def6e1d30e..f93c73c0ba53 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1629,8 +1629,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
- DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
- sizeof(srb->cmd->sense_buffer));
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
} else {
ptr = (u8 *)srb->cmd->cmnd;
@@ -1915,8 +1914,7 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
- DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
- sizeof(srb->cmd->sense_buffer));
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
}
srb->state |= SRB_COMMAND;
@@ -3685,7 +3683,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->target_status = 0;
/* KG: Can this prevent crap sense data ? */
- memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
/* Save some data */
srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
@@ -3694,15 +3692,15 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->segment_x[0].length;
srb->xferred = srb->total_xfer_length;
/* srb->segment_x : a one entry of S/G list table */
- srb->total_xfer_length = sizeof(cmd->sense_buffer);
- srb->segment_x[0].length = sizeof(cmd->sense_buffer);
+ srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
+ srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
/* Map sense buffer */
srb->segment_x[0].address =
pci_map_single(acb->dev, cmd->sense_buffer,
- sizeof(cmd->sense_buffer), PCI_DMA_FROMDEVICE);
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
cmd->sense_buffer, srb->segment_x[0].address,
- sizeof(cmd->sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
srb->sg_count = 1;
srb->sg_index = 0;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index b31d1c95c9fb..19cce125124c 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2296,9 +2296,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
// copy over the request sense data if it was a check
// condition status
- if(dev_status == 0x02 /*CHECK_CONDITION*/) {
- u32 len = sizeof(cmd->sense_buffer);
- len = (len > 40) ? 40 : len;
+ if (dev_status == SAM_STAT_CHECK_CONDITION) {
+ u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
// Copy over the sense data
memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 7ead5210de96..05163cefec12 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1623,9 +1623,9 @@ static void map_dma(unsigned int i, struct hostdata *ha)
if (SCpnt->sense_buffer)
cpp->sense_addr =
H2DEV(pci_map_single(ha->pdev, SCpnt->sense_buffer,
- sizeof SCpnt->sense_buffer, PCI_DMA_FROMDEVICE));
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
- cpp->sense_len = sizeof SCpnt->sense_buffer;
+ cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
count = scsi_dma_map(SCpnt);
BUG_ON(count < 0);
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 982c5092be11..b5a60926e556 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -369,7 +369,6 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
cp = &hd->ccb[y];
memset(cp, 0, sizeof(struct eata_ccb));
- memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
cp->status = USED; /* claim free slot */
@@ -385,7 +384,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
cp->DataIn = 0; /* Input mode */
cp->Interpret = (cmd->device->id == hd->hostid);
- cp->cp_datalen = cpu_to_be32(cmd->request_bufflen);
+ cp->cp_datalen = cpu_to_be32(scsi_bufflen(cmd));
cp->Auto_Req_Sen = 0;
cp->cp_reqDMA = 0;
cp->reqlen = 0;
@@ -402,14 +401,14 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
cp->cmd = cmd;
cmd->host_scribble = (char *) &hd->ccb[y];
- if (cmd->use_sg == 0) {
+ if (!scsi_bufflen(cmd)) {
cmd->SCp.buffers_residual = 1;
- cmd->SCp.ptr = cmd->request_buffer;
- cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
cmd->SCp.buffer = NULL;
} else {
- cmd->SCp.buffer = cmd->request_buffer;
- cmd->SCp.buffers_residual = cmd->use_sg;
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd);
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
}
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 8335b608e571..85bd54c77b50 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1017,24 +1017,6 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
printk(" ** IN DONE %d ** ", current_SC->SCp.have_data_in);
#endif
-#if ERRORS_ONLY
- if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
- if ((unsigned char) (*((char *) current_SC->request_buffer + 2)) & 0x0f) {
- unsigned char key;
- unsigned char code;
- unsigned char qualifier;
-
- key = (unsigned char) (*((char *) current_SC->request_buffer + 2)) & 0x0f;
- code = (unsigned char) (*((char *) current_SC->request_buffer + 12));
- qualifier = (unsigned char) (*((char *) current_SC->request_buffer + 13));
-
- if (key != UNIT_ATTENTION && !(key == NOT_READY && code == 0x04 && (!qualifier || qualifier == 0x02 || qualifier == 0x01))
- && !(key == ILLEGAL_REQUEST && (code == 0x25 || code == 0x24 || !code)))
-
- printk("fd_mcs: REQUEST SENSE " "Key = %x, Code = %x, Qualifier = %x\n", key, code, qualifier);
- }
- }
-#endif
#if EVERY_ACCESS
printk("BEFORE MY_DONE. . .");
#endif
@@ -1097,7 +1079,9 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
panic("fd_mcs: fd_mcs_queue() NOT REENTRANT!\n");
}
#if EVERY_ACCESS
- printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n", SCpnt->target, *(unsigned char *) SCpnt->cmnd, SCpnt->use_sg, SCpnt->request_bufflen);
+ printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->target, *(unsigned char *) SCpnt->cmnd,
+ scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
#endif
fd_mcs_make_bus_idle(shpnt);
@@ -1107,14 +1091,14 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
/* Initialize static data */
- if (current_SC->use_sg) {
- current_SC->SCp.buffer = (struct scatterlist *) current_SC->request_buffer;
+ if (scsi_bufflen(current_SC)) {
+ current_SC->SCp.buffer = scsi_sglist(current_SC);
current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
- current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
+ current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
} else {
- current_SC->SCp.ptr = (char *) current_SC->request_buffer;
- current_SC->SCp.this_residual = current_SC->request_bufflen;
+ current_SC->SCp.ptr = NULL;
+ current_SC->SCp.this_residual = 0;
current_SC->SCp.buffer = NULL;
current_SC->SCp.buffers_residual = 0;
}
@@ -1166,7 +1150,9 @@ static void fd_mcs_print_info(Scsi_Cmnd * SCpnt)
break;
}
- printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n", SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd, SCpnt->use_sg, SCpnt->request_bufflen);
+ printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd,
+ scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
printk("sent_command = %d, have_data_in = %d, timeout = %d\n", SCpnt->SCp.sent_command, SCpnt->SCp.have_data_in, SCpnt->timeout);
#if DEBUG_RACE
printk("in_interrupt_flag = %d\n", in_interrupt_flag);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index b253b8c718d3..c82523908c2e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -141,7 +141,7 @@
static void gdth_delay(int milliseconds);
static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs);
static irqreturn_t gdth_interrupt(int irq, void *dev_id);
-static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
+static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex);
static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
Scsi_Cmnd *scp);
@@ -165,7 +165,6 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
static void gdth_enable_int(gdth_ha_str *ha);
-static unchar gdth_get_status(gdth_ha_str *ha, int irq);
static int gdth_test_busy(gdth_ha_str *ha);
static int gdth_get_cmd_index(gdth_ha_str *ha);
static void gdth_release_event(gdth_ha_str *ha);
@@ -1334,14 +1333,12 @@ static void __init gdth_enable_int(gdth_ha_str *ha)
}
/* return IStatus if interrupt was from this card else 0 */
-static unchar gdth_get_status(gdth_ha_str *ha, int irq)
+static unchar gdth_get_status(gdth_ha_str *ha)
{
unchar IStatus = 0;
- TRACE(("gdth_get_status() irq %d ctr_count %d\n", irq, gdth_ctr_count));
+ TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
- if (ha->irq != (unchar)irq) /* check IRQ */
- return false;
if (ha->type == GDT_EISA)
IStatus = inb((ushort)ha->bmic + EDOORREG);
else if (ha->type == GDT_ISA)
@@ -1523,7 +1520,7 @@ static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
return 1; /* no wait required */
do {
- __gdth_interrupt(ha, (int)ha->irq, true, &wait_index);
+ __gdth_interrupt(ha, true, &wait_index);
if (wait_index == index) {
answer_found = TRUE;
break;
@@ -3036,7 +3033,7 @@ static void gdth_clear_events(void)
/* SCSI interface functions */
-static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
+static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex)
{
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
@@ -3054,7 +3051,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
int act_int_coal = 0;
#endif
- TRACE(("gdth_interrupt() IRQ %d\n",irq));
+ TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
/* if polling and not from gdth_wait() -> return */
if (gdth_polling) {
@@ -3067,7 +3064,8 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
spin_lock_irqsave(&ha->smp_lock, flags);
/* search controller */
- if (0 == (IStatus = gdth_get_status(ha, irq))) {
+ IStatus = gdth_get_status(ha);
+ if (IStatus == 0) {
/* spurious interrupt */
if (!gdth_polling)
spin_unlock_irqrestore(&ha->smp_lock, flags);
@@ -3294,9 +3292,9 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
static irqreturn_t gdth_interrupt(int irq, void *dev_id)
{
- gdth_ha_str *ha = (gdth_ha_str *)dev_id;
+ gdth_ha_str *ha = dev_id;
- return __gdth_interrupt(ha, irq, false, NULL);
+ return __gdth_interrupt(ha, false, NULL);
}
static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 6325115e5b3d..5ea1f986220c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -54,8 +54,7 @@ static struct class shost_class = {
};
/**
- * scsi_host_set_state - Take the given host through the host
- * state model.
+ * scsi_host_set_state - Take the given host through the host state model.
* @shost: scsi host to change the state of.
* @state: state to change to.
*
@@ -440,7 +439,6 @@ static int __scsi_host_match(struct class_device *cdev, void *data)
/**
* scsi_host_lookup - get a reference to a Scsi_Host by host no
- *
* @hostnum: host number to locate
*
* Return value:
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 0844331abb87..e7b2f3575ce9 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,5 +1,5 @@
/*
- * HighPoint RR3xxx controller driver for Linux
+ * HighPoint RR3xxx/4xxx controller driver for Linux
* Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -38,80 +38,84 @@
#include "hptiop.h"
MODULE_AUTHOR("HighPoint Technologies, Inc.");
-MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");
+MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
static char driver_name[] = "hptiop";
-static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
-static const char driver_ver[] = "v1.2 (070830)";
-
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
-static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
+static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
+static const char driver_ver[] = "v1.3 (071203)";
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+ struct hpt_iop_request_scsi_command *req);
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
-static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
-{
- readl(&iop->outbound_intstatus);
-}
-
-static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
+static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
u32 req = 0;
int i;
for (i = 0; i < millisec; i++) {
- req = readl(&iop->inbound_queue);
+ req = readl(&hba->u.itl.iop->inbound_queue);
if (req != IOPMU_QUEUE_EMPTY)
break;
msleep(1);
}
if (req != IOPMU_QUEUE_EMPTY) {
- writel(req, &iop->outbound_queue);
- hptiop_pci_posting_flush(iop);
+ writel(req, &hba->u.itl.iop->outbound_queue);
+ readl(&hba->u.itl.iop->outbound_intstatus);
return 0;
}
return -1;
}
-static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
+static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
+{
+ return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
+static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
- return hptiop_host_request_callback(hba,
+ hptiop_host_request_callback_itl(hba,
tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
else
- return hptiop_iop_request_callback(hba, tag);
+ hptiop_iop_request_callback_itl(hba, tag);
}
-static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
+static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
u32 req;
- while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {
+ while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
+ IOPMU_QUEUE_EMPTY) {
if (req & IOPMU_QUEUE_MASK_HOST_BITS)
- hptiop_request_callback(hba, req);
+ hptiop_request_callback_itl(hba, req);
else {
struct hpt_iop_request_header __iomem * p;
p = (struct hpt_iop_request_header __iomem *)
- ((char __iomem *)hba->iop + req);
+ ((char __iomem *)hba->u.itl.iop + req);
if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
if (readl(&p->context))
- hptiop_request_callback(hba, req);
+ hptiop_request_callback_itl(hba, req);
else
writel(1, &p->context);
}
else
- hptiop_request_callback(hba, req);
+ hptiop_request_callback_itl(hba, req);
}
}
}
-static int __iop_intr(struct hptiop_hba *hba)
+static int iop_intr_itl(struct hptiop_hba *hba)
{
- struct hpt_iopmu __iomem *iop = hba->iop;
+ struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
u32 status;
int ret = 0;
@@ -119,6 +123,7 @@ static int __iop_intr(struct hptiop_hba *hba)
if (status & IOPMU_OUTBOUND_INT_MSG0) {
u32 msg = readl(&iop->outbound_msgaddr0);
+
dprintk("received outbound msg %x\n", msg);
writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
hptiop_message_callback(hba, msg);
@@ -126,31 +131,115 @@ static int __iop_intr(struct hptiop_hba *hba)
}
if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
- hptiop_drain_outbound_queue(hba);
+ hptiop_drain_outbound_queue_itl(hba);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
+{
+ u32 outbound_tail = readl(&mu->outbound_tail);
+ u32 outbound_head = readl(&mu->outbound_head);
+
+ if (outbound_tail != outbound_head) {
+ u64 p;
+
+ memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
+ outbound_tail++;
+
+ if (outbound_tail == MVIOP_QUEUE_LEN)
+ outbound_tail = 0;
+ writel(outbound_tail, &mu->outbound_tail);
+ return p;
+ } else
+ return 0;
+}
+
+static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
+{
+ u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
+ u32 head = inbound_head + 1;
+
+ if (head == MVIOP_QUEUE_LEN)
+ head = 0;
+
+ memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
+ writel(head, &hba->u.mv.mu->inbound_head);
+ writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
+ &hba->u.mv.regs->inbound_doorbell);
+}
+
+static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
+{
+ u32 req_type = (tag >> 5) & 0x7;
+ struct hpt_iop_request_scsi_command *req;
+
+ dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
+
+ BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
+
+ switch (req_type) {
+ case IOP_REQUEST_TYPE_GET_CONFIG:
+ case IOP_REQUEST_TYPE_SET_CONFIG:
+ hba->msg_done = 1;
+ break;
+
+ case IOP_REQUEST_TYPE_SCSI_COMMAND:
+ req = hba->reqs[tag >> 8].req_virt;
+ if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+
+ hptiop_finish_scsi_req(hba, tag>>8, req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int iop_intr_mv(struct hptiop_hba *hba)
+{
+ u32 status;
+ int ret = 0;
+
+ status = readl(&hba->u.mv.regs->outbound_doorbell);
+ writel(~status, &hba->u.mv.regs->outbound_doorbell);
+
+ if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
+ u32 msg;
+ msg = readl(&hba->u.mv.mu->outbound_msg);
+ dprintk("received outbound msg %x\n", msg);
+ hptiop_message_callback(hba, msg);
+ ret = 1;
+ }
+
+ if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
+ u64 tag;
+
+ while ((tag = mv_outbound_read(hba->u.mv.mu)))
+ hptiop_request_callback_mv(hba, tag);
ret = 1;
}
return ret;
}
-static int iop_send_sync_request(struct hptiop_hba *hba,
+static int iop_send_sync_request_itl(struct hptiop_hba *hba,
void __iomem *_req, u32 millisec)
{
struct hpt_iop_request_header __iomem *req = _req;
u32 i;
- writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
- &req->flags);
-
+ writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
writel(0, &req->context);
-
- writel((unsigned long)req - (unsigned long)hba->iop,
- &hba->iop->inbound_queue);
-
- hptiop_pci_posting_flush(hba->iop);
+ writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
+ &hba->u.itl.iop->inbound_queue);
+ readl(&hba->u.itl.iop->outbound_intstatus);
for (i = 0; i < millisec; i++) {
- __iop_intr(hba);
+ iop_intr_itl(hba);
if (readl(&req->context))
return 0;
msleep(1);
@@ -159,19 +248,49 @@ static int iop_send_sync_request(struct hptiop_hba *hba,
return -1;
}
-static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+static int iop_send_sync_request_mv(struct hptiop_hba *hba,
+ u32 size_bits, u32 millisec)
{
+ struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
u32 i;
hba->msg_done = 0;
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+ mv_inbound_write(hba->u.mv.internal_req_phy |
+ MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
+
+ for (i = 0; i < millisec; i++) {
+ iop_intr_mv(hba);
+ if (hba->msg_done)
+ return 0;
+ msleep(1);
+ }
+ return -1;
+}
+
+static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
+ readl(&hba->u.itl.iop->outbound_intstatus);
+}
+
+static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &hba->u.mv.mu->inbound_msg);
+ writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
+ readl(&hba->u.mv.regs->inbound_doorbell);
+}
- writel(msg, &hba->iop->inbound_msgaddr0);
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+{
+ u32 i;
- hptiop_pci_posting_flush(hba->iop);
+ hba->msg_done = 0;
+ hba->ops->post_msg(hba, msg);
for (i = 0; i < millisec; i++) {
spin_lock_irq(hba->host->host_lock);
- __iop_intr(hba);
+ hba->ops->iop_intr(hba);
spin_unlock_irq(hba->host->host_lock);
if (hba->msg_done)
break;
@@ -181,46 +300,67 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
return hba->msg_done? 0 : -1;
}
-static int iop_get_config(struct hptiop_hba *hba,
+static int iop_get_config_itl(struct hptiop_hba *hba,
struct hpt_iop_request_get_config *config)
{
u32 req32;
struct hpt_iop_request_get_config __iomem *req;
- req32 = readl(&hba->iop->inbound_queue);
+ req32 = readl(&hba->u.itl.iop->inbound_queue);
if (req32 == IOPMU_QUEUE_EMPTY)
return -1;
req = (struct hpt_iop_request_get_config __iomem *)
- ((unsigned long)hba->iop + req32);
+ ((unsigned long)hba->u.itl.iop + req32);
writel(0, &req->header.flags);
writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
writel(IOP_RESULT_PENDING, &req->header.result);
- if (iop_send_sync_request(hba, req, 20000)) {
+ if (iop_send_sync_request_itl(hba, req, 20000)) {
dprintk("Get config send cmd failed\n");
return -1;
}
memcpy_fromio(config, req, sizeof(*config));
- writel(req32, &hba->iop->outbound_queue);
+ writel(req32, &hba->u.itl.iop->outbound_queue);
+ return 0;
+}
+
+static int iop_get_config_mv(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config)
+{
+ struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
+
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+
+ if (iop_send_sync_request_mv(hba, 0, 20000)) {
+ dprintk("Get config send cmd failed\n");
+ return -1;
+ }
+
+ memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
return 0;
}
-static int iop_set_config(struct hptiop_hba *hba,
+static int iop_set_config_itl(struct hptiop_hba *hba,
struct hpt_iop_request_set_config *config)
{
u32 req32;
struct hpt_iop_request_set_config __iomem *req;
- req32 = readl(&hba->iop->inbound_queue);
+ req32 = readl(&hba->u.itl.iop->inbound_queue);
if (req32 == IOPMU_QUEUE_EMPTY)
return -1;
req = (struct hpt_iop_request_set_config __iomem *)
- ((unsigned long)hba->iop + req32);
+ ((unsigned long)hba->u.itl.iop + req32);
memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
(u8 *)config + sizeof(struct hpt_iop_request_header),
@@ -232,22 +372,52 @@ static int iop_set_config(struct hptiop_hba *hba,
writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
writel(IOP_RESULT_PENDING, &req->header.result);
- if (iop_send_sync_request(hba, req, 20000)) {
+ if (iop_send_sync_request_itl(hba, req, 20000)) {
dprintk("Set config send cmd failed\n");
return -1;
}
- writel(req32, &hba->iop->outbound_queue);
+ writel(req32, &hba->u.itl.iop->outbound_queue);
return 0;
}
-static int hptiop_initialize_iop(struct hptiop_hba *hba)
+static int iop_set_config_mv(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config)
{
- struct hpt_iopmu __iomem *iop = hba->iop;
+ struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
- /* enable interrupts */
+ memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+
+ if (iop_send_sync_request_mv(hba, 0, 20000)) {
+ dprintk("Set config send cmd failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
+{
writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
- &iop->outbound_intmask);
+ &hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
+{
+ writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
+ &hba->u.mv.regs->outbound_intmask);
+}
+
+static int hptiop_initialize_iop(struct hptiop_hba *hba)
+{
+ /* enable interrupts */
+ hba->ops->enable_intr(hba);
hba->initialized = 1;
@@ -261,37 +431,74 @@ static int hptiop_initialize_iop(struct hptiop_hba *hba)
return 0;
}
-static int hptiop_map_pci_bar(struct hptiop_hba *hba)
+static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
u32 mem_base_phy, length;
void __iomem *mem_base_virt;
+
struct pci_dev *pcidev = hba->pcidev;
- if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
+
+ if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
printk(KERN_ERR "scsi%d: pci resource invalid\n",
hba->host->host_no);
- return -1;
+ return 0;
}
- mem_base_phy = pci_resource_start(pcidev, 0);
- length = pci_resource_len(pcidev, 0);
+ mem_base_phy = pci_resource_start(pcidev, index);
+ length = pci_resource_len(pcidev, index);
mem_base_virt = ioremap(mem_base_phy, length);
if (!mem_base_virt) {
printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
hba->host->host_no);
+ return 0;
+ }
+ return mem_base_virt;
+}
+
+static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
+{
+ hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.itl.iop)
+ return 0;
+ else
+ return -1;
+}
+
+static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
+{
+ iounmap(hba->u.itl.iop);
+}
+
+static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
+{
+ hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.mv.regs == 0)
+ return -1;
+
+ hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
+ if (hba->u.mv.mu == 0) {
+ iounmap(hba->u.mv.regs);
return -1;
}
- hba->iop = mem_base_virt;
- dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
return 0;
}
+static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
+{
+ iounmap(hba->u.mv.regs);
+ iounmap(hba->u.mv.mu);
+}
+
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
dprintk("iop message 0x%x\n", msg);
+ if (msg == IOPMU_INBOUND_MSG0_NOP)
+ hba->msg_done = 1;
+
if (!hba->initialized)
return;
@@ -303,7 +510,7 @@ static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
hba->msg_done = 1;
}
-static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
+static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
struct hptiop_request *ret;
@@ -316,30 +523,19 @@ static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
return ret;
}
-static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
+static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
dprintk("free_req(%d, %p)\n", req->index, req);
req->next = hba->req_list;
hba->req_list = req;
}
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+ struct hpt_iop_request_scsi_command *req)
{
- struct hpt_iop_request_scsi_command *req;
struct scsi_cmnd *scp;
- u32 tag;
-
- if (hba->iopintf_v2) {
- tag = _tag & ~ IOPMU_QUEUE_REQUEST_RESULT_BIT;
- req = hba->reqs[tag].req_virt;
- if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
- req->header.result = IOP_RESULT_SUCCESS;
- } else {
- tag = _tag;
- req = hba->reqs[tag].req_virt;
- }
- dprintk("hptiop_host_request_callback: req=%p, type=%d, "
+ dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
"result=%d, context=0x%x tag=%d\n",
req, req->header.type, req->header.result,
req->header.context, tag);
@@ -354,6 +550,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
switch (le32_to_cpu(req->header.result)) {
case IOP_RESULT_SUCCESS:
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
scp->result = (DID_OK<<16);
break;
case IOP_RESULT_BAD_TARGET:
@@ -371,12 +569,12 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
case IOP_RESULT_INVALID_REQUEST:
scp->result = (DID_ABORT<<16);
break;
- case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
+ case IOP_RESULT_CHECK_CONDITION:
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
scp->result = SAM_STAT_CHECK_CONDITION;
- memset(&scp->sense_buffer,
- 0, sizeof(scp->sense_buffer));
memcpy(&scp->sense_buffer, &req->sg_list,
- min(sizeof(scp->sense_buffer),
+ min_t(size_t, SCSI_SENSE_BUFFERSIZE,
le32_to_cpu(req->dataxfer_length)));
break;
@@ -391,15 +589,33 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
free_req(hba, &hba->reqs[tag]);
}
-void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
+{
+ struct hpt_iop_request_scsi_command *req;
+ u32 tag;
+
+ if (hba->iopintf_v2) {
+ tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
+ req = hba->reqs[tag].req_virt;
+ if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+ } else {
+ tag = _tag;
+ req = hba->reqs[tag].req_virt;
+ }
+
+ hptiop_finish_scsi_req(hba, tag, req);
+}
+
+void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
struct hpt_iop_request_header __iomem *req;
struct hpt_iop_request_ioctl_command __iomem *p;
struct hpt_ioctl_k *arg;
req = (struct hpt_iop_request_header __iomem *)
- ((unsigned long)hba->iop + tag);
- dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
+ ((unsigned long)hba->u.itl.iop + tag);
+ dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
"result=%d, context=0x%x tag=%d\n",
req, readl(&req->type), readl(&req->result),
readl(&req->context), tag);
@@ -427,7 +643,7 @@ void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
arg->result = HPT_IOCTL_RESULT_FAILED;
arg->done(arg);
- writel(tag, &hba->iop->outbound_queue);
+ writel(tag, &hba->u.itl.iop->outbound_queue);
}
static irqreturn_t hptiop_intr(int irq, void *dev_id)
@@ -437,7 +653,7 @@ static irqreturn_t hptiop_intr(int irq, void *dev_id)
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- handled = __iop_intr(hba);
+ handled = hba->ops->iop_intr(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
return handled;
@@ -469,6 +685,57 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
return HPT_SCP(scp)->sgcnt;
}
+static void hptiop_post_req_itl(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+
+ reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+ (u32)_req->index);
+ reqhdr->context_hi32 = 0;
+
+ if (hba->iopintf_v2) {
+ u32 size, size_bits;
+
+ size = le32_to_cpu(reqhdr->size);
+ if (size < 256)
+ size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
+ else if (size < 512)
+ size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
+ else
+ size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
+ IOPMU_QUEUE_ADDR_HOST_BIT;
+ writel(_req->req_shifted_phy | size_bits,
+ &hba->u.itl.iop->inbound_queue);
+ } else
+ writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
+ &hba->u.itl.iop->inbound_queue);
+}
+
+static void hptiop_post_req_mv(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+ u32 size, size_bit;
+
+ reqhdr->context = cpu_to_le32(_req->index<<8 |
+ IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
+ reqhdr->context_hi32 = 0;
+ size = le32_to_cpu(reqhdr->size);
+
+ if (size <= 256)
+ size_bit = 0;
+ else if (size <= 256*2)
+ size_bit = 1;
+ else if (size <= 256*3)
+ size_bit = 2;
+ else
+ size_bit = 3;
+
+ mv_inbound_write((_req->req_shifted_phy << 5) |
+ MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
+}
+
static int hptiop_queuecommand(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
{
@@ -518,9 +785,6 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
- req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
- (u32)_req->index);
- req->header.context_hi32 = 0;
req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
req->channel = scp->device->channel;
req->target = scp->device->id;
@@ -531,21 +795,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
+ sg_count * sizeof(struct hpt_iopsg));
memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
-
- if (hba->iopintf_v2) {
- u32 size_bits;
- if (req->header.size < 256)
- size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
- else if (req->header.size < 512)
- size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
- else
- size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
- IOPMU_QUEUE_ADDR_HOST_BIT;
- writel(_req->req_shifted_phy | size_bits, &hba->iop->inbound_queue);
- } else
- writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
- &hba->iop->inbound_queue);
-
+ hba->ops->post_req(hba, _req);
return 0;
cmd_done:
@@ -563,9 +813,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
{
if (atomic_xchg(&hba->resetting, 1) == 0) {
atomic_inc(&hba->reset_count);
- writel(IOPMU_INBOUND_MSG0_RESET,
- &hba->iop->inbound_msgaddr0);
- hptiop_pci_posting_flush(hba->iop);
+ hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
}
wait_event_timeout(hba->reset_wq,
@@ -601,8 +849,10 @@ static int hptiop_reset(struct scsi_cmnd *scp)
static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
int queue_depth)
{
- if(queue_depth > 256)
- queue_depth = 256;
+ struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
+
+ if (queue_depth > hba->max_requests)
+ queue_depth = hba->max_requests;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
return queue_depth;
}
@@ -663,6 +913,26 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = hptiop_adjust_disk_queue_depth,
};
+static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
+{
+ hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
+ 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
+ if (hba->u.mv.internal_req)
+ return 0;
+ else
+ return -1;
+}
+
+static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
+{
+ if (hba->u.mv.internal_req) {
+ dma_free_coherent(&hba->pcidev->dev, 0x800,
+ hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
+ return 0;
+ } else
+ return -1;
+}
+
static int __devinit hptiop_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
@@ -708,6 +978,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba = (struct hptiop_hba *)host->hostdata;
+ hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
hba->pcidev = pcidev;
hba->host = host;
hba->initialized = 0;
@@ -725,16 +996,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
host->n_io_port = 0;
host->irq = pcidev->irq;
- if (hptiop_map_pci_bar(hba))
+ if (hba->ops->map_pci_bar(hba))
goto free_scsi_host;
- if (iop_wait_ready(hba->iop, 20000)) {
+ if (hba->ops->iop_wait_ready(hba, 20000)) {
printk(KERN_ERR "scsi%d: firmware not ready\n",
hba->host->host_no);
goto unmap_pci_bar;
}
- if (iop_get_config(hba, &iop_config)) {
+ if (hba->ops->internal_memalloc) {
+ if (hba->ops->internal_memalloc(hba)) {
+ printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ }
+
+ if (hba->ops->get_config(hba, &iop_config)) {
printk(KERN_ERR "scsi%d: get config failed\n",
hba->host->host_no);
goto unmap_pci_bar;
@@ -770,7 +1049,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
set_config.vbus_id = cpu_to_le16(host->host_no);
set_config.max_host_request_size = cpu_to_le16(req_size);
- if (iop_set_config(hba, &set_config)) {
+ if (hba->ops->set_config(hba, &set_config)) {
printk(KERN_ERR "scsi%d: set config failed\n",
hba->host->host_no);
goto unmap_pci_bar;
@@ -839,21 +1118,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
free_request_mem:
dma_free_coherent(&hba->pcidev->dev,
- hba->req_size*hba->max_requests + 0x20,
+ hba->req_size * hba->max_requests + 0x20,
hba->dma_coherent, hba->dma_coherent_handle);
free_request_irq:
free_irq(hba->pcidev->irq, hba);
unmap_pci_bar:
- iounmap(hba->iop);
+ if (hba->ops->internal_memfree)
+ hba->ops->internal_memfree(hba);
-free_pci_regions:
- pci_release_regions(pcidev) ;
+ hba->ops->unmap_pci_bar(hba);
free_scsi_host:
scsi_host_put(host);
+free_pci_regions:
+ pci_release_regions(pcidev);
+
disable_pci_device:
pci_disable_device(pcidev);
@@ -865,8 +1147,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
{
struct Scsi_Host *host = pci_get_drvdata(pcidev);
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
- struct hpt_iopmu __iomem *iop = hba->iop;
- u32 int_mask;
dprintk("hptiop_shutdown(%p)\n", hba);
@@ -876,11 +1156,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
hba->host->host_no);
/* disable all outbound interrupts */
- int_mask = readl(&iop->outbound_intmask);
+ hba->ops->disable_intr(hba);
+}
+
+static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
+{
+ u32 int_mask;
+
+ int_mask = readl(&hba->u.itl.iop->outbound_intmask);
writel(int_mask |
IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
- &iop->outbound_intmask);
- hptiop_pci_posting_flush(iop);
+ &hba->u.itl.iop->outbound_intmask);
+ readl(&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
+{
+ writel(0, &hba->u.mv.regs->outbound_intmask);
+ readl(&hba->u.mv.regs->outbound_intmask);
}
static void hptiop_remove(struct pci_dev *pcidev)
@@ -901,7 +1194,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
hba->dma_coherent,
hba->dma_coherent_handle);
- iounmap(hba->iop);
+ if (hba->ops->internal_memfree)
+ hba->ops->internal_memfree(hba);
+
+ hba->ops->unmap_pci_bar(hba);
pci_release_regions(hba->pcidev);
pci_set_drvdata(hba->pcidev, NULL);
@@ -910,11 +1206,50 @@ static void hptiop_remove(struct pci_dev *pcidev)
scsi_host_put(host);
}
+static struct hptiop_adapter_ops hptiop_itl_ops = {
+ .iop_wait_ready = iop_wait_ready_itl,
+ .internal_memalloc = 0,
+ .internal_memfree = 0,
+ .map_pci_bar = hptiop_map_pci_bar_itl,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_itl,
+ .enable_intr = hptiop_enable_intr_itl,
+ .disable_intr = hptiop_disable_intr_itl,
+ .get_config = iop_get_config_itl,
+ .set_config = iop_set_config_itl,
+ .iop_intr = iop_intr_itl,
+ .post_msg = hptiop_post_msg_itl,
+ .post_req = hptiop_post_req_itl,
+};
+
+static struct hptiop_adapter_ops hptiop_mv_ops = {
+ .iop_wait_ready = iop_wait_ready_mv,
+ .internal_memalloc = hptiop_internal_memalloc_mv,
+ .internal_memfree = hptiop_internal_memfree_mv,
+ .map_pci_bar = hptiop_map_pci_bar_mv,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_mv,
+ .enable_intr = hptiop_enable_intr_mv,
+ .disable_intr = hptiop_disable_intr_mv,
+ .get_config = iop_get_config_mv,
+ .set_config = iop_set_config_mv,
+ .iop_intr = iop_intr_mv,
+ .post_msg = hptiop_post_msg_mv,
+ .post_req = hptiop_post_req_mv,
+};
+
static struct pci_device_id hptiop_id_table[] = {
- { PCI_VDEVICE(TTI, 0x3220) },
- { PCI_VDEVICE(TTI, 0x3320) },
- { PCI_VDEVICE(TTI, 0x3520) },
- { PCI_VDEVICE(TTI, 0x4320) },
+ { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+ { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
{},
};
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 2a5e46e001cb..a0289f219752 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,5 +1,5 @@
/*
- * HighPoint RR3xxx controller driver for Linux
+ * HighPoint RR3xxx/4xxx controller driver for Linux
* Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -18,8 +18,7 @@
#ifndef _HPTIOP_H_
#define _HPTIOP_H_
-struct hpt_iopmu
-{
+struct hpt_iopmu_itl {
__le32 resrved0[4];
__le32 inbound_msgaddr0;
__le32 inbound_msgaddr1;
@@ -54,6 +53,40 @@ struct hpt_iopmu
#define IOPMU_INBOUND_INT_ERROR 8
#define IOPMU_INBOUND_INT_POSTQUEUE 0x10
+#define MVIOP_QUEUE_LEN 512
+
+struct hpt_iopmu_mv {
+ __le32 inbound_head;
+ __le32 inbound_tail;
+ __le32 outbound_head;
+ __le32 outbound_tail;
+ __le32 inbound_msg;
+ __le32 outbound_msg;
+ __le32 reserve[10];
+ __le64 inbound_q[MVIOP_QUEUE_LEN];
+ __le64 outbound_q[MVIOP_QUEUE_LEN];
+};
+
+struct hpt_iopmv_regs {
+ __le32 reserved[0x20400 / 4];
+ __le32 inbound_doorbell;
+ __le32 inbound_intmask;
+ __le32 outbound_doorbell;
+ __le32 outbound_intmask;
+};
+
+#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
+#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
+
+#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32 0xffffffff
+#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT 1
+#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
+
+#define MVIOP_MU_INBOUND_INT_MSG 1
+#define MVIOP_MU_INBOUND_INT_POSTQUEUE 2
+#define MVIOP_MU_OUTBOUND_INT_MSG 1
+#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+
enum hpt_iopmu_message {
/* host-to-iop messages */
IOPMU_INBOUND_MSG0_NOP = 0,
@@ -72,8 +105,7 @@ enum hpt_iopmu_message {
IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
};
-struct hpt_iop_request_header
-{
+struct hpt_iop_request_header {
__le32 size;
__le32 type;
__le32 flags;
@@ -104,11 +136,10 @@ enum hpt_iop_result_type {
IOP_RESULT_RESET,
IOP_RESULT_INVALID_REQUEST,
IOP_RESULT_BAD_TARGET,
- IOP_RESULT_MODE_SENSE_CHECK_CONDITION,
+ IOP_RESULT_CHECK_CONDITION,
};
-struct hpt_iop_request_get_config
-{
+struct hpt_iop_request_get_config {
struct hpt_iop_request_header header;
__le32 interface_version;
__le32 firmware_version;
@@ -121,8 +152,7 @@ struct hpt_iop_request_get_config
__le32 sdram_size;
};
-struct hpt_iop_request_set_config
-{
+struct hpt_iop_request_set_config {
struct hpt_iop_request_header header;
__le32 iop_id;
__le16 vbus_id;
@@ -130,15 +160,13 @@ struct hpt_iop_request_set_config
__le32 reserve[6];
};
-struct hpt_iopsg
-{
+struct hpt_iopsg {
__le32 size;
__le32 eot; /* non-zero: end of table */
__le64 pci_address;
};
-struct hpt_iop_request_block_command
-{
+struct hpt_iop_request_block_command {
struct hpt_iop_request_header header;
u8 channel;
u8 target;
@@ -156,8 +184,7 @@ struct hpt_iop_request_block_command
#define IOP_BLOCK_COMMAND_FLUSH 4
#define IOP_BLOCK_COMMAND_SHUTDOWN 5
-struct hpt_iop_request_scsi_command
-{
+struct hpt_iop_request_scsi_command {
struct hpt_iop_request_header header;
u8 channel;
u8 target;
@@ -168,8 +195,7 @@ struct hpt_iop_request_scsi_command
struct hpt_iopsg sg_list[1];
};
-struct hpt_iop_request_ioctl_command
-{
+struct hpt_iop_request_ioctl_command {
struct hpt_iop_request_header header;
__le32 ioctl_code;
__le32 inbuf_size;
@@ -182,11 +208,11 @@ struct hpt_iop_request_ioctl_command
#define HPTIOP_MAX_REQUESTS 256u
struct hptiop_request {
- struct hptiop_request * next;
- void * req_virt;
- u32 req_shifted_phy;
- struct scsi_cmnd * scp;
- int index;
+ struct hptiop_request *next;
+ void *req_virt;
+ u32 req_shifted_phy;
+ struct scsi_cmnd *scp;
+ int index;
};
struct hpt_scsi_pointer {
@@ -198,9 +224,21 @@ struct hpt_scsi_pointer {
#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
struct hptiop_hba {
- struct hpt_iopmu __iomem * iop;
- struct Scsi_Host * host;
- struct pci_dev * pcidev;
+ struct hptiop_adapter_ops *ops;
+ union {
+ struct {
+ struct hpt_iopmu_itl __iomem *iop;
+ } itl;
+ struct {
+ struct hpt_iopmv_regs *regs;
+ struct hpt_iopmu_mv __iomem *mu;
+ void *internal_req;
+ dma_addr_t internal_req_phy;
+ } mv;
+ } u;
+
+ struct Scsi_Host *host;
+ struct pci_dev *pcidev;
/* IOP config info */
u32 interface_version;
@@ -213,15 +251,15 @@ struct hptiop_hba {
u32 req_size; /* host-allocated request buffer size */
- int iopintf_v2: 1;
- int initialized: 1;
- int msg_done: 1;
+ u32 iopintf_v2: 1;
+ u32 initialized: 1;
+ u32 msg_done: 1;
struct hptiop_request * req_list;
struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
/* used to free allocated dma area */
- void * dma_coherent;
+ void *dma_coherent;
dma_addr_t dma_coherent_handle;
atomic_t reset_count;
@@ -231,19 +269,35 @@ struct hptiop_hba {
wait_queue_head_t ioctl_wq;
};
-struct hpt_ioctl_k
-{
+struct hpt_ioctl_k {
struct hptiop_hba * hba;
u32 ioctl_code;
u32 inbuf_size;
u32 outbuf_size;
- void * inbuf;
- void * outbuf;
- u32 * bytes_returned;
+ void *inbuf;
+ void *outbuf;
+ u32 *bytes_returned;
void (*done)(struct hpt_ioctl_k *);
int result; /* HPT_IOCTL_RESULT_ */
};
+struct hptiop_adapter_ops {
+ int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
+ int (*internal_memalloc)(struct hptiop_hba *hba);
+ int (*internal_memfree)(struct hptiop_hba *hba);
+ int (*map_pci_bar)(struct hptiop_hba *hba);
+ void (*unmap_pci_bar)(struct hptiop_hba *hba);
+ void (*enable_intr)(struct hptiop_hba *hba);
+ void (*disable_intr)(struct hptiop_hba *hba);
+ int (*get_config)(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config);
+ int (*set_config)(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config);
+ int (*iop_intr)(struct hptiop_hba *hba);
+ void (*post_msg)(struct hptiop_hba *hba, u32 msg);
+ void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
+};
+
#define HPT_IOCTL_RESULT_OK 0
#define HPT_IOCTL_RESULT_FAILED (-1)
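The new MV definitions above carry the request index and request type inside the 64-bit queue entry itself: hptiop_post_req_mv() packs index<<8 | type<<5 into the context, and hptiop_request_callback_mv() recovers them with (tag >> 5) & 0x7 and tag >> 8, after checking the return-context marker in the low bits. A small sketch of that packing, using placeholder constants rather than the driver's macros:

    /* Tag packing: request type in bits 5..7, request index in bits 8 and up. */
    #include <assert.h>
    #include <stdint.h>

    #define REQ_TYPE_SCSI   1u   /* placeholder for IOP_REQUEST_TYPE_SCSI_COMMAND */
    #define RETURN_CONTEXT  2u   /* placeholder for MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT */

    static uint64_t mv_pack_tag(uint32_t index, uint32_t type)
    {
            return ((uint64_t)index << 8) | (type << 5) | RETURN_CONTEXT;
    }

    int main(void)
    {
            uint64_t tag = mv_pack_tag(42, REQ_TYPE_SCSI);

            assert(((tag >> 5) & 0x7) == REQ_TYPE_SCSI); /* how the callback reads req_type */
            assert((tag >> 8) == 42);                    /* how it finds hba->reqs[tag >> 8] */
            return 0;
    }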
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5f2396c03958..30819012898f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -629,6 +629,16 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
list_del(&evt_struct->list);
del_timer(&evt_struct->timer);
+ /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+ * Firmware will send a CRQ with a transport event (0xFF) to
+ * tell this client what has happened to the transport. This
+ * will be handled in ibmvscsi_handle_crq()
+ */
+ if (rc == H_CLOSED) {
+ dev_warn(hostdata->dev, "send warning. "
+ "Receive queue closed, will retry.\n");
+ goto send_busy;
+ }
dev_err(hostdata->dev, "send error %d\n", rc);
atomic_inc(&hostdata->request_limit);
goto send_error;
@@ -976,58 +986,74 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
int rsp_rc;
unsigned long flags;
u16 lun = lun_from_dev(cmd->device);
+ unsigned long wait_switch = 0;
/* First, find this command in our sent list so we can figure
* out the correct tag
*/
spin_lock_irqsave(hostdata->host->host_lock, flags);
- found_evt = NULL;
- list_for_each_entry(tmp_evt, &hostdata->sent, list) {
- if (tmp_evt->cmnd == cmd) {
- found_evt = tmp_evt;
- break;
+ wait_switch = jiffies + (init_timeout * HZ);
+ do {
+ found_evt = NULL;
+ list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+ if (tmp_evt->cmnd == cmd) {
+ found_evt = tmp_evt;
+ break;
+ }
}
- }
- if (!found_evt) {
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- return SUCCESS;
- }
+ if (!found_evt) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ return SUCCESS;
+ }
- evt = get_event_struct(&hostdata->pool);
- if (evt == NULL) {
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
- return FAILED;
- }
+ evt = get_event_struct(&hostdata->pool);
+ if (evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to allocate abort event\n");
+ return FAILED;
+ }
- init_event_struct(evt,
- sync_completion,
- VIOSRP_SRP_FORMAT,
- init_timeout);
+ init_event_struct(evt,
+ sync_completion,
+ VIOSRP_SRP_FORMAT,
+ init_timeout);
- tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+ tsk_mgmt = &evt->iu.srp.tsk_mgmt;
- /* Set up an abort SRP command */
- memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
- tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
- tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
- tsk_mgmt->task_tag = (u64) found_evt;
-
- sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
- tsk_mgmt->lun, tsk_mgmt->task_tag);
-
- evt->sync_srp = &srp_rsp;
- init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+ /* Set up an abort SRP command */
+ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+ tsk_mgmt->opcode = SRP_TSK_MGMT;
+ tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+ tsk_mgmt->task_tag = (u64) found_evt;
+
+ evt->sync_srp = &srp_rsp;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+
+ if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+ break;
+
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ } while (time_before(jiffies, wait_switch));
+
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, cmd->device,
"failed to send abort() event. rc=%d\n", rsp_rc);
return FAILED;
}
+ sdev_printk(KERN_INFO, cmd->device,
+ "aborting command. lun 0x%lx, tag 0x%lx\n",
+ (((u64) lun) << 48), (u64) found_evt);
+
wait_for_completion(&evt->comp);
/* make sure we got a good response */
@@ -1099,41 +1125,56 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
int rsp_rc;
unsigned long flags;
u16 lun = lun_from_dev(cmd->device);
+ unsigned long wait_switch = 0;
spin_lock_irqsave(hostdata->host->host_lock, flags);
- evt = get_event_struct(&hostdata->pool);
- if (evt == NULL) {
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
- return FAILED;
- }
+ wait_switch = jiffies + (init_timeout * HZ);
+ do {
+ evt = get_event_struct(&hostdata->pool);
+ if (evt == NULL) {
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to allocate reset event\n");
+ return FAILED;
+ }
- init_event_struct(evt,
- sync_completion,
- VIOSRP_SRP_FORMAT,
- init_timeout);
+ init_event_struct(evt,
+ sync_completion,
+ VIOSRP_SRP_FORMAT,
+ init_timeout);
- tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+ tsk_mgmt = &evt->iu.srp.tsk_mgmt;
- /* Set up a lun reset SRP command */
- memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
- tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
- tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
+ /* Set up a lun reset SRP command */
+ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+ tsk_mgmt->opcode = SRP_TSK_MGMT;
+ tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
- sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
- tsk_mgmt->lun);
+ evt->sync_srp = &srp_rsp;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+
+ if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+ break;
+
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ } while (time_before(jiffies, wait_switch));
- evt->sync_srp = &srp_rsp;
- init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, cmd->device,
"failed to send reset event. rc=%d\n", rsp_rc);
return FAILED;
}
+ sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+ (((u64) lun) << 48));
+
wait_for_completion(&evt->comp);
/* make sure we got a good response */
@@ -1386,8 +1427,10 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
unsigned long lock_flags = 0;
spin_lock_irqsave(shost->host_lock, lock_flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ sdev->timeout = 60 * HZ;
+ }
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
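The ibmvscsi abort and device-reset handlers above now retry while ibmvscsi_send_srp_event() reports SCSI_MLQUEUE_HOST_BUSY, dropping the host lock to sleep 10 ms between attempts and giving up once an init_timeout-based deadline passes. A rough userspace sketch of that retry-until-deadline shape, with the locking and sleeping reduced to comments:

    #include <stdio.h>
    #include <time.h>

    #define HOST_BUSY 1

    static int send_event(int attempt)
    {
            return attempt < 3 ? HOST_BUSY : 0;   /* busy a few times, then accepted */
    }

    int main(void)
    {
            time_t deadline = time(NULL) + 5;     /* plays the role of init_timeout * HZ */
            int rc, attempt = 0;

            do {
                    /* spin_lock_irqsave(host_lock): build + send the task-management request */
                    rc = send_event(attempt++);
                    if (rc != HOST_BUSY)
                            break;                /* accepted (or hard failure): stop retrying */
                    /* spin_unlock, msleep(10), re-lock happen here in the driver */
            } while (time(NULL) < deadline);

            printf("rc=%d after %d attempt(s)\n", rc, attempt);
            return rc ? 1 : 0;
    }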
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 82bcab688b44..d63f11e95abf 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -292,7 +292,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
cmd->usg_sg);
- if (sc->use_sg)
+ if (scsi_sg_count(sc))
err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
spin_lock_irqsave(&target->lock, flags);
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index a3d0c6b14958..f97d172844be 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -837,19 +837,16 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
/* Phase 4 - Setup scatter/gather buffers */
case 4:
- if (cmd->use_sg) {
- /* if many buffers are available, start filling the first */
- cmd->SCp.buffer =
- (struct scatterlist *) cmd->request_buffer;
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
} else {
- /* else fill the only available buffer */
cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = cmd->request_bufflen;
- cmd->SCp.ptr = cmd->request_buffer;
+ cmd->SCp.this_residual = 0;
+ cmd->SCp.ptr = NULL;
}
- cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
if (cmd->SCp.this_residual & 0x01)
cmd->SCp.this_residual++;
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index c8b452f2878c..8053b1e86ccb 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -369,16 +369,16 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
* - SCp.phase records this command's SRCID_ER bit setting
*/
- if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
- cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = (char *) cmd->request_buffer;
- cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
}
cmd->SCp.have_data_in = 0;
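The imm and in2000 hunks above drop the old use_sg/request_buffer fields in favour of the scsi_sglist()/scsi_sg_count()/scsi_bufflen() accessors; since the midlayer now always hands over a scatterlist, the old flat-buffer branch collapses into the no-data case. A rough sketch of that accessor-based setup, with local types standing in for the SCSI midlayer's:

    #include <stddef.h>
    #include <stdio.h>

    struct sg_entry { void *addr; unsigned int length; };

    struct demo_cmd {
            struct sg_entry *sglist;   /* stands in for scsi_sglist(cmd)   */
            int sg_count;              /* stands in for scsi_sg_count(cmd) */
    };

    static void setup_data_pointers(struct demo_cmd *cmd,
                                    struct sg_entry **buf, unsigned int *residual)
    {
            if (cmd->sg_count) {
                    *buf = cmd->sglist;              /* start with the first segment */
                    *residual = cmd->sglist->length;
            } else {
                    *buf = NULL;                     /* no data phase at all */
                    *residual = 0;
            }
    }

    int main(void)
    {
            struct sg_entry sg = { .addr = (void *)0x1000, .length = 512 };
            struct demo_cmd cmd = { .sglist = &sg, .sg_count = 1 };
            struct sg_entry *buf;
            unsigned int residual;

            setup_data_pointers(&cmd, &buf, &residual);
            printf("first segment: %u bytes\n", residual);
            return 0;
    }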
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index aa0df0a4b22a..73270ff892d9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -84,7 +84,7 @@
/*
* Global Data
*/
-static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
+static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
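The ipr.c hunk above swaps the two-step list declaration for the LIST_HEAD() shorthand. A self-contained sketch of what that shorthand expands to, with local stand-ins for the kernel's list types:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

    static LIST_HEAD(demo_head);     /* one declaration instead of two steps */

    int main(void)
    {
            printf("list starts empty: %d\n", demo_head.next == &demo_head);
            return 0;
    }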
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 5c5a9b2628fc..7505cca8e68e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -389,17 +389,17 @@ static struct pci_device_id ips_pci_table[] = {
MODULE_DEVICE_TABLE( pci, ips_pci_table );
static char ips_hot_plug_name[] = "ips";
-
+
static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
static void __devexit ips_remove_device(struct pci_dev *pci_dev);
-
+
static struct pci_driver ips_pci_driver = {
.name = ips_hot_plug_name,
.id_table = ips_pci_table,
.probe = ips_insert_device,
.remove = __devexit_p(ips_remove_device),
};
-
+
/*
* Necessary forward function protoypes
@@ -587,7 +587,7 @@ static void
ips_setup_funclist(ips_ha_t * ha)
{
- /*
+ /*
* Setup Functions
*/
if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) {
@@ -702,12 +702,8 @@ ips_release(struct Scsi_Host *sh)
/* free extra memory */
ips_free(ha);
- /* Free I/O Region */
- if (ha->io_addr)
- release_region(ha->io_addr, ha->io_len);
-
/* free IRQ */
- free_irq(ha->irq, ha);
+ free_irq(ha->pcidev->irq, ha);
scsi_host_put(sh);
@@ -1637,7 +1633,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
return (IPS_FAILURE);
}
- if (ha->device_id == IPS_DEVICEID_COPPERHEAD &&
+ if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
pt->CoppCP.cmd.flashfw.op_code ==
IPS_CMD_RW_BIOSFW) {
ret = ips_flash_copperhead(ha, pt, scb);
@@ -2021,7 +2017,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
pt->ExtendedStatus = scb->extended_status;
pt->AdapterType = ha->ad_type;
- if (ha->device_id == IPS_DEVICEID_COPPERHEAD &&
+ if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
(scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
ips_free_flash_copperhead(ha);
@@ -2075,13 +2071,13 @@ ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len)
ha->mem_ptr);
}
- copy_info(&info, "\tIRQ number : %d\n", ha->irq);
+ copy_info(&info, "\tIRQ number : %d\n", ha->pcidev->irq);
/* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */
/* That keeps everything happy for "text" operations on the proc file. */
if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) {
- if (ha->nvram->bios_low[3] == 0) {
+ if (ha->nvram->bios_low[3] == 0) {
copy_info(&info,
"\tBIOS Version : %c%c%c%c%c%c%c\n",
ha->nvram->bios_high[0], ha->nvram->bios_high[1],
@@ -2232,31 +2228,31 @@ ips_identify_controller(ips_ha_t * ha)
{
METHOD_TRACE("ips_identify_controller", 1);
- switch (ha->device_id) {
+ switch (ha->pcidev->device) {
case IPS_DEVICEID_COPPERHEAD:
- if (ha->revision_id <= IPS_REVID_SERVERAID) {
+ if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
ha->ad_type = IPS_ADTYPE_SERVERAID;
- } else if (ha->revision_id == IPS_REVID_SERVERAID2) {
+ } else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
ha->ad_type = IPS_ADTYPE_SERVERAID2;
- } else if (ha->revision_id == IPS_REVID_NAVAJO) {
+ } else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
ha->ad_type = IPS_ADTYPE_NAVAJO;
- } else if ((ha->revision_id == IPS_REVID_SERVERAID2)
+ } else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
&& (ha->slot_num == 0)) {
ha->ad_type = IPS_ADTYPE_KIOWA;
- } else if ((ha->revision_id >= IPS_REVID_CLARINETP1) &&
- (ha->revision_id <= IPS_REVID_CLARINETP3)) {
+ } else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
+ (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
if (ha->enq->ucMaxPhysicalDevices == 15)
ha->ad_type = IPS_ADTYPE_SERVERAID3L;
else
ha->ad_type = IPS_ADTYPE_SERVERAID3;
- } else if ((ha->revision_id >= IPS_REVID_TROMBONE32) &&
- (ha->revision_id <= IPS_REVID_TROMBONE64)) {
+ } else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
+ (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
ha->ad_type = IPS_ADTYPE_SERVERAID4H;
}
break;
case IPS_DEVICEID_MORPHEUS:
- switch (ha->subdevice_id) {
+ switch (ha->pcidev->subsystem_device) {
case IPS_SUBDEVICEID_4L:
ha->ad_type = IPS_ADTYPE_SERVERAID4L;
break;
@@ -2285,7 +2281,7 @@ ips_identify_controller(ips_ha_t * ha)
break;
case IPS_DEVICEID_MARCO:
- switch (ha->subdevice_id) {
+ switch (ha->pcidev->subsystem_device) {
case IPS_SUBDEVICEID_6M:
ha->ad_type = IPS_ADTYPE_SERVERAID6M;
break;
@@ -2332,20 +2328,20 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
strncpy(ha->bios_version, " ?", 8);
- if (ha->device_id == IPS_DEVICEID_COPPERHEAD) {
+ if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
if (IPS_USE_MEMIO(ha)) {
/* Memory Mapped I/O */
/* test 1st byte */
writel(0, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
return;
writel(1, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
@@ -2353,20 +2349,20 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
/* Get Major version */
writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
major = readb(ha->mem_ptr + IPS_REG_FLDP);
/* Get Minor version */
writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
minor = readb(ha->mem_ptr + IPS_REG_FLDP);
/* Get SubMinor version */
writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
@@ -2375,14 +2371,14 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
/* test 1st byte */
outl(0, ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
return;
outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
@@ -2390,21 +2386,21 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
/* Get Major version */
outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
major = inb(ha->io_addr + IPS_REG_FLDP);
/* Get Minor version */
outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
minor = inb(ha->io_addr + IPS_REG_FLDP);
/* Get SubMinor version */
outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
subminor = inb(ha->io_addr + IPS_REG_FLDP);
@@ -2740,8 +2736,6 @@ ips_next(ips_ha_t * ha, int intr)
SC->result = DID_OK;
SC->host_scribble = NULL;
- memset(SC->sense_buffer, 0, sizeof (SC->sense_buffer));
-
scb->target_id = SC->device->id;
scb->lun = SC->device->lun;
scb->bus = SC->device->channel;
@@ -2780,10 +2774,11 @@ ips_next(ips_ha_t * ha, int intr)
scb->dcdb.cmd_attribute =
ips_command_direction[scb->scsi_cmd->cmnd[0]];
- /* Allow a WRITE BUFFER Command to Have no Data */
- /* This is Used by Tape Flash Utilites */
- if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) && (scb->data_len == 0))
- scb->dcdb.cmd_attribute = 0;
+ /* Allow a WRITE BUFFER Command to Have no Data */
+ /* This is Used by Tape Flash Utilites */
+ if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
+ (scb->data_len == 0))
+ scb->dcdb.cmd_attribute = 0;
if (!(scb->dcdb.cmd_attribute & 0x3))
scb->dcdb.transfer_length = 0;
@@ -3404,7 +3399,7 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
/* Restrict access to physical DASD */
if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
- ips_scmd_buf_read(scb->scsi_cmd,
+ ips_scmd_buf_read(scb->scsi_cmd,
&inquiryData, sizeof (inquiryData));
if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
errcode = DID_TIME_OUT;
@@ -3438,13 +3433,11 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
(IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
memcpy(scb->scsi_cmd->sense_buffer,
tapeDCDB->sense_info,
- sizeof (scb->scsi_cmd->
- sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
} else {
memcpy(scb->scsi_cmd->sense_buffer,
scb->dcdb.sense_info,
- sizeof (scb->scsi_cmd->
- sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
}
device_error = 2; /* check condition */
}
@@ -3824,7 +3817,6 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
/* attempted, a Check Condition occurred, and Sense */
/* Data indicating an Invalid CDB OpCode is returned. */
sp = (char *) scb->scsi_cmd->sense_buffer;
- memset(sp, 0, sizeof (scb->scsi_cmd->sense_buffer));
sp[0] = 0x70; /* Error Code */
sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. */
@@ -4090,10 +4082,10 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
scb->scsi_cmd->result = errcode << 16;
} else { /* bus == 0 */
/* restrict access to physical drives */
- if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
- ips_scmd_buf_read(scb->scsi_cmd,
+ if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
+ ips_scmd_buf_read(scb->scsi_cmd,
&inquiryData, sizeof (inquiryData));
- if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
+ if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK)
scb->scsi_cmd->result = DID_TIME_OUT << 16;
}
} /* else */
@@ -4393,8 +4385,6 @@ ips_free(ips_ha_t * ha)
ha->mem_ptr = NULL;
}
- if (ha->mem_addr)
- release_mem_region(ha->mem_addr, ha->mem_len);
ha->mem_addr = 0;
}
@@ -4661,8 +4651,8 @@ ips_isinit_morpheus(ips_ha_t * ha)
uint32_t bits;
METHOD_TRACE("ips_is_init_morpheus", 1);
-
- if (ips_isintr_morpheus(ha))
+
+ if (ips_isintr_morpheus(ha))
ips_flush_and_reset(ha);
post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
@@ -4686,7 +4676,7 @@ ips_isinit_morpheus(ips_ha_t * ha)
/* state ( was trying to INIT and an interrupt was already pending ) ... */
/* */
/****************************************************************************/
-static void
+static void
ips_flush_and_reset(ips_ha_t *ha)
{
ips_scb_t *scb;
@@ -4718,9 +4708,9 @@ ips_flush_and_reset(ips_ha_t *ha)
if (ret == IPS_SUCCESS) {
time = 60 * IPS_ONE_SEC; /* Max Wait time is 60 seconds */
done = 0;
-
+
while ((time > 0) && (!done)) {
- done = ips_poll_for_flush_complete(ha);
+ done = ips_poll_for_flush_complete(ha);
/* This may look evil, but it's only done during extremely rare start-up conditions ! */
udelay(1000);
time--;
@@ -4749,17 +4739,17 @@ static int
ips_poll_for_flush_complete(ips_ha_t * ha)
{
IPS_STATUS cstatus;
-
+
while (TRUE) {
cstatus.value = (*ha->func.statupd) (ha);
if (cstatus.value == 0xffffffff) /* If No Interrupt to process */
break;
-
+
/* Success is when we see the Flush Command ID */
- if (cstatus.fields.command_id == IPS_MAX_CMDS )
+ if (cstatus.fields.command_id == IPS_MAX_CMDS)
return 1;
- }
+ }
return 0;
}
@@ -4903,7 +4893,7 @@ ips_init_copperhead(ips_ha_t * ha)
/* Enable busmastering */
outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
/* fix for anaconda64 */
outl(0, ha->io_addr + IPS_REG_NDAE);
@@ -4997,7 +4987,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
/* Enable busmastering */
writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
/* fix for anaconda64 */
writel(0, ha->mem_ptr + IPS_REG_NDAE);
@@ -5142,7 +5132,7 @@ ips_reset_copperhead(ips_ha_t * ha)
METHOD_TRACE("ips_reset_copperhead", 1);
DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
- ips_name, ha->host_num, ha->io_addr, ha->irq);
+ ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
reset_counter = 0;
@@ -5187,7 +5177,7 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
METHOD_TRACE("ips_reset_copperhead_memio", 1);
DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
- ips_name, ha->host_num, ha->mem_addr, ha->irq);
+ ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
reset_counter = 0;
@@ -5233,7 +5223,7 @@ ips_reset_morpheus(ips_ha_t * ha)
METHOD_TRACE("ips_reset_morpheus", 1);
DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
- ips_name, ha->host_num, ha->mem_addr, ha->irq);
+ ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
reset_counter = 0;
@@ -5920,7 +5910,7 @@ ips_read_config(ips_ha_t * ha, int intr)
return (0);
}
-
+
memcpy(ha->conf, ha->ioctl_data, sizeof(*ha->conf));
return (1);
}
@@ -5959,7 +5949,7 @@ ips_readwrite_page5(ips_ha_t * ha, int write, int intr)
scb->cmd.nvram.buffer_addr = ha->ioctl_busaddr;
if (write)
memcpy(ha->ioctl_data, ha->nvram, sizeof(*ha->nvram));
-
+
/* issue the command */
if (((ret =
ips_send_wait(ha, scb, ips_cmd_timeout, intr)) == IPS_FAILURE)
@@ -6196,32 +6186,32 @@ ips_erase_bios(ips_ha_t * ha)
/* Clear the status register */
outl(0, ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
outb(0x50, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* Erase Setup */
outb(0x20, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* Erase Confirm */
outb(0xD0, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* Erase Status */
outb(0x70, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
timeout = 80000; /* 80 seconds */
while (timeout > 0) {
- if (ha->revision_id == IPS_REVID_TROMBONE64) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
outl(0, ha->io_addr + IPS_REG_FLAP);
udelay(25); /* 25 us */
}
@@ -6241,13 +6231,13 @@ ips_erase_bios(ips_ha_t * ha)
/* try to suspend the erase */
outb(0xB0, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* wait for 10 seconds */
timeout = 10000;
while (timeout > 0) {
- if (ha->revision_id == IPS_REVID_TROMBONE64) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
outl(0, ha->io_addr + IPS_REG_FLAP);
udelay(25); /* 25 us */
}
@@ -6277,12 +6267,12 @@ ips_erase_bios(ips_ha_t * ha)
/* Otherwise, we were successful */
/* clear status */
outb(0x50, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* enable reads */
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (0);
@@ -6308,32 +6298,32 @@ ips_erase_bios_memio(ips_ha_t * ha)
/* Clear the status register */
writel(0, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* Erase Setup */
writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* Erase Confirm */
writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* Erase Status */
writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
timeout = 80000; /* 80 seconds */
while (timeout > 0) {
- if (ha->revision_id == IPS_REVID_TROMBONE64) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
writel(0, ha->mem_ptr + IPS_REG_FLAP);
udelay(25); /* 25 us */
}
@@ -6353,13 +6343,13 @@ ips_erase_bios_memio(ips_ha_t * ha)
/* try to suspend the erase */
writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* wait for 10 seconds */
timeout = 10000;
while (timeout > 0) {
- if (ha->revision_id == IPS_REVID_TROMBONE64) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
writel(0, ha->mem_ptr + IPS_REG_FLAP);
udelay(25); /* 25 us */
}
@@ -6389,12 +6379,12 @@ ips_erase_bios_memio(ips_ha_t * ha)
/* Otherwise, we were successful */
/* clear status */
writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* enable reads */
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (0);
@@ -6423,21 +6413,21 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
for (i = 0; i < buffersize; i++) {
/* write a byte */
outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
outb(0x40, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* wait up to one second */
timeout = 1000;
while (timeout > 0) {
- if (ha->revision_id == IPS_REVID_TROMBONE64) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
outl(0, ha->io_addr + IPS_REG_FLAP);
udelay(25); /* 25 us */
}
@@ -6454,11 +6444,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
if (timeout == 0) {
/* timeout error */
outl(0, ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (1);
@@ -6468,11 +6458,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
if (status & 0x18) {
/* programming error */
outl(0, ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (1);
@@ -6481,11 +6471,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
/* Enable reading */
outl(0, ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (0);
@@ -6514,21 +6504,21 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
for (i = 0; i < buffersize; i++) {
/* write a byte */
writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
/* wait up to one second */
timeout = 1000;
while (timeout > 0) {
- if (ha->revision_id == IPS_REVID_TROMBONE64) {
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
writel(0, ha->mem_ptr + IPS_REG_FLAP);
udelay(25); /* 25 us */
}
@@ -6545,11 +6535,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
if (timeout == 0) {
/* timeout error */
writel(0, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (1);
@@ -6559,11 +6549,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
if (status & 0x18) {
/* programming error */
writel(0, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (1);
@@ -6572,11 +6562,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
/* Enable reading */
writel(0, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
return (0);
@@ -6601,14 +6591,14 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
/* test 1st byte */
outl(0, ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
return (1);
outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
return (1);
@@ -6617,7 +6607,7 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
for (i = 2; i < buffersize; i++) {
outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
@@ -6650,14 +6640,14 @@ ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
/* test 1st byte */
writel(0, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
return (1);
writel(1, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
return (1);
@@ -6666,7 +6656,7 @@ ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
for (i = 2; i < buffersize; i++) {
writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
- if (ha->revision_id == IPS_REVID_TROMBONE64)
+ if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
udelay(25); /* 25 us */
checksum =
@@ -6837,24 +6827,18 @@ ips_register_scsi(int index)
}
ha = IPS_HA(sh);
memcpy(ha, oldha, sizeof (ips_ha_t));
- free_irq(oldha->irq, oldha);
+ free_irq(oldha->pcidev->irq, oldha);
/* Install the interrupt handler with the new ha */
- if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
+ if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"Unable to install interrupt handler\n");
- scsi_host_put(sh);
- return -1;
+ goto err_out_sh;
}
kfree(oldha);
- ips_sh[index] = sh;
- ips_ha[index] = ha;
/* Store away needed values for later use */
- sh->io_port = ha->io_addr;
- sh->n_io_port = ha->io_addr ? 255 : 0;
sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
- sh->irq = ha->irq;
sh->sg_tablesize = sh->hostt->sg_tablesize;
sh->can_queue = sh->hostt->can_queue;
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
@@ -6867,10 +6851,21 @@ ips_register_scsi(int index)
sh->max_channel = ha->nbus - 1;
sh->can_queue = ha->max_cmds - 1;
- scsi_add_host(sh, NULL);
+ if (scsi_add_host(sh, &ha->pcidev->dev))
+ goto err_out;
+
+ ips_sh[index] = sh;
+ ips_ha[index] = ha;
+
scsi_scan_host(sh);
return 0;
+
+err_out:
+ free_irq(ha->pcidev->irq, ha);
+err_out_sh:
+ scsi_host_put(sh);
+ return -1;
}
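
For illustration, a minimal sketch of the registration change above: scsi_add_host() now receives &ha->pcidev->dev as the parent, so the host is tied into the driver-model tree under its PCI device, and its return value is checked so the caller can unwind the IRQ and the host on failure. example_add_host is a hypothetical name, not part of this patch.

static int example_add_host(struct Scsi_Host *sh, ips_ha_t *ha)
{
        int err = scsi_add_host(sh, &ha->pcidev->dev);  /* parent = PCI dev */

        if (err)
                return err;     /* caller frees the IRQ and puts the host */

        scsi_scan_host(sh);     /* scan for devices once registration worked */
        return 0;
}
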
/*---------------------------------------------------------------------------*/
@@ -6882,20 +6877,14 @@ ips_register_scsi(int index)
static void __devexit
ips_remove_device(struct pci_dev *pci_dev)
{
- int i;
- struct Scsi_Host *sh;
- ips_ha_t *ha;
+ struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
- for (i = 0; i < IPS_MAX_ADAPTERS; i++) {
- ha = ips_ha[i];
- if (ha) {
- if ((pci_dev->bus->number == ha->pcidev->bus->number) &&
- (pci_dev->devfn == ha->pcidev->devfn)) {
- sh = ips_sh[i];
- ips_release(sh);
- }
- }
- }
+ pci_set_drvdata(pci_dev, NULL);
+
+ ips_release(sh);
+
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
}
/****************************************************************************/
@@ -6949,12 +6938,17 @@ module_exit(ips_module_exit);
static int __devinit
ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
{
- int uninitialized_var(index);
+ int index = -1;
int rc;
METHOD_TRACE("ips_insert_device", 1);
- if (pci_enable_device(pci_dev))
- return -1;
+ rc = pci_enable_device(pci_dev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pci_dev, "ips");
+ if (rc)
+ goto err_out;
rc = ips_init_phase1(pci_dev, &index);
if (rc == SUCCESS)
@@ -6970,6 +6964,19 @@ ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
ips_num_controllers++;
ips_next_controller = ips_num_controllers;
+
+ if (rc < 0) {
+ rc = -ENODEV;
+ goto err_out_regions;
+ }
+
+ pci_set_drvdata(pci_dev, ips_sh[index]);
+ return 0;
+
+err_out_regions:
+ pci_release_regions(pci_dev);
+err_out:
+ pci_disable_device(pci_dev);
return rc;
}
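
For illustration, a minimal sketch of the drvdata pairing the probe/remove hunks above introduce: ips_insert_device() stashes the Scsi_Host in the pci_dev, and ips_remove_device() recovers it with pci_get_drvdata() instead of scanning the global ips_ha[] array. The function name is hypothetical and the snippet only restates the pattern, it is not part of this patch.

static void example_drvdata_pairing(struct pci_dev *pdev, struct Scsi_Host *sh)
{
        /* probe path: remember which host belongs to this PCI function */
        pci_set_drvdata(pdev, sh);

        /* remove path: look it up again, drop the link, then tear down */
        sh = pci_get_drvdata(pdev);
        pci_set_drvdata(pdev, NULL);
        ips_release(sh);
}
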
@@ -6992,8 +6999,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
uint32_t mem_len;
uint8_t bus;
uint8_t func;
- uint8_t irq;
- uint16_t subdevice_id;
int j;
int index;
dma_addr_t dma_address;
@@ -7004,7 +7009,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
METHOD_TRACE("ips_init_phase1", 1);
index = IPS_MAX_ADAPTERS;
for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
- if (ips_ha[j] == 0) {
+ if (ips_ha[j] == NULL) {
index = j;
break;
}
@@ -7014,7 +7019,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
return -1;
/* stuff that we get in dev */
- irq = pci_dev->irq;
bus = pci_dev->bus->number;
func = pci_dev->devfn;
@@ -7042,34 +7046,17 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
uint32_t base;
uint32_t offs;
- if (!request_mem_region(mem_addr, mem_len, "ips")) {
- IPS_PRINTK(KERN_WARNING, pci_dev,
- "Couldn't allocate IO Memory space %x len %d.\n",
- mem_addr, mem_len);
- return -1;
- }
-
base = mem_addr & PAGE_MASK;
offs = mem_addr - base;
ioremap_ptr = ioremap(base, PAGE_SIZE);
+ if (!ioremap_ptr)
+ return -1;
mem_ptr = ioremap_ptr + offs;
} else {
ioremap_ptr = NULL;
mem_ptr = NULL;
}
- /* setup I/O mapped area (if applicable) */
- if (io_addr) {
- if (!request_region(io_addr, io_len, "ips")) {
- IPS_PRINTK(KERN_WARNING, pci_dev,
- "Couldn't allocate IO space %x len %d.\n",
- io_addr, io_len);
- return -1;
- }
- }
-
- subdevice_id = pci_dev->subsystem_device;
-
/* found a controller */
ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
if (ha == NULL) {
@@ -7078,13 +7065,11 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
return -1;
}
-
ips_sh[index] = NULL;
ips_ha[index] = ha;
ha->active = 1;
/* Store info in HA structure */
- ha->irq = irq;
ha->io_addr = io_addr;
ha->io_len = io_len;
ha->mem_addr = mem_addr;
@@ -7092,10 +7077,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
ha->mem_ptr = mem_ptr;
ha->ioremap_ptr = ioremap_ptr;
ha->host_num = (uint32_t) index;
- ha->revision_id = pci_dev->revision;
ha->slot_num = PCI_SLOT(pci_dev->devfn);
- ha->device_id = pci_dev->device;
- ha->subdevice_id = subdevice_id;
ha->pcidev = pci_dev;
/*
@@ -7240,7 +7222,7 @@ ips_init_phase2(int index)
}
/* Install the interrupt handler */
- if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
+ if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"Unable to install interrupt handler\n");
return ips_abort_init(ha, index);
@@ -7253,14 +7235,14 @@ ips_init_phase2(int index)
if (!ips_allocatescbs(ha)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"Unable to allocate a CCB\n");
- free_irq(ha->irq, ha);
+ free_irq(ha->pcidev->irq, ha);
return ips_abort_init(ha, index);
}
if (!ips_hainit(ha)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"Unable to initialize controller\n");
- free_irq(ha->irq, ha);
+ free_irq(ha->pcidev->irq, ha);
return ips_abort_init(ha, index);
}
/* Free the temporary SCB */
@@ -7270,7 +7252,7 @@ ips_init_phase2(int index)
if (!ips_allocatescbs(ha)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"Unable to allocate CCBs\n");
- free_irq(ha->irq, ha);
+ free_irq(ha->pcidev->irq, ha);
return ips_abort_init(ha, index);
}
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 3bcbd9ff056b..e0657b6f009c 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -60,14 +60,14 @@
*/
#define IPS_HA(x) ((ips_ha_t *) x->hostdata)
#define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
- #define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
- (ha->revision_id >= IPS_REVID_TROMBONE32) && \
- (ha->revision_id <= IPS_REVID_TROMBONE64)) ? 1 : 0)
- #define IPS_IS_CLARINET(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
- (ha->revision_id >= IPS_REVID_CLARINETP1) && \
- (ha->revision_id <= IPS_REVID_CLARINETP3)) ? 1 : 0)
- #define IPS_IS_MORPHEUS(ha) (ha->device_id == IPS_DEVICEID_MORPHEUS)
- #define IPS_IS_MARCO(ha) (ha->device_id == IPS_DEVICEID_MARCO)
+ #define IPS_IS_TROMBONE(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
+ (ha->pcidev->revision >= IPS_REVID_TROMBONE32) && \
+ (ha->pcidev->revision <= IPS_REVID_TROMBONE64)) ? 1 : 0)
+ #define IPS_IS_CLARINET(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
+ (ha->pcidev->revision >= IPS_REVID_CLARINETP1) && \
+ (ha->pcidev->revision <= IPS_REVID_CLARINETP3)) ? 1 : 0)
+ #define IPS_IS_MORPHEUS(ha) (ha->pcidev->device == IPS_DEVICEID_MORPHEUS)
+ #define IPS_IS_MARCO(ha) (ha->pcidev->device == IPS_DEVICEID_MARCO)
#define IPS_USE_I2O_DELIVER(ha) ((IPS_IS_MORPHEUS(ha) || \
(IPS_IS_TROMBONE(ha) && \
(ips_force_i2o))) ? 1 : 0)
@@ -92,7 +92,7 @@
#ifndef min
#define min(x,y) ((x) < (y) ? x : y)
#endif
-
+
#ifndef __iomem /* For clean compiles in earlier kernels without __iomem annotations */
#define __iomem
#endif
@@ -171,7 +171,7 @@
#define IPS_CMD_DOWNLOAD 0x20
#define IPS_CMD_RW_BIOSFW 0x22
#define IPS_CMD_GET_VERSION_INFO 0xC6
- #define IPS_CMD_RESET_CHANNEL 0x1A
+ #define IPS_CMD_RESET_CHANNEL 0x1A
/*
* Adapter Equates
@@ -458,7 +458,7 @@ typedef struct {
uint32_t reserved3;
uint32_t buffer_addr;
uint32_t reserved4;
-} IPS_IOCTL_CMD, *PIPS_IOCTL_CMD;
+} IPS_IOCTL_CMD, *PIPS_IOCTL_CMD;
typedef struct {
uint8_t op_code;
@@ -552,7 +552,7 @@ typedef struct {
uint32_t cccr;
} IPS_NVRAM_CMD, *PIPS_NVRAM_CMD;
-typedef struct
+typedef struct
{
uint8_t op_code;
uint8_t command_id;
@@ -650,7 +650,7 @@ typedef struct {
uint8_t device_address;
uint8_t cmd_attribute;
uint8_t cdb_length;
- uint8_t reserved_for_LUN;
+ uint8_t reserved_for_LUN;
uint32_t transfer_length;
uint32_t buffer_pointer;
uint16_t sg_count;
@@ -790,7 +790,7 @@ typedef struct {
/* SubSystem Parameter[4] */
#define IPS_GET_VERSION_SUPPORT 0x00018000 /* Mask for Versioning Support */
-typedef struct
+typedef struct
{
uint32_t revision;
uint8_t bootBlkVersion[32];
@@ -1034,7 +1034,6 @@ typedef struct ips_ha {
uint8_t ha_id[IPS_MAX_CHANNELS+1];
uint32_t dcdb_active[IPS_MAX_CHANNELS];
uint32_t io_addr; /* Base I/O address */
- uint8_t irq; /* IRQ for adapter */
uint8_t ntargets; /* Number of targets */
uint8_t nbus; /* Number of buses */
uint8_t nlun; /* Number of Luns */
@@ -1066,10 +1065,7 @@ typedef struct ips_ha {
int ioctl_reset; /* IOCTL Requested Reset Flag */
uint16_t reset_count; /* number of resets */
time_t last_ffdc; /* last time we sent ffdc info*/
- uint8_t revision_id; /* Revision level */
- uint16_t device_id; /* PCI device ID */
uint8_t slot_num; /* PCI Slot Number */
- uint16_t subdevice_id; /* Subsystem device ID */
int ioctl_len; /* size of ioctl buffer */
dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/
uint8_t bios_version[8]; /* BIOS Revision */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 57ce2251abc8..e5be5fd4ef58 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -48,7 +48,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
"Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
-/* #define DEBUG_TCP */
+#undef DEBUG_TCP
#define DEBUG_ASSERT
#ifdef DEBUG_TCP
@@ -67,115 +67,429 @@ MODULE_LICENSE("GPL");
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment);
+
+/*
+ * Scatterlist handling: inside the iscsi_segment, we
+ * remember an index into the scatterlist, and set data/size
+ * to the current scatterlist entry. For highmem pages, we
+ * kmap as needed.
+ *
+ * Note that the page is unmapped when we return from
+ * TCP's data_ready handler, so we may end up mapping and
+ * unmapping the same page repeatedly. The whole reason
+ * for this is that we shouldn't keep the page mapped
+ * outside the softirq.
+ */
+
+/**
+ * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+ * @segment: the buffer object
+ * @sg: scatterlist
+ * @offset: byte offset into that sg entry
+ *
+ * This function sets up the segment so that subsequent
+ * data is copied to the indicated sg entry, at the given
+ * offset.
+ */
static inline void
-iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
+iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+ struct scatterlist *sg, unsigned int offset)
{
- sg_init_one(&ibuf->sg, vbuf, size);
- ibuf->sent = 0;
- ibuf->use_sendmsg = 1;
+ segment->sg = sg;
+ segment->sg_offset = offset;
+ segment->size = min(sg->length - offset,
+ segment->total_size - segment->total_copied);
+ segment->data = NULL;
}
+/**
+ * iscsi_tcp_segment_map - map the current S/G page
+ * @segment: iscsi_segment
+ * @recv: 1 if called from recv path
+ *
+ * We only need to possibly kmap data if scatter lists are being used,
+ * because the iscsi passthrough and internal IO paths will never use high
+ * mem pages.
+ */
static inline void
-iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
+iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
{
- sg_init_table(&ibuf->sg, 1);
- sg_set_page(&ibuf->sg, sg_page(sg), sg->length, sg->offset);
+ struct scatterlist *sg;
+
+ if (segment->data != NULL || !segment->sg)
+ return;
+
+ sg = segment->sg;
+ BUG_ON(segment->sg_mapped);
+ BUG_ON(sg->length == 0);
+
/*
- * Fastpath: sg element fits into single page
+ * If the page count is greater than one it is ok to send
+ * to the network layer's zero copy send path. If not, we
+ * have to take the slow sendmsg path. We always map for the
+ * recv path.
*/
- if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg_page(sg)))
- ibuf->use_sendmsg = 0;
- else
- ibuf->use_sendmsg = 1;
- ibuf->sent = 0;
+ if (page_count(sg_page(sg)) >= 1 && !recv)
+ return;
+
+ debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+ segment);
+ segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+ segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
}
-static inline int
-iscsi_buf_left(struct iscsi_buf *ibuf)
+static inline void
+iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
{
- int rc;
+ debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
- rc = ibuf->sg.length - ibuf->sent;
- BUG_ON(rc < 0);
- return rc;
+ if (segment->sg_mapped) {
+ debug_tcp("iscsi_tcp_segment_unmap valid\n");
+ kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+ segment->sg_mapped = NULL;
+ segment->data = NULL;
+ }
}
+/*
+ * Splice the digest buffer into the buffer
+ */
static inline void
-iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
- u8* crc)
+iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-
- crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
- buf->sg.length += sizeof(u32);
+ segment->data = digest;
+ segment->digest_len = ISCSI_DIGEST_SIZE;
+ segment->total_size += ISCSI_DIGEST_SIZE;
+ segment->size = ISCSI_DIGEST_SIZE;
+ segment->copied = 0;
+ segment->sg = NULL;
+ segment->hash = NULL;
}
+/**
+ * iscsi_tcp_segment_done - check whether the segment is complete
+ * @segment: iscsi segment to check
+ * @recv: set to one if this is called from the recv path
+ * @copied: number of bytes copied
+ *
+ * Check if we're done receiving this segment. If the receive
+ * buffer is full but we expect more data, move on to the
+ * next entry in the scatterlist.
+ *
+ * If the amount of data we received isn't a multiple of 4,
+ * we will transparently receive the pad bytes, too.
+ *
+ * This function must be re-entrant.
+ */
static inline int
-iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
+iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
{
- struct sk_buff *skb = tcp_conn->in.skb;
-
- tcp_conn->in.zero_copy_hdr = 0;
+ static unsigned char padbuf[ISCSI_PAD_LEN];
+ struct scatterlist sg;
+ unsigned int pad;
- if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
- tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
+ debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+ segment->size, recv ? "recv" : "xmit");
+ if (segment->hash && copied) {
/*
- * Zero-copy PDU Header: using connection context
- * to store header pointer.
+ * If a segment is kmapped we must unmap it before sending
+ * to the crypto layer since that will try to kmap it again.
*/
- if (skb_shinfo(skb)->frag_list == NULL &&
- !skb_shinfo(skb)->nr_frags) {
- tcp_conn->in.hdr = (struct iscsi_hdr *)
- ((char*)skb->data + tcp_conn->in.offset);
- tcp_conn->in.zero_copy_hdr = 1;
+ iscsi_tcp_segment_unmap(segment);
+
+ if (!segment->data) {
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, sg_page(segment->sg), copied,
+ segment->copied + segment->sg_offset +
+ segment->sg->offset);
+ } else
+ sg_init_one(&sg, segment->data + segment->copied,
+ copied);
+ crypto_hash_update(segment->hash, &sg, copied);
+ }
+
+ segment->copied += copied;
+ if (segment->copied < segment->size) {
+ iscsi_tcp_segment_map(segment, recv);
+ return 0;
+ }
+
+ segment->total_copied += segment->copied;
+ segment->copied = 0;
+ segment->size = 0;
+
+ /* Unmap the current scatterlist page, if there is one. */
+ iscsi_tcp_segment_unmap(segment);
+
+ /* Do we have more scatterlist entries? */
+ debug_tcp("total copied %u total size %u\n", segment->total_copied,
+ segment->total_size);
+ if (segment->total_copied < segment->total_size) {
+ /* Proceed to the next entry in the scatterlist. */
+ iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+ 0);
+ iscsi_tcp_segment_map(segment, recv);
+ BUG_ON(segment->size == 0);
+ return 0;
+ }
+
+ /* Do we need to handle padding? */
+ pad = iscsi_padding(segment->total_copied);
+ if (pad != 0) {
+ debug_tcp("consume %d pad bytes\n", pad);
+ segment->total_size += pad;
+ segment->size = pad;
+ segment->data = padbuf;
+ return 0;
+ }
+
+ /*
+ * Set us up for transferring the data digest. hdr digest
+ * is completely handled in hdr done function.
+ */
+ if (segment->hash) {
+ crypto_hash_final(segment->hash, segment->digest);
+ iscsi_tcp_segment_splice_digest(segment,
+ recv ? segment->recv_digest : segment->digest);
+ return 0;
+ }
+
+ return 1;
+}
+
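
For illustration, a minimal sketch of the pad size iscsi_padding() is expected to return in iscsi_tcp_segment_done() above: iSCSI data segments are padded to the next ISCSI_PAD_LEN (4-byte) boundary, and the extra bytes are received into a throwaway buffer. example_padding is a hypothetical name used only to make the rule concrete; it is not part of this patch.

static inline unsigned int example_padding(unsigned int len)
{
        unsigned int pad = len & (ISCSI_PAD_LEN - 1);   /* len % 4 */

        return pad ? ISCSI_PAD_LEN - pad : 0;   /* bytes to next 4-byte boundary */
}
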
+/**
+ * iscsi_tcp_xmit_segment - transmit segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to transmit
+ *
+ * This function transmits as much of the buffer as
+ * the network layer will accept, and returns the number of
+ * bytes transmitted.
+ *
+ * If CRC hashing is enabled, the function will compute the
+ * hash as it goes. When the entire segment has been transmitted,
+ * it will retrieve the hash value and send it as well.
+ */
+static int
+iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct socket *sk = tcp_conn->sock;
+ unsigned int copied = 0;
+ int r = 0;
+
+ while (!iscsi_tcp_segment_done(segment, 0, r)) {
+ struct scatterlist *sg;
+ unsigned int offset, copy;
+ int flags = 0;
+
+ r = 0;
+ offset = segment->copied;
+ copy = segment->size - offset;
+
+ if (segment->total_copied + segment->size < segment->total_size)
+ flags |= MSG_MORE;
+
+ /* Use sendpage if we can; else fall back to sendmsg */
+ if (!segment->data) {
+ sg = segment->sg;
+ offset += segment->sg_offset + sg->offset;
+ r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
+ flags);
} else {
- /* ignoring return code since we checked
- * in.copy before */
- skb_copy_bits(skb, tcp_conn->in.offset,
- &tcp_conn->hdr, tcp_conn->hdr_size);
- tcp_conn->in.hdr = &tcp_conn->hdr;
+ struct msghdr msg = { .msg_flags = flags };
+ struct kvec iov = {
+ .iov_base = segment->data + offset,
+ .iov_len = copy
+ };
+
+ r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
}
- tcp_conn->in.offset += tcp_conn->hdr_size;
- tcp_conn->in.copy -= tcp_conn->hdr_size;
- } else {
- int hdr_remains;
- int copylen;
- /*
- * PDU header scattered across SKB's,
- * copying it... This'll happen quite rarely.
- */
+ if (r < 0) {
+ iscsi_tcp_segment_unmap(segment);
+ if (copied || r == -EAGAIN)
+ break;
+ return r;
+ }
+ copied += r;
+ }
+ return copied;
+}
+
+/**
+ * iscsi_tcp_segment_recv - copy data to segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to copy to
+ * @ptr: data pointer
+ * @len: amount of data available
+ *
+ * This function copies up to @len bytes to the
+ * given buffer, and returns the number of bytes
+ * consumed, which can actually be less than @len.
+ *
+ * If hash digest is enabled, the function will update the
+ * hash while copying.
+ * Combining these two operations doesn't buy us a lot (yet),
+ * but in the future we could implement combined copy+crc,
+ * just the way we do for network layer checksums.
+ */
+static int
+iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment, const void *ptr,
+ unsigned int len)
+{
+ unsigned int copy = 0, copied = 0;
+
+ while (!iscsi_tcp_segment_done(segment, 1, copy)) {
+ if (copied == len) {
+ debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+ len);
+ break;
+ }
+
+ copy = min(len - copied, segment->size - segment->copied);
+ debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+ memcpy(segment->data + segment->copied, ptr + copied, copy);
+ copied += copy;
+ }
+ return copied;
+}
- if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
- tcp_conn->in.hdr_offset = 0;
+static inline void
+iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+ unsigned char digest[ISCSI_DIGEST_SIZE])
+{
+ struct scatterlist sg;
- hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
- BUG_ON(hdr_remains <= 0);
+ sg_init_one(&sg, hdr, hdrlen);
+ crypto_hash_digest(hash, &sg, hdrlen, digest);
+}
- copylen = min(tcp_conn->in.copy, hdr_remains);
- skb_copy_bits(skb, tcp_conn->in.offset,
- (char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
- copylen);
+static inline int
+iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ if (!segment->digest_len)
+ return 1;
- debug_tcp("PDU gather offset %d bytes %d in.offset %d "
- "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
- tcp_conn->in.offset, tcp_conn->in.copy);
+ if (memcmp(segment->recv_digest, segment->digest,
+ segment->digest_len)) {
+ debug_scsi("digest mismatch\n");
+ return 0;
+ }
- tcp_conn->in.offset += copylen;
- tcp_conn->in.copy -= copylen;
- if (copylen < hdr_remains) {
- tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
- tcp_conn->in.hdr_offset += copylen;
- return -EAGAIN;
+ return 1;
+}
+
+/*
+ * Helper function to set up segment buffer
+ */
+static inline void
+__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+ iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+ memset(segment, 0, sizeof(*segment));
+ segment->total_size = size;
+ segment->done = done;
+
+ if (hash) {
+ segment->hash = hash;
+ crypto_hash_init(hash);
+ }
+}
+
+static inline void
+iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+ size_t size, iscsi_segment_done_fn_t *done,
+ struct hash_desc *hash)
+{
+ __iscsi_segment_init(segment, size, done, hash);
+ segment->data = data;
+ segment->size = size;
+}
+
+static inline int
+iscsi_segment_seek_sg(struct iscsi_segment *segment,
+ struct scatterlist *sg_list, unsigned int sg_count,
+ unsigned int offset, size_t size,
+ iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+
+ debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+ offset, size);
+ __iscsi_segment_init(segment, size, done, hash);
+ for_each_sg(sg_list, sg, sg_count, i) {
+ debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+ sg->offset);
+ if (offset < sg->length) {
+ iscsi_tcp_segment_init_sg(segment, sg, offset);
+ return 0;
}
- tcp_conn->in.hdr = &tcp_conn->hdr;
- tcp_conn->discontiguous_hdr_cnt++;
- tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ offset -= sg->length;
}
+ return ISCSI_ERR_DATA_OFFSET;
+}
+
+/**
+ * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+ * @tcp_conn: iscsi connection to prep for
+ *
+ * This function always passes NULL for the hash argument, because when this
+ * function is called we do not yet know the final size of the header and want
+ * to delay the digest processing until we know that.
+ */
+static void
+iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+ debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+ tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+ iscsi_segment_init_linear(&tcp_conn->in.segment,
+ tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+ iscsi_tcp_hdr_recv_done, NULL);
+}
+
+/*
+ * Handle incoming reply to any other type of command
+ */
+static int
+iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ int rc = 0;
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_DATA_DGST;
+
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+ conn->data, tcp_conn->in.datalen);
+ if (rc)
+ return rc;
+
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
return 0;
}
+static void
+iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct hash_desc *rx_hash = NULL;
+
+ if (conn->datadgst_en)
+ rx_hash = &tcp_conn->rx_hash;
+
+ iscsi_segment_init_linear(&tcp_conn->in.segment,
+ conn->data, tcp_conn->in.datalen,
+ iscsi_tcp_data_recv_done, rx_hash);
+}
+
/*
* must be called with session lock
*/
@@ -184,7 +498,6 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
struct iscsi_r2t_info *r2t;
- struct scsi_cmnd *sc;
/* flush ctask's r2t queues */
while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
@@ -193,12 +506,12 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
}
- sc = ctask->sc;
- if (unlikely(!sc))
- return;
-
- tcp_ctask->xmstate = XMSTATE_VALUE_IDLE;
- tcp_ctask->r2t = NULL;
+ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ tcp_ctask->r2t = NULL;
+ }
}
/**
@@ -217,11 +530,6 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
int datasn = be32_to_cpu(rhdr->datasn);
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
- /*
- * setup Data-In byte counter (gets decremented..)
- */
- ctask->data_count = tcp_conn->in.datalen;
-
if (tcp_conn->in.datalen == 0)
return 0;
@@ -242,22 +550,20 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
}
if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
- if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
+ if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+ ISCSI_FLAG_DATA_OVERFLOW)) {
int res_count = be32_to_cpu(rhdr->residual_count);
if (res_count > 0 &&
- res_count <= scsi_bufflen(sc)) {
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
scsi_set_resid(sc, res_count);
- sc->result = (DID_OK << 16) | rhdr->cmd_status;
- } else
+ else
sc->result = (DID_BAD_TARGET << 16) |
rhdr->cmd_status;
- } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
- scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
- sc->result = (DID_OK << 16) | rhdr->cmd_status;
- } else
- sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ }
}
conn->datain_pdus_cnt++;
@@ -281,9 +587,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
struct iscsi_r2t_info *r2t)
{
struct iscsi_data *hdr;
- struct scsi_cmnd *sc = ctask->sc;
- int i, sg_count = 0;
- struct scatterlist *sg;
hdr = &r2t->dtask.hdr;
memset(hdr, 0, sizeof(struct iscsi_data));
@@ -307,34 +610,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
conn->dataout_pdus_cnt++;
r2t->sent = 0;
-
- iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
- sizeof(struct iscsi_hdr));
-
- sg = scsi_sglist(sc);
- r2t->sg = NULL;
- for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) {
- /* FIXME: prefetch ? */
- if (sg_count + sg->length > r2t->data_offset) {
- int page_offset;
-
- /* sg page found! */
-
- /* offset within this page */
- page_offset = r2t->data_offset - sg_count;
-
- /* fill in this buffer */
- iscsi_buf_init_sg(&r2t->sendbuf, sg);
- r2t->sendbuf.sg.offset += page_offset;
- r2t->sendbuf.sg.length -= page_offset;
-
- /* xmit logic will continue with next one */
- r2t->sg = sg + 1;
- break;
- }
- sg_count += sg->length;
- }
- BUG_ON(r2t->sg == NULL);
}
/**
@@ -366,14 +641,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
}
/* fill-in new R2T associated with the task */
- spin_lock(&session->lock);
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (!ctask->sc || ctask->mtask ||
- session->state != ISCSI_STATE_LOGGED_IN) {
+ if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
"recovery...\n", ctask->itt);
- spin_unlock(&session->lock);
return 0;
}
@@ -384,7 +656,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_length = be32_to_cpu(rhdr->data_length);
if (r2t->data_length == 0) {
printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
- spin_unlock(&session->lock);
+ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -395,10 +668,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_offset = be32_to_cpu(rhdr->data_offset);
if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
- spin_unlock(&session->lock);
printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
"offset %u and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_bufflen(ctask->sc));
+ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -409,26 +683,55 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
tcp_ctask->exp_datasn = r2tsn + 1;
__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
- set_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
- list_move_tail(&ctask->running, &conn->xmitqueue);
-
- scsi_queue_work(session->host, &conn->xmitwork);
conn->r2t_pdus_cnt++;
- spin_unlock(&session->lock);
+ iscsi_requeue_ctask(ctask);
return 0;
}
+/*
+ * Handle incoming reply to DataIn command
+ */
static int
-iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
+iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+ int rc;
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_DATA_DGST;
+
+ /* check for non-exceptional status */
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+ if (rc)
+ return rc;
+ }
+
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+}
+
+/**
+ * iscsi_tcp_hdr_dissect - process PDU header
+ * @conn: iSCSI connection
+ * @hdr: PDU header
+ *
+ * This function analyzes the header of the PDU received,
+ * and performs several sanity checks. If the PDU is accompanied
+ * by data, the receive buffer is set up to copy the incoming data
+ * to the correct location.
+ */
+static int
+iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
int rc = 0, opcode, ahslen;
- struct iscsi_hdr *hdr;
struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- uint32_t cdgst, rdgst = 0, itt;
-
- hdr = tcp_conn->in.hdr;
+ struct iscsi_cmd_task *ctask;
+ uint32_t itt;
/* verify PDU length */
tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -437,78 +740,73 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
tcp_conn->in.datalen, conn->max_recv_dlength);
return ISCSI_ERR_DATALEN;
}
- tcp_conn->data_copied = 0;
- /* read AHS */
+ /* Additional header segments. So far, we don't
+ * process additional headers.
+ */
ahslen = hdr->hlength << 2;
- tcp_conn->in.offset += ahslen;
- tcp_conn->in.copy -= ahslen;
- if (tcp_conn->in.copy < 0) {
- printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
- "%d bytes\n", ahslen);
- return ISCSI_ERR_AHSLEN;
- }
-
- /* calculate read padding */
- tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
- if (tcp_conn->in.padding) {
- tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
- debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
- }
-
- if (conn->hdrdgst_en) {
- struct scatterlist sg;
-
- sg_init_one(&sg, (u8 *)hdr,
- sizeof(struct iscsi_hdr) + ahslen);
- crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
- (u8 *)&cdgst);
- rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
- ahslen);
- if (cdgst != rdgst) {
- printk(KERN_ERR "iscsi_tcp: hdrdgst error "
- "recv 0x%x calc 0x%x\n", rdgst, cdgst);
- return ISCSI_ERR_HDR_DGST;
- }
- }
opcode = hdr->opcode & ISCSI_OPCODE_MASK;
/* verify itt (itt encoding: age+cid+itt) */
rc = iscsi_verify_itt(conn, hdr, &itt);
- if (rc == ISCSI_ERR_NO_SCSI_CMD) {
- tcp_conn->in.datalen = 0; /* force drop */
- return 0;
- } else if (rc)
+ if (rc)
return rc;
- debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
- opcode, tcp_conn->in.offset, tcp_conn->in.copy,
- ahslen, tcp_conn->in.datalen);
+ debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+ opcode, ahslen, tcp_conn->in.datalen);
switch(opcode) {
case ISCSI_OP_SCSI_DATA_IN:
- tcp_conn->in.ctask = session->cmds[itt];
- rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
+ ctask = session->cmds[itt];
+ spin_lock(&conn->session->lock);
+ rc = iscsi_data_rsp(conn, ctask);
+ spin_unlock(&conn->session->lock);
if (rc)
return rc;
+ if (tcp_conn->in.datalen) {
+ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct hash_desc *rx_hash = NULL;
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+ * Scatterlist case:
+ * We set up the iscsi_segment to point to the next
+ * scatterlist entry to copy to. As we go along,
+ * we move on to the next scatterlist entry and
+ * update the digest per-entry.
+ */
+ if (conn->datadgst_en)
+ rx_hash = &tcp_conn->rx_hash;
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+ tcp_ctask->data_offset,
+ tcp_conn->in.datalen);
+ return iscsi_segment_seek_sg(&tcp_conn->in.segment,
+ scsi_sglist(ctask->sc),
+ scsi_sg_count(ctask->sc),
+ tcp_ctask->data_offset,
+ tcp_conn->in.datalen,
+ iscsi_tcp_process_data_in,
+ rx_hash);
+ }
/* fall through */
case ISCSI_OP_SCSI_CMD_RSP:
- tcp_conn->in.ctask = session->cmds[itt];
- if (tcp_conn->in.datalen)
- goto copy_hdr;
-
- spin_lock(&session->lock);
- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
- spin_unlock(&session->lock);
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+ return 0;
+ }
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
case ISCSI_OP_R2T:
- tcp_conn->in.ctask = session->cmds[itt];
+ ctask = session->cmds[itt];
if (ahslen)
rc = ISCSI_ERR_AHSLEN;
- else if (tcp_conn->in.ctask->sc->sc_data_direction ==
- DMA_TO_DEVICE)
- rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
- else
+ else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+ spin_lock(&session->lock);
+ rc = iscsi_r2t_rsp(conn, ctask);
+ spin_unlock(&session->lock);
+ } else
rc = ISCSI_ERR_PROTO;
break;
case ISCSI_OP_LOGIN_RSP:
@@ -520,8 +818,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
* than 8K, but there are no targets that currently do this.
* For now we fail until we find a vendor that needs it
*/
- if (ISCSI_DEF_MAX_RECV_SEG_LEN <
- tcp_conn->in.datalen) {
+ if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
"but conn buffer is only %u (opcode %0x)\n",
tcp_conn->in.datalen,
@@ -530,8 +827,13 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
break;
}
- if (tcp_conn->in.datalen)
- goto copy_hdr;
+ /* If there's data coming in with the response,
+ * receive it into the connection's buffer.
+ */
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+ return 0;
+ }
/* fall through */
case ISCSI_OP_LOGOUT_RSP:
case ISCSI_OP_NOOP_IN:
@@ -543,461 +845,161 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
break;
}
- return rc;
-
-copy_hdr:
- /*
- * if we did zero copy for the header but we will need multiple
- * skbs to complete the command then we have to copy the header
- * for later use
- */
- if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
- (tcp_conn->in.datalen + tcp_conn->in.padding +
- (conn->datadgst_en ? 4 : 0))) {
- debug_tcp("Copying header for later use. in.copy %d in.datalen"
- " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
- memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
- sizeof(struct iscsi_hdr));
- tcp_conn->in.hdr = &tcp_conn->hdr;
- tcp_conn->in.zero_copy_hdr = 0;
- }
- return 0;
-}
-
-/**
- * iscsi_ctask_copy - copy skb bits to the destanation cmd task
- * @conn: iscsi tcp connection
- * @ctask: scsi command task
- * @buf: buffer to copy to
- * @buf_size: size of buffer
- * @offset: offset within the buffer
- *
- * Notes:
- * The function calls skb_copy_bits() and updates per-connection and
- * per-cmd byte counters.
- *
- * Read counters (in bytes):
- *
- * conn->in.offset offset within in progress SKB
- * conn->in.copy left to copy from in progress SKB
- * including padding
- * conn->in.copied copied already from in progress SKB
- * conn->data_copied copied already from in progress buffer
- * ctask->sent total bytes sent up to the MidLayer
- * ctask->data_count left to copy from in progress Data-In
- * buf_left left to copy from in progress buffer
- **/
-static inline int
-iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
- void *buf, int buf_size, int offset)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- int buf_left = buf_size - (tcp_conn->data_copied + offset);
- unsigned size = min(tcp_conn->in.copy, buf_left);
- int rc;
-
- size = min(size, ctask->data_count);
-
- debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
- size, tcp_conn->in.offset, tcp_conn->in.copied);
-
- BUG_ON(size <= 0);
- BUG_ON(tcp_ctask->sent + size > scsi_bufflen(ctask->sc));
-
- rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
- (char*)buf + (offset + tcp_conn->data_copied), size);
- /* must fit into skb->len */
- BUG_ON(rc);
-
- tcp_conn->in.offset += size;
- tcp_conn->in.copy -= size;
- tcp_conn->in.copied += size;
- tcp_conn->data_copied += size;
- tcp_ctask->sent += size;
- ctask->data_count -= size;
-
- BUG_ON(tcp_conn->in.copy < 0);
- BUG_ON(ctask->data_count < 0);
-
- if (buf_size != (tcp_conn->data_copied + offset)) {
- if (!ctask->data_count) {
- BUG_ON(buf_size - tcp_conn->data_copied < 0);
- /* done with this PDU */
- return buf_size - tcp_conn->data_copied;
- }
- return -EAGAIN;
+ if (rc == 0) {
+ /* Anything that comes with data should have
+ * been handled above. */
+ if (tcp_conn->in.datalen)
+ return ISCSI_ERR_PROTO;
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
}
- /* done with this buffer or with both - PDU and buffer */
- tcp_conn->data_copied = 0;
- return 0;
+ return rc;
}
/**
- * iscsi_tcp_copy - copy skb bits to the destanation buffer
- * @conn: iscsi tcp connection
+ * iscsi_tcp_hdr_recv_done - process PDU header
*
- * Notes:
- * The function calls skb_copy_bits() and updates per-connection
- * byte counters.
- **/
-static inline int
-iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
-{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- int buf_left = buf_size - tcp_conn->data_copied;
- int size = min(tcp_conn->in.copy, buf_left);
- int rc;
-
- debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
- size, tcp_conn->in.offset, tcp_conn->data_copied);
- BUG_ON(size <= 0);
-
- rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
- (char*)conn->data + tcp_conn->data_copied, size);
- BUG_ON(rc);
-
- tcp_conn->in.offset += size;
- tcp_conn->in.copy -= size;
- tcp_conn->in.copied += size;
- tcp_conn->data_copied += size;
-
- if (buf_size != tcp_conn->data_copied)
- return -EAGAIN;
-
- return 0;
-}
-
-static inline void
-partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
- int offset, int length)
-{
- struct scatterlist temp;
-
- sg_init_table(&temp, 1);
- sg_set_page(&temp, sg_page(sg), length, offset);
- crypto_hash_update(desc, &temp, length);
-}
-
-static void
-iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
-{
- struct scatterlist tmp;
-
- sg_init_one(&tmp, buf, len);
- crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
-}
-
-static int iscsi_scsi_data_in(struct iscsi_conn *conn)
+ * This is the callback invoked when the PDU header has
+ * been received. If the header is followed by additional
+ * header segments, we go back for more data.
+ */
+static int
+iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct scsi_cmnd *sc = ctask->sc;
- struct scatterlist *sg;
- int i, offset, rc = 0;
-
- BUG_ON((void*)ctask != sc->SCp.ptr);
-
- offset = tcp_ctask->data_offset;
- sg = scsi_sglist(sc);
-
- if (tcp_ctask->data_offset)
- for (i = 0; i < tcp_ctask->sg_count; i++)
- offset -= sg[i].length;
- /* we've passed through partial sg*/
- if (offset < 0)
- offset = 0;
-
- for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
- char *dest;
-
- dest = kmap_atomic(sg_page(&sg[i]), KM_SOFTIRQ0);
- rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
- sg[i].length, offset);
- kunmap_atomic(dest, KM_SOFTIRQ0);
- if (rc == -EAGAIN)
- /* continue with the next SKB/PDU */
- return rc;
- if (!rc) {
- if (conn->datadgst_en) {
- if (!offset)
- crypto_hash_update(
- &tcp_conn->rx_hash,
- &sg[i], sg[i].length);
- else
- partial_sg_digest_update(
- &tcp_conn->rx_hash,
- &sg[i],
- sg[i].offset + offset,
- sg[i].length - offset);
- }
- offset = 0;
- tcp_ctask->sg_count++;
- }
-
- if (!ctask->data_count) {
- if (rc && conn->datadgst_en)
- /*
- * data-in is complete, but buffer not...
- */
- partial_sg_digest_update(&tcp_conn->rx_hash,
- &sg[i],
- sg[i].offset,
- sg[i].length-rc);
- rc = 0;
- break;
- }
-
- if (!tcp_conn->in.copy)
- return -EAGAIN;
- }
- BUG_ON(ctask->data_count);
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct iscsi_hdr *hdr;
- /* check for non-exceptional status */
- if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
- (long)sc, sc->result, ctask->itt,
- tcp_conn->in.hdr->flags);
- spin_lock(&conn->session->lock);
- __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
- spin_unlock(&conn->session->lock);
+ /* Check if there are additional header segments
+ * *prior* to computing the digest, because we
+ * may need to go back to the caller for more.
+ */
+ hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+ if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+ /* Bump the header length - the caller will
+ * just loop around and get the AHS for us, and
+ * call again. */
+ unsigned int ahslen = hdr->hlength << 2;
+
+ /* Make sure we don't overflow */
+ if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+ return ISCSI_ERR_AHSLEN;
+
+ segment->total_size += ahslen;
+ segment->size += ahslen;
+ return 0;
}
- return rc;
-}
-
-static int
-iscsi_data_recv(struct iscsi_conn *conn)
-{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- int rc = 0, opcode;
-
- opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
- switch (opcode) {
- case ISCSI_OP_SCSI_DATA_IN:
- rc = iscsi_scsi_data_in(conn);
- break;
- case ISCSI_OP_SCSI_CMD_RSP:
- case ISCSI_OP_TEXT_RSP:
- case ISCSI_OP_LOGIN_RSP:
- case ISCSI_OP_ASYNC_EVENT:
- case ISCSI_OP_REJECT:
- /*
- * Collect data segment to the connection's data
- * placeholder
- */
- if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
- rc = -EAGAIN;
- goto exit;
+ /* We're done processing the header. See if we're doing
+ * header digests; if so, set up the recv_digest buffer
+ * and go back for more. */
+ if (conn->hdrdgst_en) {
+ if (segment->digest_len == 0) {
+ iscsi_tcp_segment_splice_digest(segment,
+ segment->recv_digest);
+ return 0;
}
+ iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
+ segment->total_copied - ISCSI_DIGEST_SIZE,
+ segment->digest);
- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
- tcp_conn->in.datalen);
- if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
- iscsi_recv_digest_update(tcp_conn, conn->data,
- tcp_conn->in.datalen);
- break;
- default:
- BUG_ON(1);
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_HDR_DGST;
}
-exit:
- return rc;
+
+ tcp_conn->in.hdr = hdr;
+ return iscsi_tcp_hdr_dissect(conn, hdr);
}
/**
- * iscsi_tcp_data_recv - TCP receive in sendfile fashion
+ * iscsi_tcp_recv - TCP receive in sendfile fashion
* @rd_desc: read descriptor
* @skb: socket buffer
* @offset: offset in skb
* @len: skb->len - offset
**/
static int
-iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
- unsigned int offset, size_t len)
+iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+ unsigned int offset, size_t len)
{
- int rc;
struct iscsi_conn *conn = rd_desc->arg.data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- int processed;
- char pad[ISCSI_PAD_LEN];
- struct scatterlist sg;
-
- /*
- * Save current SKB and its offset in the corresponding
- * connection context.
- */
- tcp_conn->in.copy = skb->len - offset;
- tcp_conn->in.offset = offset;
- tcp_conn->in.skb = skb;
- tcp_conn->in.len = tcp_conn->in.copy;
- BUG_ON(tcp_conn->in.copy <= 0);
- debug_tcp("in %d bytes\n", tcp_conn->in.copy);
+ struct iscsi_segment *segment = &tcp_conn->in.segment;
+ struct skb_seq_state seq;
+ unsigned int consumed = 0;
+ int rc = 0;
-more:
- tcp_conn->in.copied = 0;
- rc = 0;
+ debug_tcp("in %d bytes\n", skb->len - offset);
if (unlikely(conn->suspend_rx)) {
debug_tcp("conn %d Rx suspended!\n", conn->id);
return 0;
}
- if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
- tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
- rc = iscsi_hdr_extract(tcp_conn);
- if (rc) {
- if (rc == -EAGAIN)
- goto nomore;
- else {
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- return 0;
- }
- }
+ skb_prepare_seq_read(skb, offset, skb->len, &seq);
+ while (1) {
+ unsigned int avail;
+ const u8 *ptr;
- /*
- * Verify and process incoming PDU header.
- */
- rc = iscsi_tcp_hdr_recv(conn);
- if (!rc && tcp_conn->in.datalen) {
- if (conn->datadgst_en)
- crypto_hash_init(&tcp_conn->rx_hash);
- tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
- } else if (rc) {
- iscsi_conn_failure(conn, rc);
- return 0;
+ avail = skb_seq_read(consumed, &ptr, &seq);
+ if (avail == 0) {
+ debug_tcp("no more data avail. Consumed %d\n",
+ consumed);
+ break;
}
- }
-
- if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV &&
- tcp_conn->in.copy) {
- uint32_t recv_digest;
-
- debug_tcp("extra data_recv offset %d copy %d\n",
- tcp_conn->in.offset, tcp_conn->in.copy);
-
- if (!tcp_conn->data_copied) {
- if (tcp_conn->in.padding) {
- debug_tcp("padding -> %d\n",
- tcp_conn->in.padding);
- memset(pad, 0, tcp_conn->in.padding);
- sg_init_one(&sg, pad, tcp_conn->in.padding);
- crypto_hash_update(&tcp_conn->rx_hash,
- &sg, sg.length);
+ BUG_ON(segment->copied >= segment->size);
+
+ debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+ rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+ BUG_ON(rc == 0);
+ consumed += rc;
+
+ if (segment->total_copied >= segment->total_size) {
+ debug_tcp("segment done\n");
+ rc = segment->done(tcp_conn, segment);
+ if (rc != 0) {
+ skb_abort_seq_read(&seq);
+ goto error;
}
- crypto_hash_final(&tcp_conn->rx_hash,
- (u8 *) &tcp_conn->in.datadgst);
- debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
- }
- rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
- if (rc) {
- if (rc == -EAGAIN)
- goto again;
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- return 0;
- }
-
- memcpy(&recv_digest, conn->data, sizeof(uint32_t));
- if (recv_digest != tcp_conn->in.datadgst) {
- debug_tcp("iscsi_tcp: data digest error!"
- "0x%x != 0x%x\n", recv_digest,
- tcp_conn->in.datadgst);
- iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
- return 0;
- } else {
- debug_tcp("iscsi_tcp: data digest match!"
- "0x%x == 0x%x\n", recv_digest,
- tcp_conn->in.datadgst);
- tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ /* The done() function sets up the
+ * next segment. */
}
}
+ skb_abort_seq_read(&seq);
+ conn->rxdata_octets += consumed;
+ return consumed;
- if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
- tcp_conn->in.copy) {
- debug_tcp("data_recv offset %d copy %d\n",
- tcp_conn->in.offset, tcp_conn->in.copy);
-
- rc = iscsi_data_recv(conn);
- if (rc) {
- if (rc == -EAGAIN)
- goto again;
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- return 0;
- }
-
- if (tcp_conn->in.padding)
- tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
- else if (conn->datadgst_en)
- tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
- else
- tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
- tcp_conn->data_copied = 0;
- }
-
- if (tcp_conn->in_progress == IN_PROGRESS_PAD_RECV &&
- tcp_conn->in.copy) {
- int copylen = min(tcp_conn->in.padding - tcp_conn->data_copied,
- tcp_conn->in.copy);
-
- tcp_conn->in.copy -= copylen;
- tcp_conn->in.offset += copylen;
- tcp_conn->data_copied += copylen;
-
- if (tcp_conn->data_copied != tcp_conn->in.padding)
- tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
- else if (conn->datadgst_en)
- tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
- else
- tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
- tcp_conn->data_copied = 0;
- }
-
- debug_tcp("f, processed %d from out of %d padding %d\n",
- tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
- BUG_ON(tcp_conn->in.offset - offset > len);
-
- if (tcp_conn->in.offset - offset != len) {
- debug_tcp("continue to process %d bytes\n",
- (int)len - (tcp_conn->in.offset - offset));
- goto more;
- }
-
-nomore:
- processed = tcp_conn->in.offset - offset;
- BUG_ON(processed == 0);
- return processed;
-
-again:
- processed = tcp_conn->in.offset - offset;
- debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
- processed, (int)len, (int)rd_desc->count);
- BUG_ON(processed == 0);
- BUG_ON(processed > len);
-
- conn->rxdata_octets += processed;
- return processed;
+error:
+ debug_tcp("Error receiving PDU, errno=%d\n", rc);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return 0;
}
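The rewritten receive path above is built on the kernel's zero-copy skb sequential-read helpers instead of the old offset/copy bookkeeping. A minimal sketch of that pattern (illustrative only, not part of the patch; consume() is a hypothetical stand-in for iscsi_tcp_segment_recv()):

/* Sketch only - consume() is hypothetical; the real code feeds each
 * chunk to iscsi_tcp_segment_recv() and chains segments via done().
 */
static unsigned int walk_skb(struct sk_buff *skb, unsigned int offset)
{
	struct skb_seq_state seq;
	unsigned int consumed = 0, avail;
	const u8 *ptr;

	skb_prepare_seq_read(skb, offset, skb->len, &seq);
	while ((avail = skb_seq_read(consumed, &ptr, &seq)) != 0)
		consumed += consume(ptr, avail);	/* parse avail bytes at ptr */
	skb_abort_seq_read(&seq);			/* unmap any highmem frag */
	return consumed;
}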
static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
struct iscsi_conn *conn = sk->sk_user_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
read_descriptor_t rd_desc;
read_lock(&sk->sk_callback_lock);
/*
- * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
+ * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
* We set count to 1 because we want the network layer to
- * hand us all the skbs that are available. iscsi_tcp_data_recv
+ * hand us all the skbs that are available. iscsi_tcp_recv
* handled pdus that cross buffers or pdus that still need data.
*/
rd_desc.arg.data = conn;
rd_desc.count = 1;
- tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
+ tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
read_unlock(&sk->sk_callback_lock);
+
+ /* If we had to (atomically) map a highmem page,
+ * unmap it now. */
+ iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
}
static void
@@ -1077,121 +1079,173 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
}
/**
- * iscsi_send - generic send routine
- * @sk: kernel's socket
- * @buf: buffer to write from
- * @size: actual size to write
- * @flags: socket's flags
- */
-static inline int
-iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
+ * iscsi_xmit - TCP transmit
+ **/
+static int
+iscsi_xmit(struct iscsi_conn *conn)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct socket *sk = tcp_conn->sock;
- int offset = buf->sg.offset + buf->sent, res;
+ struct iscsi_segment *segment = &tcp_conn->out.segment;
+ unsigned int consumed = 0;
+ int rc = 0;
- /*
- * if we got use_sg=0 or are sending something we kmallocd
- * then we did not have to do kmap (kmap returns page_address)
- *
- * if we got use_sg > 0, but had to drop down, we do not
- * set clustering so this should only happen for that
- * slab case.
- */
- if (buf->use_sendmsg)
- res = sock_no_sendpage(sk, sg_page(&buf->sg), offset, size, flags);
- else
- res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset, size, flags);
-
- if (res >= 0) {
- conn->txdata_octets += res;
- buf->sent += res;
- return res;
+ while (1) {
+ rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
+ if (rc < 0)
+ goto error;
+ if (rc == 0)
+ break;
+
+ consumed += rc;
+
+ if (segment->total_copied >= segment->total_size) {
+ if (segment->done != NULL) {
+ rc = segment->done(tcp_conn, segment);
+ if (rc < 0)
+ goto error;
+ }
+ }
}
- tcp_conn->sendpage_failures_cnt++;
- if (res == -EAGAIN)
- res = -ENOBUFS;
- else
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- return res;
+ debug_tcp("xmit %d bytes\n", consumed);
+
+ conn->txdata_octets += consumed;
+ return consumed;
+
+error:
+ /* Transmit error. We could initiate error recovery
+ * here. */
+ debug_tcp("Error sending PDU, errno=%d\n", rc);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return rc;
}
/**
- * iscsi_sendhdr - send PDU Header via tcp_sendpage()
- * @conn: iscsi connection
- * @buf: buffer to write from
- * @datalen: lenght of data to be sent after the header
- *
- * Notes:
- * (Tx, Fast Path)
- **/
+ * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ */
static inline int
-iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
+iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
{
- int flags = 0; /* MSG_DONTWAIT; */
- int res, size;
-
- size = buf->sg.length - buf->sent;
- BUG_ON(buf->sent + size > buf->sg.length);
- if (buf->sent + size != buf->sg.length || datalen)
- flags |= MSG_MORE;
-
- res = iscsi_send(conn, buf, size, flags);
- debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
- if (res >= 0) {
- if (size != res)
- return -EAGAIN;
- return 0;
- }
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_segment *segment = &tcp_conn->out.segment;
- return res;
+ return segment->total_copied - segment->total_size;
}
-/**
- * iscsi_sendpage - send one page of iSCSI Data-Out.
- * @conn: iscsi connection
- * @buf: buffer to write from
- * @count: remaining data
- * @sent: number of bytes sent
- *
- * Notes:
- * (Tx, Fast Path)
- **/
static inline int
-iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
- int *count, int *sent)
+iscsi_tcp_flush(struct iscsi_conn *conn)
{
- int flags = 0; /* MSG_DONTWAIT; */
- int res, size;
-
- size = buf->sg.length - buf->sent;
- BUG_ON(buf->sent + size > buf->sg.length);
- if (size > *count)
- size = *count;
- if (buf->sent + size != buf->sg.length || *count != size)
- flags |= MSG_MORE;
-
- res = iscsi_send(conn, buf, size, flags);
- debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
- size, buf->sent, *count, *sent, res);
- if (res >= 0) {
- *count -= res;
- *sent += res;
- if (size != res)
+ int rc;
+
+ while (iscsi_tcp_xmit_qlen(conn)) {
+ rc = iscsi_xmit(conn);
+ if (rc == 0)
return -EAGAIN;
- return 0;
+ if (rc < 0)
+ return rc;
}
- return res;
+ return 0;
}
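For reference, the contract these two helpers establish: iscsi_xmit() returns the number of bytes it pushed (failing the connection itself on error), while iscsi_tcp_flush() returns 0 once the queued segment is fully sent, -EAGAIN when the socket would block, or a negative error. A caller sketch (illustrative only, mirroring the mtask/ctask xmit paths below):

	/* Sketch only */
	rc = iscsi_tcp_flush(conn);
	if (rc < 0)		/* includes -EAGAIN: libiscsi reschedules the xmit work */
		return rc;
	/* out.segment fully transmitted - safe to prep the next PDU */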
-static inline void
-iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_tcp_cmd_task *tcp_ctask)
+/*
+ * This is called when we're done sending the header.
+ * Simply copy the data_segment to the send segment, and return.
+ */
+static int
+iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ tcp_conn->out.segment = tcp_conn->out.data_segment;
+ debug_tcp("Header done. Next segment size %u total_size %u\n",
+ tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+ return 0;
+}
+
+static void
+iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
{
- crypto_hash_init(&tcp_conn->tx_hash);
- tcp_ctask->digest_count = 4;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ conn->hdrdgst_en? ", digest enabled" : "");
+
+ /* Clear the data segment - needs to be filled in by the
+ * caller using iscsi_tcp_send_data_prep() */
+ memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+ * sure that both iscsi_tcp_ctask and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+ iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+ hdr + hdrlen);
+ hdrlen += ISCSI_DIGEST_SIZE;
+ }
+
+ /* Remember header pointer for later, when we need
+ * to decide whether there's a payload to go along
+ * with the header. */
+ tcp_conn->out.hdr = hdr;
+
+ iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
+ iscsi_tcp_send_hdr_done, NULL);
+}
+
+/*
+ * Prepare the send buffer for the payload data.
+ * Padding and checksumming will all be taken care
+ * of by the iscsi_segment routines.
+ */
+static int
+iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ unsigned int count, unsigned int offset,
+ unsigned int len)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ tcp_conn, offset, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+ said it would send. */
+ hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+ WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+
+ if (conn->datadgst_en)
+ tx_hash = &tcp_conn->tx_hash;
+
+ return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
+ sg, count, offset, len,
+ NULL, tx_hash);
+}
+
+static void
+iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ size_t len)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+ debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+ said it would send. */
+ hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+ WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+
+ if (conn->datadgst_en)
+ tx_hash = &tcp_conn->tx_hash;
+
+ iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+ data, len, NULL, tx_hash);
}
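Taken together, the three prep helpers above replace the old iscsi_buf/iscsi_sendhdr/iscsi_sendpage machinery. A rough usage sketch of how a PDU is queued and pushed out (illustrative only; pdu_hdr, sc, data, offset and count are placeholders, error handling omitted):

	/* Sketch only */
	iscsi_tcp_send_hdr_prep(conn, pdu_hdr, sizeof(struct iscsi_hdr));

	if (sc)					/* scatter/gather payload */
		rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
					      scsi_sg_count(sc), offset, count);
	else if (data)				/* immediate/linear payload */
		iscsi_tcp_send_linear_data_prepare(conn, data, count);

	rc = iscsi_tcp_flush(conn);		/* transmits header, then payload */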
/**
@@ -1207,12 +1261,17 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
*
* Called under connection lock.
**/
-static void
+static int
iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
- struct iscsi_r2t_info *r2t, int left)
+ struct iscsi_r2t_info *r2t)
{
struct iscsi_data *hdr;
- int new_offset;
+ int new_offset, left;
+
+ BUG_ON(r2t->data_length - r2t->sent < 0);
+ left = r2t->data_length - r2t->sent;
+ if (left == 0)
+ return 0;
hdr = &r2t->dtask.hdr;
memset(hdr, 0, sizeof(struct iscsi_data));
@@ -1233,43 +1292,46 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
r2t->data_count = left;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
}
- conn->dataout_pdus_cnt++;
-
- iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
- sizeof(struct iscsi_hdr));
-
- if (iscsi_buf_left(&r2t->sendbuf))
- return;
-
- iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
- r2t->sg += 1;
-}
-static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
- unsigned long len)
-{
- tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
- if (!tcp_ctask->pad_count)
- return;
-
- tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
- debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
- set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
+ conn->dataout_pdus_cnt++;
+ return 1;
}
/**
- * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_tcp_ctask_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
* @conn: iscsi connection
* @ctask: scsi command task
* @sc: scsi command
**/
-static void
-iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
+static int
+iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
{
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_conn *conn = ctask->conn;
+ struct scsi_cmnd *sc = ctask->sc;
+ int err;
BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
- tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT;
+ tcp_ctask->sent = 0;
+ tcp_ctask->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+ debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
+ conn->id, ctask->itt, ctask->imm_count,
+ ctask->unsol_count);
+ iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+ if (!ctask->imm_count)
+ return 0;
+
+ /* If we have immediate data, attach a payload */
+ err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
+ 0, ctask->imm_count);
+ if (err)
+ return err;
+ tcp_ctask->sent += ctask->imm_count;
+ ctask->imm_count = 0;
+ return 0;
}
/**
@@ -1281,484 +1343,130 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
* The function can return -EAGAIN in which case caller must
* call it again later, or recover. '0' return code means successful
* xmit.
- *
- * Management xmit state machine consists of these states:
- * XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header
- * XMSTATE_BIT_IMM_HDR - PDU Header xmit in progress
- * XMSTATE_BIT_IMM_DATA - PDU Data xmit in progress
- * XMSTATE_VALUE_IDLE - management PDU is done
**/
static int
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
- struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
int rc;
- debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
- conn->id, tcp_mtask->xmstate, mtask->itt);
-
- if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) {
- iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
- sizeof(struct iscsi_hdr));
-
- if (mtask->data_count) {
- set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
- iscsi_buf_init_iov(&tcp_mtask->sendbuf,
- (char*)mtask->data,
- mtask->data_count);
- }
-
- if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
- conn->stop_stage != STOP_CONN_RECOVER &&
- conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
- (u8*)tcp_mtask->hdrext);
-
- tcp_mtask->sent = 0;
- clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate);
- set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
- }
-
- if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) {
- rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
- mtask->data_count);
- if (rc)
- return rc;
- clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
- }
-
- if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) {
- BUG_ON(!mtask->data_count);
- /* FIXME: implement.
- * Virtual buffer could be spreaded across multiple pages...
- */
- do {
- int rc;
-
- rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
- &mtask->data_count, &tcp_mtask->sent);
- if (rc) {
- set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
- return rc;
- }
- } while (mtask->data_count);
- }
+ /* Flush any pending data first. */
+ rc = iscsi_tcp_flush(conn);
+ if (rc < 0)
+ return rc;
- BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE);
if (mtask->hdr->itt == RESERVED_ITT) {
struct iscsi_session *session = conn->session;
spin_lock_bh(&session->lock);
- list_del(&conn->mtask->running);
- __kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
- sizeof(void*));
+ iscsi_free_mgmt_task(conn, mtask);
spin_unlock_bh(&session->lock);
}
+
return 0;
}
+/*
+ * iscsi_tcp_ctask_xmit - xmit normal PDU task
+ * @conn: iscsi connection
+ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted successfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
static int
-iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
- struct scsi_cmnd *sc = ctask->sc;
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct scsi_cmnd *sc = ctask->sc;
int rc = 0;
- if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) {
- tcp_ctask->sent = 0;
- tcp_ctask->sg_count = 0;
- tcp_ctask->exp_datasn = 0;
-
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- struct scatterlist *sg = scsi_sglist(sc);
-
- iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
- tcp_ctask->sg = sg + 1;
- tcp_ctask->bad_sg = sg + scsi_sg_count(sc);
-
- debug_scsi("cmd [itt 0x%x total %d imm_data %d "
- "unsol count %d, unsol offset %d]\n",
- ctask->itt, scsi_bufflen(sc),
- ctask->imm_count, ctask->unsol_count,
- ctask->unsol_offset);
- }
-
- iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
- sizeof(struct iscsi_hdr));
-
- if (conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
- (u8*)tcp_ctask->hdrext);
- clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate);
- set_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
- }
-
- if (test_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate)) {
- rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
- if (rc)
- return rc;
- clear_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
-
- if (sc->sc_data_direction != DMA_TO_DEVICE)
- return 0;
-
- if (ctask->imm_count) {
- set_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
- iscsi_set_padding(tcp_ctask, ctask->imm_count);
-
- if (ctask->conn->datadgst_en) {
- iscsi_data_digest_init(ctask->conn->dd_data,
- tcp_ctask);
- tcp_ctask->immdigest = 0;
- }
- }
-
- if (ctask->unsol_count) {
- set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
- set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
- }
- }
- return rc;
-}
-
-static int
-iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- int sent = 0, rc;
-
- if (test_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate)) {
- iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
- tcp_ctask->pad_count);
- if (conn->datadgst_en)
- crypto_hash_update(&tcp_conn->tx_hash,
- &tcp_ctask->sendbuf.sg,
- tcp_ctask->sendbuf.sg.length);
- } else if (!test_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate))
- return 0;
-
- clear_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
- clear_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
- debug_scsi("sending %d pad bytes for itt 0x%x\n",
- tcp_ctask->pad_count, ctask->itt);
- rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
- &sent);
- if (rc) {
- debug_scsi("padding send failed %d\n", rc);
- set_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
- }
- return rc;
-}
-
-static int
-iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
- struct iscsi_buf *buf, uint32_t *digest)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask;
- struct iscsi_tcp_conn *tcp_conn;
- int rc, sent = 0;
-
- if (!conn->datadgst_en)
- return 0;
-
- tcp_ctask = ctask->dd_data;
- tcp_conn = conn->dd_data;
-
- if (!test_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate)) {
- crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
- iscsi_buf_init_iov(buf, (char*)digest, 4);
- }
- clear_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
-
- rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
- if (!rc)
- debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
- ctask->itt);
- else {
- debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
- *digest, ctask->itt);
- set_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
- }
- return rc;
-}
-
-static int
-iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
- struct scatterlist **sg, int *sent, int *count,
- struct iscsi_buf *digestbuf, uint32_t *digest)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct iscsi_conn *conn = ctask->conn;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- int rc, buf_sent, offset;
-
- while (*count) {
- buf_sent = 0;
- offset = sendbuf->sent;
-
- rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
- *sent = *sent + buf_sent;
- if (buf_sent && conn->datadgst_en)
- partial_sg_digest_update(&tcp_conn->tx_hash,
- &sendbuf->sg, sendbuf->sg.offset + offset,
- buf_sent);
- if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
- iscsi_buf_init_sg(sendbuf, *sg);
- *sg = *sg + 1;
- }
-
- if (rc)
- return rc;
- }
-
- rc = iscsi_send_padding(conn, ctask);
- if (rc)
+flush:
+ /* Flush any pending data first. */
+ rc = iscsi_tcp_flush(conn);
+ if (rc < 0)
return rc;
- return iscsi_send_digest(conn, ctask, digestbuf, digest);
-}
-
-static int
-iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct iscsi_data_task *dtask;
- int rc;
-
- set_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
- if (test_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate)) {
- dtask = &tcp_ctask->unsol_dtask;
-
- iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
- iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
- sizeof(struct iscsi_hdr));
- if (conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
- (u8*)dtask->hdrext);
-
- clear_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
- iscsi_set_padding(tcp_ctask, ctask->data_count);
- }
-
- rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
- if (rc) {
- clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
- set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
- return rc;
- }
+ /* Are we done already? */
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
- if (conn->datadgst_en) {
- dtask = &tcp_ctask->unsol_dtask;
- iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
- dtask->digest = 0;
- }
+ if (ctask->unsol_count != 0) {
+ struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
- debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
- ctask->itt, ctask->unsol_count, tcp_ctask->sent);
- return 0;
-}
+ /* Prepare a header for the unsolicited PDU.
+ * The amount of data we want to send will be
+ * in ctask->data_count.
+ * FIXME: return the data count instead.
+ */
+ iscsi_prep_unsolicit_data_pdu(ctask, hdr);
-static int
-iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- int rc;
+ debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+ ctask->itt, tcp_ctask->sent, ctask->data_count);
- if (test_and_clear_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate)) {
- BUG_ON(!ctask->unsol_count);
-send_hdr:
- rc = iscsi_send_unsol_hdr(conn, ctask);
+ iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
+ scsi_sg_count(sc),
+ tcp_ctask->sent,
+ ctask->data_count);
if (rc)
- return rc;
- }
-
- if (test_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate)) {
- struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
- int start = tcp_ctask->sent;
+ goto fail;
+ tcp_ctask->sent += ctask->data_count;
+ ctask->unsol_count -= ctask->data_count;
+ goto flush;
+ } else {
+ struct iscsi_session *session = conn->session;
+ struct iscsi_r2t_info *r2t;
- rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
- &tcp_ctask->sent, &ctask->data_count,
- &dtask->digestbuf, &dtask->digest);
- ctask->unsol_count -= tcp_ctask->sent - start;
- if (rc)
- return rc;
- clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
- /*
- * Done with the Data-Out. Next, check if we need
- * to send another unsolicited Data-Out.
+ /* All unsolicited PDUs sent. Check for solicited PDUs.
*/
- if (ctask->unsol_count) {
- debug_scsi("sending more uns\n");
- set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
- goto send_hdr;
+ spin_lock_bh(&session->lock);
+ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ /* Continue with this R2T? */
+ if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ debug_scsi(" done with r2t %p\n", r2t);
+
+ __kfifo_put(tcp_ctask->r2tpool.queue,
+ (void*)&r2t, sizeof(void*));
+ tcp_ctask->r2t = r2t = NULL;
+ }
}
- }
- return 0;
-}
-
-static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct iscsi_session *session = conn->session;
- struct iscsi_r2t_info *r2t;
- struct iscsi_data_task *dtask;
- int left, rc;
- if (test_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate)) {
- if (!tcp_ctask->r2t) {
- spin_lock_bh(&session->lock);
+ if (r2t == NULL) {
__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
sizeof(void*));
- spin_unlock_bh(&session->lock);
+ r2t = tcp_ctask->r2t;
}
-send_hdr:
- r2t = tcp_ctask->r2t;
- dtask = &r2t->dtask;
-
- if (conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &r2t->headbuf,
- (u8*)dtask->hdrext);
- clear_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
- set_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
- }
-
- if (test_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate)) {
- r2t = tcp_ctask->r2t;
- dtask = &r2t->dtask;
-
- rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
- if (rc)
- return rc;
- clear_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
- set_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate);
+ spin_unlock_bh(&session->lock);
- if (conn->datadgst_en) {
- iscsi_data_digest_init(conn->dd_data, tcp_ctask);
- dtask->digest = 0;
+ /* Waiting for more R2Ts to arrive. */
+ if (r2t == NULL) {
+ debug_tcp("no R2Ts yet\n");
+ return 0;
}
- iscsi_set_padding(tcp_ctask, r2t->data_count);
- debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
- r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
- r2t->sent);
- }
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+ r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
- if (test_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate)) {
- r2t = tcp_ctask->r2t;
- dtask = &r2t->dtask;
+ iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ sizeof(struct iscsi_hdr));
- rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
- &r2t->sent, &r2t->data_count,
- &dtask->digestbuf, &dtask->digest);
+ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
+ scsi_sg_count(sc),
+ r2t->data_offset + r2t->sent,
+ r2t->data_count);
if (rc)
- return rc;
- clear_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate);
-
- /*
- * Done with this Data-Out. Next, check if we have
- * to send another Data-Out for this R2T.
- */
- BUG_ON(r2t->data_length - r2t->sent < 0);
- left = r2t->data_length - r2t->sent;
- if (left) {
- iscsi_solicit_data_cont(conn, ctask, r2t, left);
- goto send_hdr;
- }
-
- /*
- * Done with this R2T. Check if there are more
- * outstanding R2Ts ready to be processed.
- */
- spin_lock_bh(&session->lock);
- tcp_ctask->r2t = NULL;
- __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
- if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
- sizeof(void*))) {
- tcp_ctask->r2t = r2t;
- spin_unlock_bh(&session->lock);
- goto send_hdr;
- }
- spin_unlock_bh(&session->lock);
+ goto fail;
+ tcp_ctask->sent += r2t->data_count;
+ r2t->sent += r2t->data_count;
+ goto flush;
}
return 0;
-}
-
-/**
- * iscsi_tcp_ctask_xmit - xmit normal PDU task
- * @conn: iscsi connection
- * @ctask: iscsi command task
- *
- * Notes:
- * The function can return -EAGAIN in which case caller must
- * call it again later, or recover. '0' return code means successful
- * xmit.
- * The function is devided to logical helpers (above) for the different
- * xmit stages.
- *
- *iscsi_send_cmd_hdr()
- * XMSTATE_BIT_CMD_HDR_INIT - prepare Header and Data buffers Calculate
- * Header Digest
- * XMSTATE_BIT_CMD_HDR_XMIT - Transmit header in progress
- *
- *iscsi_send_padding
- * XMSTATE_BIT_W_PAD - Prepare and send pading
- * XMSTATE_BIT_W_RESEND_PAD - retry send pading
- *
- *iscsi_send_digest
- * XMSTATE_BIT_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
- * XMSTATE_BIT_W_RESEND_DATA_DIGEST - retry sending digest
- *
- *iscsi_send_unsol_hdr
- * XMSTATE_BIT_UNS_INIT - prepare un-solicit data header and digest
- * XMSTATE_BIT_UNS_HDR - send un-solicit header
- *
- *iscsi_send_unsol_pdu
- * XMSTATE_BIT_UNS_DATA - send un-solicit data in progress
- *
- *iscsi_send_sol_pdu
- * XMSTATE_BIT_SOL_HDR_INIT - solicit data header and digest initialize
- * XMSTATE_BIT_SOL_HDR - send solicit header
- * XMSTATE_BIT_SOL_DATA - send solicit data
- *
- *iscsi_tcp_ctask_xmit
- * XMSTATE_BIT_IMM_DATA - xmit managment data (??)
- **/
-static int
-iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- int rc = 0;
-
- debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
- conn->id, tcp_ctask->xmstate, ctask->itt);
-
- rc = iscsi_send_cmd_hdr(conn, ctask);
- if (rc)
- return rc;
- if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
- return 0;
-
- if (test_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate)) {
- rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
- &tcp_ctask->sent, &ctask->imm_count,
- &tcp_ctask->immbuf, &tcp_ctask->immdigest);
- if (rc)
- return rc;
- clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
- }
-
- rc = iscsi_send_unsol_pdu(conn, ctask);
- if (rc)
- return rc;
-
- rc = iscsi_send_sol_pdu(conn, ctask);
- if (rc)
- return rc;
-
- return rc;
+fail:
+ iscsi_conn_failure(conn, rc);
+ return -EIO;
}
static struct iscsi_cls_conn *
@@ -1784,9 +1492,6 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
conn->dd_data = tcp_conn;
tcp_conn->iscsi_conn = conn;
- tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
- /* initial operational parameters */
- tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
@@ -1863,11 +1568,9 @@ static void
iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
iscsi_conn_stop(cls_conn, flag);
iscsi_tcp_release_conn(conn);
- tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
}
static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
@@ -1967,7 +1670,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
/*
* set receive state machine into initial state
*/
- tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
return 0;
free_socket:
@@ -1977,10 +1680,17 @@ free_socket:
/* called with host lock */
static void
-iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
+iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
- struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
- tcp_mtask->xmstate = 1 << XMSTATE_BIT_IMM_HDR_INIT;
+ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+ /* Prepare PDU, optionally w/ immediate data */
+ iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
+
+ /* If we have immediate data, attach a payload */
+ if (mtask->data_count)
+ iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
+ mtask->data_count);
}
static int
@@ -2003,8 +1713,7 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
*/
/* R2T pool */
- if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
- (void***)&tcp_ctask->r2ts,
+ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
sizeof(struct iscsi_r2t_info))) {
goto r2t_alloc_fail;
}
@@ -2013,8 +1722,7 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
tcp_ctask->r2tqueue = kfifo_alloc(
session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
- iscsi_pool_free(&tcp_ctask->r2tpool,
- (void**)tcp_ctask->r2ts);
+ iscsi_pool_free(&tcp_ctask->r2tpool);
goto r2t_alloc_fail;
}
}
@@ -2027,8 +1735,7 @@ r2t_alloc_fail:
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
kfifo_free(tcp_ctask->r2tqueue);
- iscsi_pool_free(&tcp_ctask->r2tpool,
- (void**)tcp_ctask->r2ts);
+ iscsi_pool_free(&tcp_ctask->r2tpool);
}
return -ENOMEM;
}
@@ -2043,8 +1750,7 @@ iscsi_r2tpool_free(struct iscsi_session *session)
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
kfifo_free(tcp_ctask->r2tqueue);
- iscsi_pool_free(&tcp_ctask->r2tpool,
- (void**)tcp_ctask->r2ts);
+ iscsi_pool_free(&tcp_ctask->r2tpool);
}
}
@@ -2060,9 +1766,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
switch(param) {
case ISCSI_PARAM_HDRDGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
- tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
- if (conn->hdrdgst_en)
- tcp_conn->hdr_size += sizeof(__u32);
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
@@ -2071,12 +1774,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
break;
case ISCSI_PARAM_MAX_R2T:
sscanf(buf, "%d", &value);
- if (session->max_r2t == roundup_pow_of_two(value))
+ if (value <= 0 || !is_power_of_2(value))
+ return -EINVAL;
+ if (session->max_r2t == value)
break;
iscsi_r2tpool_free(session);
iscsi_set_param(cls_conn, param, buf, buflen);
- if (session->max_r2t & (session->max_r2t - 1))
- session->max_r2t = roundup_pow_of_two(session->max_r2t);
if (iscsi_r2tpool_alloc(session))
return -ENOMEM;
break;
@@ -2183,14 +1886,15 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit,
struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- ctask->hdr = &tcp_ctask->hdr;
+ ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
+ ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
}
for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
- mtask->hdr = &tcp_mtask->hdr;
+ mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
}
if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
@@ -2222,12 +1926,14 @@ static struct scsi_host_template iscsi_sht = {
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
- .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .sg_tablesize = 4096,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler= iscsi_eh_device_reset,
.eh_host_reset_handler = iscsi_eh_host_reset,
.use_clustering = DISABLE_CLUSTERING,
+ .use_sg_chaining = ENABLE_SG_CHAINING,
.slave_configure = iscsi_tcp_slave_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
@@ -2257,14 +1963,17 @@ static struct iscsi_transport iscsi_tcp_transport = {
ISCSI_PERSISTENT_ADDRESS |
ISCSI_TARGET_NAME | ISCSI_TPGT |
ISCSI_USERNAME | ISCSI_PASSWORD |
- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO,
.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
ISCSI_HOST_INITIATOR_NAME |
ISCSI_HOST_NETDEV_NAME,
.host_template = &iscsi_sht,
.conndata_size = sizeof(struct iscsi_conn),
.max_conn = 1,
- .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN,
+ .max_cmd_len = 16,
/* session management */
.create_session = iscsi_tcp_session_create,
.destroy_session = iscsi_tcp_session_destroy,
@@ -2283,8 +1992,8 @@ static struct iscsi_transport iscsi_tcp_transport = {
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_conn_get_stats,
- .init_cmd_task = iscsi_tcp_cmd_init,
- .init_mgmt_task = iscsi_tcp_mgmt_init,
+ .init_cmd_task = iscsi_tcp_ctask_init,
+ .init_mgmt_task = iscsi_tcp_mtask_init,
.xmit_cmd_task = iscsi_tcp_ctask_xmit,
.xmit_mgmt_task = iscsi_tcp_mtask_xmit,
.cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 68c36cc8997e..ed0b991d1e72 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -24,71 +24,61 @@
#include <scsi/libiscsi.h>
-/* Socket's Receive state machine */
-#define IN_PROGRESS_WAIT_HEADER 0x0
-#define IN_PROGRESS_HEADER_GATHER 0x1
-#define IN_PROGRESS_DATA_RECV 0x2
-#define IN_PROGRESS_DDIGEST_RECV 0x3
-#define IN_PROGRESS_PAD_RECV 0x4
-
-/* xmit state machine */
-#define XMSTATE_VALUE_IDLE 0
-#define XMSTATE_BIT_CMD_HDR_INIT 0
-#define XMSTATE_BIT_CMD_HDR_XMIT 1
-#define XMSTATE_BIT_IMM_HDR 2
-#define XMSTATE_BIT_IMM_DATA 3
-#define XMSTATE_BIT_UNS_INIT 4
-#define XMSTATE_BIT_UNS_HDR 5
-#define XMSTATE_BIT_UNS_DATA 6
-#define XMSTATE_BIT_SOL_HDR 7
-#define XMSTATE_BIT_SOL_DATA 8
-#define XMSTATE_BIT_W_PAD 9
-#define XMSTATE_BIT_W_RESEND_PAD 10
-#define XMSTATE_BIT_W_RESEND_DATA_DIGEST 11
-#define XMSTATE_BIT_IMM_HDR_INIT 12
-#define XMSTATE_BIT_SOL_HDR_INIT 13
-
-#define ISCSI_PAD_LEN 4
-#define ISCSI_SG_TABLESIZE SG_ALL
-#define ISCSI_TCP_MAX_CMD_LEN 16
-
struct crypto_hash;
struct socket;
+struct iscsi_tcp_conn;
+struct iscsi_segment;
+
+typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
+ struct iscsi_segment *);
+
+struct iscsi_segment {
+ unsigned char *data;
+ unsigned int size;
+ unsigned int copied;
+ unsigned int total_size;
+ unsigned int total_copied;
+
+ struct hash_desc *hash;
+ unsigned char recv_digest[ISCSI_DIGEST_SIZE];
+ unsigned char digest[ISCSI_DIGEST_SIZE];
+ unsigned int digest_len;
+
+ struct scatterlist *sg;
+ void *sg_mapped;
+ unsigned int sg_offset;
+
+ iscsi_segment_done_fn_t *done;
+};
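A segment describes one chunk of an in-flight PDU (header, payload, padding or digest); the done() callback is what chains one chunk to the next. A hypothetical sketch of that chaining, modelled on iscsi_tcp_send_hdr_done() earlier in this patch (my_hdr_done(), hdr_buf and hdr_len are made up):

/* Sketch only */
static int my_hdr_done(struct iscsi_tcp_conn *tcp_conn,
		       struct iscsi_segment *segment)
{
	/* header fully copied: point the xmit loop at the payload segment */
	tcp_conn->out.segment = tcp_conn->out.data_segment;
	return 0;
}

/* ...and when queueing the header (NULL = no digest hash): */
iscsi_segment_init_linear(&tcp_conn->out.segment, hdr_buf, hdr_len,
			  my_hdr_done, NULL);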
 /* Socket connection receive helper */
struct iscsi_tcp_recv {
struct iscsi_hdr *hdr;
- struct sk_buff *skb;
- int offset;
- int len;
- int hdr_offset;
- int copy;
- int copied;
- int padding;
- struct iscsi_cmd_task *ctask; /* current cmd in progress */
+ struct iscsi_segment segment;
+
+ /* Allocate buffer for BHS + AHS */
+ uint32_t hdr_buf[64];
/* copied and flipped values */
int datalen;
- int datadgst;
- char zero_copy_hdr;
+};
+
+/* Socket connection send helper */
+struct iscsi_tcp_send {
+ struct iscsi_hdr *hdr;
+ struct iscsi_segment segment;
+ struct iscsi_segment data_segment;
};
struct iscsi_tcp_conn {
struct iscsi_conn *iscsi_conn;
struct socket *sock;
- struct iscsi_hdr hdr; /* header placeholder */
- char hdrext[4*sizeof(__u16) +
- sizeof(__u32)];
- int data_copied;
int stop_stage; /* conn_stop() flag: *
* stop to recover, *
* stop to terminate */
- /* iSCSI connection-wide sequencing */
- int hdr_size; /* PDU header size */
-
/* control data */
struct iscsi_tcp_recv in; /* TCP receive context */
- int in_progress; /* connection state machine */
+ struct iscsi_tcp_send out; /* TCP send context */
/* old values for socket callbacks */
void (*old_data_ready)(struct sock *, int);
@@ -103,29 +93,19 @@ struct iscsi_tcp_conn {
uint32_t sendpage_failures_cnt;
uint32_t discontiguous_hdr_cnt;
- ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
-};
+ int error;
-struct iscsi_buf {
- struct scatterlist sg;
- unsigned int sent;
- char use_sendmsg;
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
};
struct iscsi_data_task {
struct iscsi_data hdr; /* PDU */
- char hdrext[sizeof(__u32)]; /* Header-Digest */
- struct iscsi_buf digestbuf; /* digest buffer */
- uint32_t digest; /* data digest */
+ char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
};
struct iscsi_tcp_mgmt_task {
struct iscsi_hdr hdr;
- char hdrext[sizeof(__u32)]; /* Header-Digest */
- unsigned long xmstate; /* mgmt xmit progress */
- struct iscsi_buf headbuf; /* header buffer */
- struct iscsi_buf sendbuf; /* in progress buffer */
- int sent;
+ char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
};
struct iscsi_r2t_info {
@@ -133,38 +113,26 @@ struct iscsi_r2t_info {
__be32 exp_statsn; /* copied from R2T */
uint32_t data_length; /* copied from R2T */
uint32_t data_offset; /* copied from R2T */
- struct iscsi_buf headbuf; /* Data-Out Header Buffer */
- struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/
int sent; /* R2T sequence progress */
int data_count; /* DATA-Out payload progress */
- struct scatterlist *sg; /* per-R2T SG list */
int solicit_datasn;
- struct iscsi_data_task dtask; /* which data task */
+ struct iscsi_data_task dtask; /* Data-Out header buf */
};
struct iscsi_tcp_cmd_task {
- struct iscsi_cmd hdr;
- char hdrext[4*sizeof(__u16)+ /* AHS */
- sizeof(__u32)]; /* HeaderDigest */
- char pad[ISCSI_PAD_LEN];
- int pad_count; /* padded bytes */
- struct iscsi_buf headbuf; /* header buf (xmit) */
- struct iscsi_buf sendbuf; /* in progress buffer*/
- unsigned long xmstate; /* xmit xtate machine */
+ struct iscsi_hdr_buff {
+ struct iscsi_cmd cmd_hdr;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+ ISCSI_DIGEST_SIZE];
+ } hdr;
+
int sent;
- struct scatterlist *sg; /* per-cmd SG list */
- struct scatterlist *bad_sg; /* assert statement */
- int sg_count; /* SG's to process */
- uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
+ uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
int data_offset;
- struct iscsi_r2t_info *r2t; /* in progress R2T */
- struct iscsi_queue r2tpool;
+ struct iscsi_r2t_info *r2t; /* in progress R2T */
+ struct iscsi_pool r2tpool;
struct kfifo *r2tqueue;
- struct iscsi_r2t_info **r2ts;
- int digest_count;
- uint32_t immdigest; /* for imm data */
- struct iscsi_buf immbuf; /* for imm data digest */
- struct iscsi_data_task unsol_dtask; /* unsol data task */
+ struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
};
#endif /* ISCSI_H */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8b57af5baaec..553168ae44f1 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
+#include <linux/log2.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
@@ -86,7 +87,7 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
* xmit thread
*/
if (!list_empty(&session->leadconn->xmitqueue) ||
- __kfifo_len(session->leadconn->mgmtqueue))
+ !list_empty(&session->leadconn->mgmtqueue))
scsi_queue_work(session->host,
&session->leadconn->xmitwork);
}
@@ -122,6 +123,20 @@ void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
}
EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+{
+ unsigned exp_len = ctask->hdr_len + len;
+
+ if (exp_len > ctask->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+ ctask->hdr_len = exp_len;
+ return 0;
+}
+
/**
* iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
* @ctask: iscsi cmd task
@@ -129,27 +144,32 @@ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
* Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
* fields like dlength or final based on how much data it sends
*/
-static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
{
struct iscsi_conn *conn = ctask->conn;
struct iscsi_session *session = conn->session;
struct iscsi_cmd *hdr = ctask->hdr;
struct scsi_cmnd *sc = ctask->sc;
+ unsigned hdrlength;
+ int rc;
- hdr->opcode = ISCSI_OP_SCSI_CMD;
- hdr->flags = ISCSI_ATTR_SIMPLE;
- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
- hdr->itt = build_itt(ctask->itt, conn->id, session->age);
- hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
- hdr->cmdsn = cpu_to_be32(session->cmdsn);
- session->cmdsn++;
- hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
- memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
+ ctask->hdr_len = 0;
+ rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+ hdr->itt = build_itt(ctask->itt, conn->id, session->age);
+ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ session->cmdsn++;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
if (sc->cmd_len < MAX_COMMAND_SIZE)
memset(&hdr->cdb[sc->cmd_len], 0,
MAX_COMMAND_SIZE - sc->cmd_len);
- ctask->data_count = 0;
ctask->imm_count = 0;
if (sc->sc_data_direction == DMA_TO_DEVICE) {
hdr->flags |= ISCSI_FLAG_CMD_WRITE;
@@ -178,9 +198,9 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
else
ctask->imm_count = min(scsi_bufflen(sc),
conn->max_xmit_dlength);
- hton24(ctask->hdr->dlength, ctask->imm_count);
+ hton24(hdr->dlength, ctask->imm_count);
} else
- zero_data(ctask->hdr->dlength);
+ zero_data(hdr->dlength);
if (!session->initial_r2t_en) {
ctask->unsol_count = min((session->first_burst),
@@ -190,7 +210,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
if (!ctask->unsol_count)
/* No unsolicit Data-Out's */
- ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
} else {
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
zero_data(hdr->dlength);
@@ -199,13 +219,25 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
hdr->flags |= ISCSI_FLAG_CMD_READ;
}
- conn->scsicmd_pdus_cnt++;
+ /* calculate size of additional header segments (AHSs) */
+ hdrlength = ctask->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+
+ if (conn->session->tt->init_cmd_task(conn->ctask))
+ return -EIO;
- debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+ conn->scsicmd_pdus_cnt++;
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
"cmdsn %d win %d]\n",
- sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
+ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
}
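The hlength arithmetic above follows RFC 3720: TotalAHSLength counts the additional header segments in four-byte words, so the AHS bytes reserved via iscsi_add_hdr() must already be padded. A worked example (illustrative only, assuming one 12-byte AHS after the 48-byte BHS):

	/* Illustrative only */
	ctask->hdr_len = sizeof(struct iscsi_cmd) + 12;	/* 48-byte BHS + 12-byte AHS */
	hdrlength = ctask->hdr_len - sizeof(*hdr);	/* 12 */
	hdr->hlength = hdrlength / ISCSI_PAD_LEN;	/* 12 / 4 = 3 words */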
/**
@@ -218,13 +250,16 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
*/
static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
{
- struct iscsi_session *session = ctask->conn->session;
+ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = ctask->sc;
ctask->state = ISCSI_TASK_COMPLETED;
ctask->sc = NULL;
/* SCSI eh reuses commands to verify us */
sc->SCp.ptr = NULL;
+ if (conn->ctask == ctask)
+ conn->ctask = NULL;
list_del_init(&ctask->running);
__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
sc->scsi_done(sc);
@@ -241,6 +276,112 @@ static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
iscsi_complete_command(ctask);
}
+/*
+ * session lock must be held
+ */
+static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ int err)
+{
+ struct scsi_cmnd *sc;
+
+ sc = ctask->sc;
+ if (!sc)
+ return;
+
+ if (ctask->state == ISCSI_TASK_PENDING)
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ else
+ conn->session->tt->cleanup_cmd_task(conn, ctask);
+
+ sc->result = err;
+ scsi_set_resid(sc, scsi_bufflen(sc));
+ if (conn->ctask == ctask)
+ conn->ctask = NULL;
+ /* release ref from queuecommand */
+ __iscsi_put_ctask(ctask);
+}
+
+/**
+ * iscsi_free_mgmt_task - return mgmt task back to pool
+ * @conn: iscsi connection
+ * @mtask: mtask
+ *
+ * Must be called with session lock.
+ */
+void iscsi_free_mgmt_task(struct iscsi_conn *conn,
+ struct iscsi_mgmt_task *mtask)
+{
+ list_del_init(&mtask->running);
+ if (conn->login_mtask == mtask)
+ return;
+
+ if (conn->ping_mtask == mtask)
+ conn->ping_mtask = NULL;
+ __kfifo_put(conn->session->mgmtpool.queue,
+ (void*)&mtask, sizeof(void*));
+}
+EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
+
+static struct iscsi_mgmt_task *
+__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_mgmt_task *mtask;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+
+ if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
+ hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+ * Same mtask can be used. Same ITT must be used.
+ * Note that login_mtask is preallocated at conn_create().
+ */
+ mtask = conn->login_mtask;
+ else {
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+ if (!__kfifo_get(session->mgmtpool.queue,
+ (void*)&mtask, sizeof(void*)))
+ return NULL;
+ }
+
+ if (data_size) {
+ memcpy(mtask->data, data, data_size);
+ mtask->data_count = data_size;
+ } else
+ mtask->data_count = 0;
+
+ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
+ INIT_LIST_HEAD(&mtask->running);
+ list_add_tail(&mtask->running, &conn->mgmtqueue);
+ return mtask;
+}
+
+int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ int err = 0;
+
+ spin_lock_bh(&session->lock);
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
+ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+
/**
* iscsi_cmd_rsp - SCSI Command Response processing
* @conn: iscsi connection
@@ -291,17 +432,19 @@ invalid_datalen:
min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
}
- if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
int res_count = be32_to_cpu(rhdr->residual_count);
- if (res_count > 0 && res_count <= scsi_bufflen(sc))
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
scsi_set_resid(sc, res_count);
else
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
- } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
+ } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+ ISCSI_FLAG_CMD_BIDI_OVERFLOW))
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
- else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
- scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
out:
debug_scsi("done [sc %lx res %d itt 0x%x]\n",
@@ -318,18 +461,51 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
conn->tmfrsp_pdus_cnt++;
- if (conn->tmabort_state != TMABORT_INITIAL)
+ if (conn->tmf_state != TMF_QUEUED)
return;
if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
- conn->tmabort_state = TMABORT_SUCCESS;
+ conn->tmf_state = TMF_SUCCESS;
else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
- conn->tmabort_state = TMABORT_NOT_FOUND;
+ conn->tmf_state = TMF_NOT_FOUND;
else
- conn->tmabort_state = TMABORT_FAILED;
+ conn->tmf_state = TMF_FAILED;
wake_up(&conn->ehwait);
}
+static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+{
+ struct iscsi_nopout hdr;
+ struct iscsi_mgmt_task *mtask;
+
+ if (!rhdr && conn->ping_mtask)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+ hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+ hdr.flags = ISCSI_FLAG_CMD_FINAL;
+
+ if (rhdr) {
+ memcpy(hdr.lun, rhdr->lun, 8);
+ hdr.ttt = rhdr->ttt;
+ hdr.itt = RESERVED_ITT;
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!mtask) {
+ printk(KERN_ERR "Could not send nopout\n");
+ return;
+ }
+
+ /* only track our nops */
+ if (!rhdr) {
+ conn->ping_mtask = mtask;
+ conn->last_ping = jiffies;
+ }
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+}
+
static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, int datalen)
{
@@ -374,6 +550,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_mgmt_task *mtask;
uint32_t itt;
+ conn->last_recv = jiffies;
if (hdr->itt != RESERVED_ITT)
itt = get_itt(hdr->itt);
else
@@ -429,10 +606,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
*/
if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
rc = ISCSI_ERR_CONN_FAILED;
- list_del(&mtask->running);
- if (conn->login_mtask != mtask)
- __kfifo_put(session->mgmtpool.queue,
- (void*)&mtask, sizeof(void*));
+ iscsi_free_mgmt_task(conn, mtask);
break;
case ISCSI_OP_SCSI_TMFUNC_RSP:
if (datalen) {
@@ -441,20 +615,26 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
}
iscsi_tmf_rsp(conn, hdr);
+ iscsi_free_mgmt_task(conn, mtask);
break;
case ISCSI_OP_NOOP_IN:
- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
+ datalen) {
rc = ISCSI_ERR_PROTO;
break;
}
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
- rc = ISCSI_ERR_CONN_FAILED;
- list_del(&mtask->running);
- if (conn->login_mtask != mtask)
- __kfifo_put(session->mgmtpool.queue,
- (void*)&mtask, sizeof(void*));
+ if (conn->ping_mtask != mtask) {
+ /*
+ * If this is not in response to one of our
+ * nops then it must be from userspace.
+ */
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
+ datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
+ }
+ iscsi_free_mgmt_task(conn, mtask);
break;
default:
rc = ISCSI_ERR_BAD_OPCODE;
@@ -473,8 +653,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
break;
- if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
- rc = ISCSI_ERR_CONN_FAILED;
+ iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
break;
case ISCSI_OP_REJECT:
rc = iscsi_handle_reject(conn, hdr, data, datalen);
@@ -609,20 +788,19 @@ static void iscsi_prep_mtask(struct iscsi_conn *conn,
session->tt->init_mgmt_task(conn, mtask);
debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
- hdr->opcode, hdr->itt, mtask->data_count);
+ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+ mtask->data_count);
}
static int iscsi_xmit_mtask(struct iscsi_conn *conn)
{
struct iscsi_hdr *hdr = conn->mtask->hdr;
- int rc, was_logout = 0;
+ int rc;
+ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+ conn->session->state = ISCSI_STATE_LOGGING_OUT;
spin_unlock_bh(&conn->session->lock);
- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
- conn->session->state = ISCSI_STATE_IN_RECOVERY;
- iscsi_block_session(session_to_cls(conn->session));
- was_logout = 1;
- }
+
rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
spin_lock_bh(&conn->session->lock);
if (rc)
@@ -630,11 +808,6 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
/* done with this in-progress mtask */
conn->mtask = NULL;
-
- if (was_logout) {
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- return -ENODATA;
- }
return 0;
}
@@ -658,21 +831,13 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
static int iscsi_xmit_ctask(struct iscsi_conn *conn)
{
struct iscsi_cmd_task *ctask = conn->ctask;
- int rc = 0;
-
- /*
- * serialize with TMF AbortTask
- */
- if (ctask->state == ISCSI_TASK_ABORTING)
- goto done;
+ int rc;
__iscsi_get_ctask(ctask);
spin_unlock_bh(&conn->session->lock);
rc = conn->session->tt->xmit_cmd_task(conn, ctask);
spin_lock_bh(&conn->session->lock);
__iscsi_put_ctask(ctask);
-
-done:
if (!rc)
/* done with this ctask */
conn->ctask = NULL;
@@ -680,6 +845,22 @@ done:
}
/**
+ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
+ * @ctask: ctask to requeue
+ *
+ * LLDs that need to run a ctask from the session workqueue should call
+ * this. The session lock must be held.
+ */
+void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+{
+ struct iscsi_conn *conn = ctask->conn;
+
+ list_move_tail(&ctask->running, &conn->requeue);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+
+/**
* iscsi_data_xmit - xmit any command into the scheduled connection
* @conn: iscsi connection
*
@@ -717,36 +898,40 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* overflow us with nop-ins
*/
check_mgmt:
- while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
- sizeof(void*))) {
+ while (!list_empty(&conn->mgmtqueue)) {
+ conn->mtask = list_entry(conn->mgmtqueue.next,
+ struct iscsi_mgmt_task, running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+ iscsi_free_mgmt_task(conn, conn->mtask);
+ conn->mtask = NULL;
+ continue;
+ }
+
iscsi_prep_mtask(conn, conn->mtask);
- list_add_tail(&conn->mtask->running, &conn->mgmt_run_list);
+ list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
rc = iscsi_xmit_mtask(conn);
if (rc)
goto again;
}
- /* process command queue */
+ /* process pending command queue */
while (!list_empty(&conn->xmitqueue)) {
- /*
- * iscsi tcp may readd the task to the xmitqueue to send
- * write data
- */
+ if (conn->tmf_state == TMF_QUEUED)
+ break;
+
conn->ctask = list_entry(conn->xmitqueue.next,
struct iscsi_cmd_task, running);
- switch (conn->ctask->state) {
- case ISCSI_TASK_ABORTING:
- break;
- case ISCSI_TASK_PENDING:
- iscsi_prep_scsi_cmd_pdu(conn->ctask);
- conn->session->tt->init_cmd_task(conn->ctask);
- /* fall through */
- default:
- conn->ctask->state = ISCSI_TASK_RUNNING;
- break;
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+ fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ continue;
+ }
+ if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
+ fail_command(conn, conn->ctask, DID_ABORT << 16);
+ continue;
}
- list_move_tail(conn->xmitqueue.next, &conn->run_list);
+ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->xmitqueue.next, &conn->run_list);
rc = iscsi_xmit_ctask(conn);
if (rc)
goto again;
@@ -755,7 +940,28 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
 * be sent to avoid starvation
*/
- if (__kfifo_len(conn->mgmtqueue))
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+
+ while (!list_empty(&conn->requeue)) {
+ if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
+ break;
+
+ /*
+ * we always do fastlogout - conn stop code will clean up.
+ */
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+ conn->ctask = list_entry(conn->requeue.next,
+ struct iscsi_cmd_task, running);
+ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->requeue.next, &conn->run_list);
+ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
spin_unlock_bh(&conn->session->lock);
@@ -790,6 +996,7 @@ enum {
FAILURE_SESSION_TERMINATE,
FAILURE_SESSION_IN_RECOVERY,
FAILURE_SESSION_RECOVERY_TIMEOUT,
+ FAILURE_SESSION_LOGGING_OUT,
};
int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
@@ -805,8 +1012,9 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
sc->SCp.ptr = NULL;
host = sc->device->host;
- session = iscsi_hostdata(host->hostdata);
+ spin_unlock(host->host_lock);
+ session = iscsi_hostdata(host->hostdata);
spin_lock(&session->lock);
/*
@@ -822,17 +1030,22 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
* be entering our queuecommand while a block is starting
* up because the block code is not locked)
*/
- if (session->state == ISCSI_STATE_IN_RECOVERY) {
+ switch (session->state) {
+ case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
goto reject;
- }
-
- if (session->state == ISCSI_STATE_RECOVERY_FAILED)
+ case ISCSI_STATE_LOGGING_OUT:
+ reason = FAILURE_SESSION_LOGGING_OUT;
+ goto reject;
+ case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
- else if (session->state == ISCSI_STATE_TERMINATE)
+ break;
+ case ISCSI_STATE_TERMINATE:
reason = FAILURE_SESSION_TERMINATE;
- else
+ break;
+ default:
reason = FAILURE_SESSION_FREED;
+ }
goto fault;
}
@@ -859,7 +1072,6 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
atomic_set(&ctask->refcount, 1);
ctask->state = ISCSI_TASK_PENDING;
- ctask->mtask = NULL;
ctask->conn = conn;
ctask->sc = sc;
INIT_LIST_HEAD(&ctask->running);
@@ -868,11 +1080,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
spin_unlock(&session->lock);
scsi_queue_work(host, &conn->xmitwork);
+ spin_lock(host->host_lock);
return 0;
reject:
spin_unlock(&session->lock);
debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
+ spin_lock(host->host_lock);
return SCSI_MLQUEUE_HOST_BUSY;
fault:
@@ -882,6 +1096,7 @@ fault:
sc->result = (DID_NO_CONNECT << 16);
scsi_set_resid(sc, scsi_bufflen(sc));
sc->scsi_done(sc);
+ spin_lock(host->host_lock);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_queuecommand);
@@ -895,72 +1110,15 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
}
EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
-static struct iscsi_mgmt_task *
-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *data, uint32_t data_size)
-{
- struct iscsi_session *session = conn->session;
- struct iscsi_mgmt_task *mtask;
-
- if (session->state == ISCSI_STATE_TERMINATE)
- return NULL;
-
- if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
- hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
- /*
- * Login and Text are sent serially, in
- * request-followed-by-response sequence.
- * Same mtask can be used. Same ITT must be used.
- * Note that login_mtask is preallocated at conn_create().
- */
- mtask = conn->login_mtask;
- else {
- BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
- BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
-
- if (!__kfifo_get(session->mgmtpool.queue,
- (void*)&mtask, sizeof(void*)))
- return NULL;
- }
-
- if (data_size) {
- memcpy(mtask->data, data, data_size);
- mtask->data_count = data_size;
- } else
- mtask->data_count = 0;
-
- INIT_LIST_HEAD(&mtask->running);
- memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
- __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
- return mtask;
-}
-
-int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
- char *data, uint32_t data_size)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
- int err = 0;
-
- spin_lock_bh(&session->lock);
- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
- err = -EPERM;
- spin_unlock_bh(&session->lock);
- scsi_queue_work(session->host, &conn->xmitwork);
- return err;
-}
-EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
-
void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = class_to_transport_session(cls_session);
- struct iscsi_conn *conn = session->leadconn;
spin_lock_bh(&session->lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
session->state = ISCSI_STATE_RECOVERY_FAILED;
- if (conn)
- wake_up(&conn->ehwait);
+ if (session->leadconn)
+ wake_up(&session->leadconn->ehwait);
}
spin_unlock_bh(&session->lock);
}
@@ -971,30 +1129,25 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
struct Scsi_Host *host = sc->device->host;
struct iscsi_session *session = iscsi_hostdata(host->hostdata);
struct iscsi_conn *conn = session->leadconn;
- int fail_session = 0;
+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
debug_scsi("failing host reset: session terminated "
"[CID %d age %d]\n", conn->id, session->age);
spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
return FAILED;
}
- if (sc->SCp.phase == session->age) {
- debug_scsi("failing connection CID %d due to SCSI host reset\n",
- conn->id);
- fail_session = 1;
- }
spin_unlock_bh(&session->lock);
-
+ mutex_unlock(&session->eh_mutex);
/*
* we drop the lock here but the leadconn cannot be destroyed while
* we are in the scsi eh
*/
- if (fail_session)
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
debug_scsi("iscsi_eh_host_reset wait for relogin\n");
wait_event_interruptible(conn->ehwait,
@@ -1004,73 +1157,56 @@ failed:
if (signal_pending(current))
flush_signals(current);
+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_LOGGED_IN)
printk(KERN_INFO "iscsi: host reset succeeded\n");
else
goto failed;
spin_unlock_bh(&session->lock);
-
+ mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
-static void iscsi_tmabort_timedout(unsigned long data)
+static void iscsi_tmf_timedout(unsigned long data)
{
- struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
- struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_conn *conn = (struct iscsi_conn *)data;
struct iscsi_session *session = conn->session;
spin_lock(&session->lock);
- if (conn->tmabort_state == TMABORT_INITIAL) {
- conn->tmabort_state = TMABORT_TIMEDOUT;
- debug_scsi("tmabort timedout [sc %p itt 0x%x]\n",
- ctask->sc, ctask->itt);
+ if (conn->tmf_state == TMF_QUEUED) {
+ conn->tmf_state = TMF_TIMEDOUT;
+ debug_scsi("tmf timedout\n");
/* unblock eh_abort() */
wake_up(&conn->ehwait);
}
spin_unlock(&session->lock);
}
-static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
- struct iscsi_cmd_task *ctask)
+static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ struct iscsi_tm *hdr, int age,
+ int timeout)
{
- struct iscsi_conn *conn = ctask->conn;
struct iscsi_session *session = conn->session;
- struct iscsi_tm *hdr = &conn->tmhdr;
-
- /*
- * ctask timed out but session is OK requests must be serialized.
- */
- memset(hdr, 0, sizeof(struct iscsi_tm));
- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
- hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
- hdr->rtt = ctask->hdr->itt;
- hdr->refcmdsn = ctask->hdr->cmdsn;
+ struct iscsi_mgmt_task *mtask;
- ctask->mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (!ctask->mtask) {
+ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+ if (!mtask) {
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- spin_lock_bh(&session->lock)
- debug_scsi("abort sent failure [itt 0x%x]\n", ctask->itt);
+ spin_lock_bh(&session->lock);
+ debug_scsi("tmf exec failure\n");
return -EPERM;
}
- ctask->state = ISCSI_TASK_ABORTING;
+ conn->tmfcmd_pdus_cnt++;
+ conn->tmf_timer.expires = timeout * HZ + jiffies;
+ conn->tmf_timer.function = iscsi_tmf_timedout;
+ conn->tmf_timer.data = (unsigned long)conn;
+ add_timer(&conn->tmf_timer);
+ debug_scsi("tmf set timeout\n");
- debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
-
- if (conn->tmabort_state == TMABORT_INITIAL) {
- conn->tmfcmd_pdus_cnt++;
- conn->tmabort_timer.expires = 20*HZ + jiffies;
- conn->tmabort_timer.function = iscsi_tmabort_timedout;
- conn->tmabort_timer.data = (unsigned long)ctask;
- add_timer(&conn->tmabort_timer);
- debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
- }
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
scsi_queue_work(session->host, &conn->xmitwork);
@@ -1078,113 +1214,197 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
/*
* block eh thread until:
*
- * 1) abort response
- * 2) abort timeout
+ * 1) tmf response
+ * 2) tmf timeout
* 3) session is terminated or restarted or userspace has
* given up on recovery
*/
- wait_event_interruptible(conn->ehwait,
- sc->SCp.phase != session->age ||
+ wait_event_interruptible(conn->ehwait, age != session->age ||
session->state != ISCSI_STATE_LOGGED_IN ||
- conn->tmabort_state != TMABORT_INITIAL);
+ conn->tmf_state != TMF_QUEUED);
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&conn->tmabort_timer);
+ del_timer_sync(&conn->tmf_timer);
+
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
+ /* if the session drops it will clean up the mtask */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
return 0;
}
/*
- * session lock must be held
+ * Fail commands. session lock held and recv side suspended and xmit
+ * thread flushed
*/
-static struct iscsi_mgmt_task *
-iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
{
- int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
- struct iscsi_mgmt_task *task;
+ struct iscsi_cmd_task *ctask, *tmp;
- debug_scsi("searching %d tasks\n", nr_tasks);
+ if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
+ conn->ctask = NULL;
- for (i = 0; i < nr_tasks; i++) {
- __kfifo_get(fifo, (void*)&task, sizeof(void*));
- debug_scsi("check task %u\n", task->itt);
+ /* flush pending */
+ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
+ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing pending sc %p itt 0x%x\n",
+ ctask->sc, ctask->itt);
+ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+ }
- if (task->itt == itt) {
- debug_scsi("matched task\n");
- return task;
+ list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
+ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing requeued sc %p itt 0x%x\n",
+ ctask->sc, ctask->itt);
+ fail_command(conn, ctask, DID_BUS_BUSY << 16);
}
+ }
- __kfifo_put(fifo, (void*)&task, sizeof(void*));
+ /* fail all other running */
+ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
+ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing in progress sc %p itt 0x%x\n",
+ ctask->sc, ctask->itt);
+ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
}
- return NULL;
}
-static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
+static void iscsi_suspend_tx(struct iscsi_conn *conn)
{
- struct iscsi_conn *conn = ctask->conn;
- struct iscsi_session *session = conn->session;
-
- if (!ctask->mtask)
- return -EINVAL;
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ scsi_flush_work(conn->session->host);
+}
- if (!iscsi_remove_mgmt_task(conn->mgmtqueue, ctask->mtask->itt))
- list_del(&ctask->mtask->running);
- __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
- sizeof(void*));
- ctask->mtask = NULL;
- return 0;
+static void iscsi_start_tx(struct iscsi_conn *conn)
+{
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
}
-/*
- * session lock must be held
- */
-static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
- int err)
+static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
{
- struct scsi_cmnd *sc;
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
- sc = ctask->sc;
- if (!sc)
- return;
+ cls_session = starget_to_session(scsi_target(scmd->device));
+ session = class_to_transport_session(cls_session);
- if (ctask->state == ISCSI_TASK_PENDING)
+ debug_scsi("scsi cmd %p timedout\n", scmd);
+
+ spin_lock(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
- * cmd never made it to the xmit thread, so we should not count
- * the cmd in the sequencing
+ * We are probably in the middle of iscsi recovery so let
+ * that complete and handle the error.
*/
- conn->session->queued_cmdsn--;
- else
- conn->session->tt->cleanup_cmd_task(conn, ctask);
- iscsi_ctask_mtask_cleanup(ctask);
+ rc = EH_RESET_TIMER;
+ goto done;
+ }
- sc->result = err;
- scsi_set_resid(sc, scsi_bufflen(sc));
- if (conn->ctask == ctask)
- conn->ctask = NULL;
- /* release ref from queuecommand */
- __iscsi_put_ctask(ctask);
+ conn = session->leadconn;
+ if (!conn) {
+ /* In the middle of shutting down */
+ rc = EH_RESET_TIMER;
+ goto done;
+ }
+
+ if (!conn->recv_timeout && !conn->ping_timeout)
+ goto done;
+ /*
+ * if the ping timedout then we are in the middle of cleaning up
+ * and can let the iscsi eh handle it
+ */
+ if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+ (conn->ping_timeout * HZ), jiffies))
+ rc = EH_RESET_TIMER;
+ /*
+ * if we are about to check the transport then give the command
+ * more time
+ */
+ if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
+ jiffies))
+ rc = EH_RESET_TIMER;
+ /* if in the middle of checking the transport then give us more time */
+ if (conn->ping_mtask)
+ rc = EH_RESET_TIMER;
+done:
+ spin_unlock(&session->lock);
+ debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+ return rc;
}
-static void iscsi_suspend_tx(struct iscsi_conn *conn)
+static void iscsi_check_transport_timeouts(unsigned long data)
{
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- scsi_flush_work(conn->session->host);
+ struct iscsi_conn *conn = (struct iscsi_conn *)data;
+ struct iscsi_session *session = conn->session;
+ unsigned long timeout, next_timeout = 0, last_recv;
+
+ spin_lock(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN)
+ goto done;
+
+ timeout = conn->recv_timeout;
+ if (!timeout)
+ goto done;
+
+ timeout *= HZ;
+ last_recv = conn->last_recv;
+ if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
+ jiffies)) {
+ printk(KERN_ERR "ping timeout of %d secs expired, "
+ "last rx %lu, last ping %lu, now %lu\n",
+ conn->ping_timeout, last_recv,
+ conn->last_ping, jiffies);
+ spin_unlock(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return;
+ }
+
+ if (time_before_eq(last_recv + timeout, jiffies)) {
+ if (time_before_eq(conn->last_ping, last_recv)) {
+ /* send a ping to try to provoke some traffic */
+ debug_scsi("Sending nopout as ping on conn %p\n", conn);
+ iscsi_send_nopout(conn, NULL);
+ }
+ next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
+ } else {
+ next_timeout = last_recv + timeout;
+ }
+
+ if (next_timeout) {
+ debug_scsi("Setting next tmo %lu\n", next_timeout);
+ mod_timer(&conn->transport_timer, next_timeout);
+ }
+done:
+ spin_unlock(&session->lock);
}
-static void iscsi_start_tx(struct iscsi_conn *conn)
+static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_tm *hdr)
{
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- scsi_queue_work(conn->session->host, &conn->xmitwork);
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+ hdr->rtt = ctask->hdr->itt;
+ hdr->refcmdsn = ctask->hdr->cmdsn;
}
int iscsi_eh_abort(struct scsi_cmnd *sc)
{
struct Scsi_Host *host = sc->device->host;
struct iscsi_session *session = iscsi_hostdata(host->hostdata);
- struct iscsi_cmd_task *ctask;
struct iscsi_conn *conn;
- int rc;
+ struct iscsi_cmd_task *ctask;
+ struct iscsi_tm *hdr;
+ int rc, age;
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
@@ -1199,19 +1419,23 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
- ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
- conn = ctask->conn;
-
- conn->eh_abort_cnt++;
- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
-
/*
* If we are not logged in or we have started a new session
* then let the host reset code handle this
*/
- if (session->state != ISCSI_STATE_LOGGED_IN ||
- sc->SCp.phase != session->age)
- goto failed;
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
+ sc->SCp.phase != session->age) {
+ spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+
+ conn = session->leadconn;
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
+ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
/* ctask completed before time out */
if (!ctask->sc) {
@@ -1219,27 +1443,26 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
goto success;
}
- /* what should we do here ? */
- if (conn->ctask == ctask) {
- printk(KERN_INFO "iscsi: sc %p itt 0x%x partially sent. "
- "Failing abort\n", sc, ctask->itt);
- goto failed;
- }
-
if (ctask->state == ISCSI_TASK_PENDING) {
fail_command(conn, ctask, DID_ABORT << 16);
goto success;
}
- conn->tmabort_state = TMABORT_INITIAL;
- rc = iscsi_exec_abort_task(sc, ctask);
- if (rc || sc->SCp.phase != session->age ||
- session->state != ISCSI_STATE_LOGGED_IN)
+ /* only have one tmf outstanding at a time */
+ if (conn->tmf_state != TMF_INITIAL)
+ goto failed;
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+ iscsi_prep_abort_task_pdu(ctask, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
goto failed;
- iscsi_ctask_mtask_cleanup(ctask);
+ }
- switch (conn->tmabort_state) {
- case TMABORT_SUCCESS:
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
spin_unlock_bh(&session->lock);
iscsi_suspend_tx(conn);
/*
@@ -1248,22 +1471,26 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
write_lock_bh(conn->recv_lock);
spin_lock(&session->lock);
fail_command(conn, ctask, DID_ABORT << 16);
+ conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
write_unlock_bh(conn->recv_lock);
iscsi_start_tx(conn);
goto success_unlocked;
- case TMABORT_NOT_FOUND:
- if (!ctask->sc) {
+ case TMF_TIMEDOUT:
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ goto failed_unlocked;
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
/* ctask completed before tmf abort response */
debug_scsi("sc completed while abort in progress\n");
goto success;
}
/* fall through */
default:
- /* timedout or failed */
- spin_unlock_bh(&session->lock);
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- goto failed_unlocked;
+ conn->tmf_state = TMF_INITIAL;
+ goto failed;
}
success:
@@ -1276,65 +1503,152 @@ success_unlocked:
failed:
spin_unlock_bh(&session->lock);
failed_unlocked:
- debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+ ctask ? ctask->itt : 0);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+{
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+ hdr->rtt = RESERVED_ITT;
+}
+
+int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+{
+ struct Scsi_Host *host = sc->device->host;
+ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+ debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ /*
+ * Just check if we are not logged in. We cannot check for
+ * the phase because the reset could come from an ioctl.
+ */
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+ goto unlock;
+ conn = session->leadconn;
+
+ /* only have one tmf outstanding at a time */
+ if (conn->tmf_state != TMF_INITIAL)
+ goto unlock;
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+ iscsi_prep_lun_reset_pdu(sc, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+ session->lu_reset_timeout)) {
+ rc = FAILED;
+ goto unlock;
+ }
+
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ break;
+ case TMF_TIMEDOUT:
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ goto done;
+ default:
+ conn->tmf_state = TMF_INITIAL;
+ goto unlock;
+ }
+
+ rc = SUCCESS;
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
+ /* need to grab the recv lock then session lock */
+ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_all_commands(conn, sc->device->lun);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
+ write_unlock_bh(conn->recv_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+
+unlock:
+ spin_unlock_bh(&session->lock);
+done:
+ debug_scsi("iscsi_eh_device_reset %s\n",
+ rc == SUCCESS ? "SUCCESS" : "FAILED");
+ mutex_unlock(&session->eh_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
+
+/*
+ * Pre-allocate a pool of @max items of @item_size. By default, the pool
+ * should be accessed via kfifo_{get,put} on q->queue.
+ * Optionally, the caller can obtain the array of object pointers
+ * by passing in a non-NULL @items pointer
+ */
int
-iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
+iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
{
- int i;
+ int i, num_arrays = 1;
- *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
- if (*items == NULL)
- return -ENOMEM;
+ memset(q, 0, sizeof(*q));
q->max = max;
- q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
- if (q->pool == NULL) {
- kfree(*items);
- return -ENOMEM;
- }
+
+ /* If the user passed an items pointer, he wants a copy of
+ * the array. */
+ if (items)
+ num_arrays++;
+ q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+ if (q->pool == NULL)
+ goto enomem;
q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
GFP_KERNEL, NULL);
- if (q->queue == ERR_PTR(-ENOMEM)) {
- kfree(q->pool);
- kfree(*items);
- return -ENOMEM;
- }
+ if (q->queue == ERR_PTR(-ENOMEM))
+ goto enomem;
for (i = 0; i < max; i++) {
- q->pool[i] = kmalloc(item_size, GFP_KERNEL);
+ q->pool[i] = kzalloc(item_size, GFP_KERNEL);
if (q->pool[i] == NULL) {
- int j;
-
- for (j = 0; j < i; j++)
- kfree(q->pool[j]);
-
- kfifo_free(q->queue);
- kfree(q->pool);
- kfree(*items);
- return -ENOMEM;
+ q->max = i;
+ goto enomem;
}
- memset(q->pool[i], 0, item_size);
- (*items)[i] = q->pool[i];
__kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
}
+
+ if (items) {
+ *items = q->pool + max;
+ memcpy(*items, q->pool, max * sizeof(void *));
+ }
+
return 0;
+
+enomem:
+ iscsi_pool_free(q);
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(iscsi_pool_init);
-void iscsi_pool_free(struct iscsi_queue *q, void **items)
+void iscsi_pool_free(struct iscsi_pool *q)
{
int i;
for (i = 0; i < q->max; i++)
- kfree(items[i]);
- kfree(q->pool);
- kfree(items);
+ kfree(q->pool[i]);
+ if (q->pool)
+ kfree(q->pool);
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
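[Editorial sketch of the reworked pool API, not from the patch: with the new signature the optional items array aliases storage inside q->pool, so teardown only needs iscsi_pool_free(). The element type and count below are placeholders.]

static int example_pool_setup(void)
{
        struct iscsi_pool pool;
        struct iscsi_cmd_task **cmds;

        /* one allocation now backs both the kfifo and the cmds[] copy */
        if (iscsi_pool_init(&pool, 64, (void ***)&cmds,
                            sizeof(struct iscsi_cmd_task)))
                return -ENOMEM;

        /* cmds[i] indexes the objects; pool.queue hands them out */
        iscsi_pool_free(&pool);
        return 0;
}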
@@ -1387,7 +1701,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
qdepth = ISCSI_DEF_CMD_PER_LUN;
}
- if (cmds_max < 2 || (cmds_max & (cmds_max - 1)) ||
+ if (!is_power_of_2(cmds_max) ||
cmds_max >= ISCSI_MGMT_ITT_OFFSET) {
if (cmds_max != 0)
printk(KERN_ERR "iscsi: invalid can_queue of %d. "
@@ -1411,12 +1725,16 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
shost->max_cmd_len = iscsit->max_cmd_len;
shost->transportt = scsit;
shost->transportt->create_work_queue = 1;
+ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
*hostno = shost->host_no;
session = iscsi_hostdata(shost->hostdata);
memset(session, 0, sizeof(struct iscsi_session));
session->host = shost;
session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
session->cmds_max = cmds_max;
session->queued_cmdsn = session->cmdsn = initial_cmdsn;
@@ -1479,9 +1797,9 @@ module_put:
cls_session_fail:
scsi_remove_host(shost);
add_host_fail:
- iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
+ iscsi_pool_free(&session->mgmtpool);
mgmtpool_alloc_fail:
- iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ iscsi_pool_free(&session->cmdpool);
cmdpool_alloc_fail:
scsi_host_put(shost);
return NULL;
@@ -1501,11 +1819,11 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
struct module *owner = cls_session->transport->owner;
- iscsi_unblock_session(cls_session);
+ iscsi_remove_session(cls_session);
scsi_remove_host(shost);
- iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
- iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ iscsi_pool_free(&session->mgmtpool);
+ iscsi_pool_free(&session->cmdpool);
kfree(session->password);
kfree(session->password_in);
@@ -1516,7 +1834,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->hwaddress);
kfree(session->initiatorname);
- iscsi_destroy_session(cls_session);
+ iscsi_free_session(cls_session);
scsi_host_put(shost);
module_put(owner);
}
@@ -1546,17 +1864,17 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
conn->id = conn_idx;
conn->exp_statsn = 0;
- conn->tmabort_state = TMABORT_INITIAL;
+ conn->tmf_state = TMF_INITIAL;
+
+ init_timer(&conn->transport_timer);
+ conn->transport_timer.data = (unsigned long)conn;
+ conn->transport_timer.function = iscsi_check_transport_timeouts;
+
INIT_LIST_HEAD(&conn->run_list);
INIT_LIST_HEAD(&conn->mgmt_run_list);
+ INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->xmitqueue);
-
- /* initialize general immediate & non-immediate PDU commands queue */
- conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
- goto mgmtqueue_alloc_fail;
-
+ INIT_LIST_HEAD(&conn->requeue);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_mtask used for the login/text sequences */
@@ -1574,7 +1892,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
goto login_mtask_data_alloc_fail;
conn->login_mtask->data = conn->data = data;
- init_timer(&conn->tmabort_timer);
+ init_timer(&conn->tmf_timer);
init_waitqueue_head(&conn->ehwait);
return cls_conn;
@@ -1583,8 +1901,6 @@ login_mtask_data_alloc_fail:
__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
sizeof(void*));
login_mtask_alloc_fail:
- kfifo_free(conn->mgmtqueue);
-mgmtqueue_alloc_fail:
iscsi_destroy_conn(cls_conn);
return NULL;
}
@@ -1603,8 +1919,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *session = conn->session;
unsigned long flags;
+ del_timer_sync(&conn->transport_timer);
+
spin_lock_bh(&session->lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
/*
@@ -1637,7 +1954,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
/* flush queued up work because we free the connection below */
- scsi_flush_work(session->host);
+ iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
kfree(conn->data);
@@ -1648,8 +1965,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
session->leadconn = NULL;
spin_unlock_bh(&session->lock);
- kfifo_free(conn->mgmtqueue);
-
iscsi_destroy_conn(cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
@@ -1672,11 +1987,29 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
return -EINVAL;
}
+ if (conn->ping_timeout && !conn->recv_timeout) {
+ printk(KERN_ERR "iscsi: invalid recv timeout of zero "
+ "Using 5 seconds\n.");
+ conn->recv_timeout = 5;
+ }
+
+ if (conn->recv_timeout && !conn->ping_timeout) {
+ printk(KERN_ERR "iscsi: invalid ping timeout of zero "
+ "Using 5 seconds.\n");
+ conn->ping_timeout = 5;
+ }
+
spin_lock_bh(&session->lock);
conn->c_stage = ISCSI_CONN_STARTED;
session->state = ISCSI_STATE_LOGGED_IN;
session->queued_cmdsn = session->cmdsn;
+ conn->last_recv = jiffies;
+ conn->last_ping = jiffies;
+ if (conn->recv_timeout && conn->ping_timeout)
+ mod_timer(&conn->transport_timer,
+ jiffies + (conn->recv_timeout * HZ));
+
switch(conn->stop_stage) {
case STOP_CONN_RECOVER:
/*
@@ -1684,7 +2017,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
* commands after successful recovery
*/
conn->stop_stage = 0;
- conn->tmabort_state = TMABORT_INITIAL;
+ conn->tmf_state = TMF_INITIAL;
session->age++;
spin_unlock_bh(&session->lock);
@@ -1709,55 +2042,27 @@ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
struct iscsi_mgmt_task *mtask, *tmp;
/* handle pending */
- while (__kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
- if (mtask == conn->login_mtask)
- continue;
+ list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
- __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
- sizeof(void*));
+ iscsi_free_mgmt_task(conn, mtask);
}
/* handle running */
list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
- list_del(&mtask->running);
-
- if (mtask == conn->login_mtask)
- continue;
- __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
- sizeof(void*));
+ iscsi_free_mgmt_task(conn, mtask);
}
conn->mtask = NULL;
}
-/* Fail commands. Mutex and session lock held and recv side suspended */
-static void fail_all_commands(struct iscsi_conn *conn)
-{
- struct iscsi_cmd_task *ctask, *tmp;
-
- /* flush pending */
- list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
- debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
- ctask->itt);
- fail_command(conn, ctask, DID_BUS_BUSY << 16);
- }
-
- /* fail all other running */
- list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
- debug_scsi("failing in progress sc %p itt 0x%x\n",
- ctask->sc, ctask->itt);
- fail_command(conn, ctask, DID_BUS_BUSY << 16);
- }
-
- conn->ctask = NULL;
-}
-
static void iscsi_start_session_recovery(struct iscsi_session *session,
struct iscsi_conn *conn, int flag)
{
int old_stop_stage;
+ del_timer_sync(&conn->transport_timer);
+
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
if (conn->stop_stage == STOP_CONN_TERM) {
@@ -1818,7 +2123,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
- fail_all_commands(conn);
+ fail_all_commands(conn, -1);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -1869,6 +2174,21 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
uint32_t value;
switch(param) {
+ case ISCSI_PARAM_FAST_ABORT:
+ sscanf(buf, "%d", &session->fast_abort);
+ break;
+ case ISCSI_PARAM_ABORT_TMO:
+ sscanf(buf, "%d", &session->abort_timeout);
+ break;
+ case ISCSI_PARAM_LU_RESET_TMO:
+ sscanf(buf, "%d", &session->lu_reset_timeout);
+ break;
+ case ISCSI_PARAM_PING_TMO:
+ sscanf(buf, "%d", &conn->ping_timeout);
+ break;
+ case ISCSI_PARAM_RECV_TMO:
+ sscanf(buf, "%d", &conn->recv_timeout);
+ break;
case ISCSI_PARAM_MAX_RECV_DLENGTH:
sscanf(buf, "%d", &conn->max_recv_dlength);
break;
@@ -1983,6 +2303,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
int len;
switch(param) {
+ case ISCSI_PARAM_FAST_ABORT:
+ len = sprintf(buf, "%d\n", session->fast_abort);
+ break;
+ case ISCSI_PARAM_ABORT_TMO:
+ len = sprintf(buf, "%d\n", session->abort_timeout);
+ break;
+ case ISCSI_PARAM_LU_RESET_TMO:
+ len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+ break;
case ISCSI_PARAM_INITIAL_R2T_EN:
len = sprintf(buf, "%d\n", session->initial_r2t_en);
break;
@@ -2040,6 +2369,12 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
int len;
switch(param) {
+ case ISCSI_PARAM_PING_TMO:
+ len = sprintf(buf, "%u\n", conn->ping_timeout);
+ break;
+ case ISCSI_PARAM_RECV_TMO:
+ len = sprintf(buf, "%u\n", conn->recv_timeout);
+ break;
case ISCSI_PARAM_MAX_RECV_DLENGTH:
len = sprintf(buf, "%u\n", conn->max_recv_dlength);
break;
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index c01a40d321d4..18f33cd54411 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -38,6 +38,15 @@ config SCSI_SAS_ATA
Builds in ATA support into libsas. Will necessitate
the loading of libata along with libsas.
+config SCSI_SAS_HOST_SMP
+ bool "Support for SMP interpretation for SAS hosts"
+ default y
+ depends on SCSI_SAS_LIBSAS
+ help
+ Allows sas hosts to receive SMP frames. Selecting this
+ option builds an SMP interpreter into libsas. Say
+ N here if you want to save the few kb this consumes.
+
config SCSI_SAS_LIBSAS_DEBUG
bool "Compile the SAS Domain Transport Attributes in debug mode"
default y
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index fd387b91856e..1ad1323c60fa 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -33,5 +33,7 @@ libsas-y += sas_init.o \
sas_dump.o \
sas_discover.o \
sas_expander.o \
- sas_scsi_host.o
+ sas_scsi_host.o \
+ sas_task.o
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
+libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
\ No newline at end of file
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 827cfb132f21..0996f866f14c 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -498,7 +498,7 @@ static int sas_execute_task(struct sas_task *task, void *buffer, int size,
goto ex_err;
}
wait_for_completion(&task->completion);
- res = -ETASK;
+ res = -ECOMM;
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
int res2;
SAS_DPRINTK("task aborted, flags:0x%x\n",
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 5f3a0d7b18de..31b9af224243 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -98,7 +98,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
dev->dev_type = SATA_PM;
else
dev->dev_type = SATA_DEV;
- dev->tproto = SATA_PROTO;
+ dev->tproto = SAS_PROTOCOL_SATA;
} else {
struct sas_identify_frame *id =
(struct sas_identify_frame *) dev->frame_rcvd;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 8727436b222d..aefd865a5788 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -96,7 +96,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
}
wait_for_completion(&task->completion);
- res = -ETASK;
+ res = -ECOMM;
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
SAS_DPRINTK("smp task timed out or aborted\n");
i->dft->lldd_abort_task(task);
@@ -109,6 +109,16 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
task->task_status.stat == SAM_GOOD) {
res = 0;
break;
+ } if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ } if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ res = -EMSGSIZE;
+ break;
} else {
SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
"status 0x%x\n", __FUNCTION__,
@@ -656,9 +666,9 @@ static struct domain_device *sas_ex_discover_end_dev(
sas_ex_get_linkrate(parent, child, phy);
#ifdef CONFIG_SCSI_SAS_ATA
- if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
+ if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
child->dev_type = SATA_DEV;
- if (phy->attached_tproto & SAS_PROTO_STP)
+ if (phy->attached_tproto & SAS_PROTOCOL_STP)
child->tproto = phy->attached_tproto;
if (phy->attached_sata_dev)
child->tproto |= SATA_DEV;
@@ -695,7 +705,7 @@ static struct domain_device *sas_ex_discover_end_dev(
}
} else
#endif
- if (phy->attached_tproto & SAS_PROTO_SSP) {
+ if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
child->dev_type = SAS_END_DEV;
rphy = sas_end_device_alloc(phy->port);
/* FIXME: error handling */
@@ -1896,11 +1906,9 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
/* no rphy means no smp target support (ie aic94xx host) */
- if (!rphy) {
- printk("%s: can we send a smp request to a host?\n",
- __FUNCTION__);
- return -EINVAL;
- }
+ if (!rphy)
+ return sas_smp_host_handler(shost, req, rsp);
+
type = rphy->identify.device_type;
if (type != SAS_EDGE_EXPANDER_DEVICE &&
@@ -1926,6 +1934,15 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
bio_data(rsp->bio), rsp->data_len);
+ if (ret > 0) {
+ /* positive number is the untransferred residual */
+ rsp->data_len = ret;
+ req->data_len = 0;
+ ret = 0;
+ } else if (ret == 0) {
+ rsp->data_len = 0;
+ req->data_len = 0;
+ }
return ret;
}
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
new file mode 100644
index 000000000000..16f93123271e
--- /dev/null
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -0,0 +1,274 @@
+/*
+ * Serial Attached SCSI (SAS) Expander discovery and configuration
+ *
+ * Copyright (C) 2007 James E.J. Bottomley
+ * <James.Bottomley@HansenPartnership.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 only.
+ */
+#include <linux/scatterlist.h>
+#include <linux/blkdev.h>
+
+#include "sas_internal.h"
+
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include "../scsi_sas_internal.h"
+
+static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
+ u8 phy_id)
+{
+ struct sas_phy *phy;
+ struct sas_rphy *rphy;
+
+ if (phy_id >= sas_ha->num_phys) {
+ resp_data[2] = SMP_RESP_NO_PHY;
+ return;
+ }
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+
+ phy = sas_ha->sas_phy[phy_id]->phy;
+ resp_data[9] = phy_id;
+ resp_data[13] = phy->negotiated_linkrate;
+ memcpy(resp_data + 16, sas_ha->sas_addr, SAS_ADDR_SIZE);
+ memcpy(resp_data + 24, sas_ha->sas_phy[phy_id]->attached_sas_addr,
+ SAS_ADDR_SIZE);
+ resp_data[40] = (phy->minimum_linkrate << 4) |
+ phy->minimum_linkrate_hw;
+ resp_data[41] = (phy->maximum_linkrate << 4) |
+ phy->maximum_linkrate_hw;
+
+ if (!sas_ha->sas_phy[phy_id]->port ||
+ !sas_ha->sas_phy[phy_id]->port->port_dev)
+ return;
+
+ rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
+ resp_data[12] = rphy->identify.device_type << 4;
+ resp_data[14] = rphy->identify.initiator_port_protocols;
+ resp_data[15] = rphy->identify.target_port_protocols;
+}
+
+static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
+ u8 phy_id)
+{
+ struct sas_rphy *rphy;
+ struct dev_to_host_fis *fis;
+ int i;
+
+ if (phy_id >= sas_ha->num_phys) {
+ resp_data[2] = SMP_RESP_NO_PHY;
+ return;
+ }
+
+ resp_data[2] = SMP_RESP_PHY_NO_SATA;
+
+ if (!sas_ha->sas_phy[phy_id]->port)
+ return;
+
+ rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
+ fis = (struct dev_to_host_fis *)
+ sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd;
+ if (rphy->identify.target_port_protocols != SAS_PROTOCOL_SATA)
+ return;
+
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ resp_data[9] = phy_id;
+ memcpy(resp_data + 16, sas_ha->sas_phy[phy_id]->attached_sas_addr,
+ SAS_ADDR_SIZE);
+
+ /* check to see if we have a valid d2h fis */
+ if (fis->fis_type != 0x34)
+ return;
+
+ /* the d2h fis is required by the standard to be in LE format */
+ for (i = 0; i < 20; i += 4) {
+ u8 *dst = resp_data + 24 + i, *src =
+ &sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd[i];
+ dst[0] = src[3];
+ dst[1] = src[2];
+ dst[2] = src[1];
+ dst[3] = src[0];
+ }
+}
+
+static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
+ u8 phy_op, enum sas_linkrate min,
+ enum sas_linkrate max, u8 *resp_data)
+{
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_phy_linkrates rates;
+
+ if (phy_id >= sas_ha->num_phys) {
+ resp_data[2] = SMP_RESP_NO_PHY;
+ return;
+ }
+ switch (phy_op) {
+ case PHY_FUNC_NOP:
+ case PHY_FUNC_LINK_RESET:
+ case PHY_FUNC_HARD_RESET:
+ case PHY_FUNC_DISABLE:
+ case PHY_FUNC_CLEAR_ERROR_LOG:
+ case PHY_FUNC_CLEAR_AFFIL:
+ case PHY_FUNC_TX_SATA_PS_SIGNAL:
+ break;
+
+ default:
+ resp_data[2] = SMP_RESP_PHY_UNK_OP;
+ return;
+ }
+
+ rates.minimum_linkrate = min;
+ rates.maximum_linkrate = max;
+
+ if (i->dft->lldd_control_phy(sas_ha->sas_phy[phy_id], phy_op, &rates))
+ resp_data[2] = SMP_RESP_FUNC_FAILED;
+ else
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+}
+
+int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
+ struct request *rsp)
+{
+ u8 *req_data = NULL, *resp_data = NULL, *buf;
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ int error = -EINVAL, resp_data_len = rsp->data_len;
+
+ /* eight is the minimum size for request and response frames */
+ if (req->data_len < 8 || rsp->data_len < 8)
+ goto out;
+
+ if (bio_offset(req->bio) + req->data_len > PAGE_SIZE ||
+ bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) {
+ shost_printk(KERN_ERR, shost,
+ "SMP request/response frame crosses page boundary");
+ goto out;
+ }
+
+ req_data = kzalloc(req->data_len, GFP_KERNEL);
+
+ /* make sure frame can always be built ... we copy
+ * back only the requested length */
+ resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL);
+
+ if (!req_data || !resp_data) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ local_irq_disable();
+ buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
+ memcpy(req_data, buf, req->data_len);
+ kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
+ local_irq_enable();
+
+ if (req_data[0] != SMP_REQUEST)
+ goto out;
+
+ /* always succeeds ... even if we can't process the request
+ * the result is in the response frame */
+ error = 0;
+
+ /* set up default don't know response */
+ resp_data[0] = SMP_RESPONSE;
+ resp_data[1] = req_data[1];
+ resp_data[2] = SMP_RESP_FUNC_UNK;
+
+ switch (req_data[1]) {
+ case SMP_REPORT_GENERAL:
+ req->data_len -= 8;
+ resp_data_len -= 32;
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ resp_data[9] = sas_ha->num_phys;
+ break;
+
+ case SMP_REPORT_MANUF_INFO:
+ req->data_len -= 8;
+ resp_data_len -= 64;
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ memcpy(resp_data + 12, shost->hostt->name,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ memcpy(resp_data + 20, "libsas virt phy",
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ break;
+
+ case SMP_READ_GPIO_REG:
+ /* FIXME: need GPIO support in the transport class */
+ break;
+
+ case SMP_DISCOVER:
+ req->data_len -= 16;
+ if (req->data_len < 0) {
+ req->data_len = 0;
+ error = -EINVAL;
+ goto out;
+ }
+ resp_data_len -= 56;
+ sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
+ break;
+
+ case SMP_REPORT_PHY_ERR_LOG:
+ /* FIXME: could implement this with additional
+ * libsas callbacks providing the HW supports it */
+ break;
+
+ case SMP_REPORT_PHY_SATA:
+ req->data_len -= 16;
+ if (req->data_len < 0) {
+ req->data_len = 0;
+ error = -EINVAL;
+ goto out;
+ }
+ resp_data_len -= 60;
+ sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
+ break;
+
+ case SMP_REPORT_ROUTE_INFO:
+ /* Can't implement; hosts have no routes */
+ break;
+
+ case SMP_WRITE_GPIO_REG:
+ /* FIXME: need GPIO support in the transport class */
+ break;
+
+ case SMP_CONF_ROUTE_INFO:
+ /* Can't implement; hosts have no routes */
+ break;
+
+ case SMP_PHY_CONTROL:
+ req->data_len -= 44;
+ if (req->data_len < 0) {
+ req->data_len = 0;
+ error = -EINVAL;
+ goto out;
+ }
+ resp_data_len -= 8;
+ sas_phy_control(sas_ha, req_data[9], req_data[10],
+ req_data[32] >> 4, req_data[33] >> 4,
+ resp_data);
+ break;
+
+ case SMP_PHY_TEST_FUNCTION:
+ /* FIXME: should this be implemented? */
+ break;
+
+ default:
+ /* probably a 2.0 function */
+ break;
+ }
+
+ local_irq_disable();
+ buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
+ memcpy(buf, resp_data, rsp->data_len);
+ flush_kernel_dcache_page(bio_page(rsp->bio));
+ kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
+ local_irq_enable();
+ rsp->data_len = resp_data_len;
+
+ out:
+ kfree(req_data);
+ kfree(resp_data);
+ return error;
+}
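[For reference only, not part of the patch: the smallest request the handler above accepts is an 8-byte SMP frame. A REPORT GENERAL request would look like the sketch below, with the function result returned in byte 2 of the response frame.]

/* Hypothetical 8-byte SMP REPORT GENERAL request frame */
static const u8 smp_report_general_req[8] = {
        SMP_REQUEST,            /* 0x40: frame type */
        SMP_REPORT_GENERAL,     /* 0x00: function   */
        /* remaining bytes stay zero */
};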
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 2b8213b1832d..b4f9368f116a 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -45,7 +45,7 @@
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_show_class(enum sas_class class, char *buf);
-int sas_show_proto(enum sas_proto proto, char *buf);
+int sas_show_proto(enum sas_protocol proto, char *buf);
int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
@@ -80,6 +80,20 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
void sas_hae_reset(struct work_struct *work);
+#ifdef CONFIG_SCSI_SAS_HOST_SMP
+extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
+ struct request *rsp);
+#else
+static inline int sas_smp_host_handler(struct Scsi_Host *shost,
+ struct request *req,
+ struct request *rsp)
+{
+ shost_printk(KERN_ERR, shost,
+ "Cannot send SMP to a sas host (not enabled in CONFIG)\n");
+ return -EINVAL;
+}
+#endif
+
static inline void sas_queue_event(int event, spinlock_t *lock,
unsigned long *pending,
struct work_struct *work,
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a3fdc57e2673..f869fba86807 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -108,7 +108,7 @@ static void sas_scsi_task_done(struct sas_task *task)
break;
case SAM_CHECK_COND:
memcpy(sc->sense_buffer, ts->buf,
- max(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
+ min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
stat = SAM_CHECK_COND;
break;
default:
@@ -148,7 +148,6 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
if (!task)
return NULL;
- *(u32 *)cmd->sense_buffer = 0;
task->uldd_task = cmd;
ASSIGN_SAS_TASK(cmd, task);
@@ -200,6 +199,10 @@ int sas_queue_up(struct sas_task *task)
*/
int sas_queuecommand(struct scsi_cmnd *cmd,
void (*scsi_done)(struct scsi_cmnd *))
+ __releases(host->host_lock)
+ __acquires(dev->sata_dev.ap->lock)
+ __releases(dev->sata_dev.ap->lock)
+ __acquires(host->host_lock)
{
int res = 0;
struct domain_device *dev = cmd_to_domain_dev(cmd);
@@ -410,7 +413,7 @@ static int sas_recover_I_T(struct domain_device *dev)
}
/* Find the sas_phy that's attached to this device */
-struct sas_phy *find_local_sas_phy(struct domain_device *dev)
+static struct sas_phy *find_local_sas_phy(struct domain_device *dev)
{
struct domain_device *pdev = dev->parent;
struct ex_phy *exphy = NULL;
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
new file mode 100644
index 000000000000..594524d5bfa1
--- /dev/null
+++ b/drivers/scsi/libsas/sas_task.c
@@ -0,0 +1,36 @@
+#include <linux/kernel.h>
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+
+/* fill task_status_struct based on SSP response frame */
+void sas_ssp_task_response(struct device *dev, struct sas_task *task,
+ struct ssp_response_iu *iu)
+{
+ struct task_status_struct *tstat = &task->task_status;
+
+ tstat->resp = SAS_TASK_COMPLETE;
+
+ if (iu->datapres == 0)
+ tstat->stat = iu->status;
+ else if (iu->datapres == 1)
+ tstat->stat = iu->resp_data[3];
+ else if (iu->datapres == 2) {
+ tstat->stat = SAM_CHECK_COND;
+ tstat->buf_valid_size =
+ min_t(int, SAS_STATUS_BUF_SIZE,
+ be32_to_cpu(iu->sense_data_len));
+ memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
+
+ if (iu->status != SAM_CHECK_COND)
+ dev_printk(KERN_WARNING, dev,
+ "dev %llx sent sense data, but "
+ "stat(%x) is not CHECK CONDITION\n",
+ SAS_ADDR(task->dev->sas_addr),
+ iu->status);
+ }
+ else
+ /* when datapres contains corrupt/unknown value... */
+ tstat->stat = SAM_CHECK_COND;
+}
+EXPORT_SYMBOL_GPL(sas_ssp_task_response);
+
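[Editorial sketch, not part of the patch: how an LLD completion path might use the new helper. The surrounding function and the way the response IU is obtained are illustrative only.]

/* Hypothetical LLD fragment: an SSP response IU has been received
 * for this task; fill in task->task_status before completing it.
 */
static void example_lld_ssp_done(struct sas_task *task,
                                 struct ssp_response_iu *iu)
{
        struct sas_ha_struct *ha = task->dev->port->ha;

        sas_ssp_task_response(ha->dev, task, iu);
        task->task_done(task);
}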
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 2ad0a27dbaab..5cff0204227d 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -192,18 +192,18 @@ static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
if (dma_map) {
iue = (struct iu_entry *) sc->SCp.ptr;
- sg = sc->request_buffer;
+ sg = scsi_sglist(sc);
- dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
- md->len, sc->use_sg);
+ dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc),
+ md->len, scsi_sg_count(sc));
- nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
+ nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
DMA_BIDIRECTIONAL);
if (!nsg) {
- printk("fail to map %p %d\n", iue, sc->use_sg);
+ printk("fail to map %p %d\n", iue, scsi_sg_count(sc));
return 0;
}
- len = min(sc->request_bufflen, md->len);
+ len = min(scsi_bufflen(sc), md->len);
} else
len = md->len;
@@ -229,10 +229,10 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
if (dma_map || ext_desc) {
iue = (struct iu_entry *) sc->SCp.ptr;
- sg = sc->request_buffer;
+ sg = scsi_sglist(sc);
dprintk("%p %u %u %d %d\n",
- iue, sc->request_bufflen, id->len,
+ iue, scsi_bufflen(sc), id->len,
cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
}
@@ -268,13 +268,14 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
rdma:
if (dma_map) {
- nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
+ nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
+ DMA_BIDIRECTIONAL);
if (!nsg) {
- eprintk("fail to map %p %d\n", iue, sc->use_sg);
+ eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
err = -EIO;
goto free_mem;
}
- len = min(sc->request_bufflen, id->len);
+ len = min(scsi_bufflen(sc), id->len);
} else
len = id->len;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ba3ecab9baf3..f26b9538affe 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -29,7 +29,8 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
-#define LPFC_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
@@ -68,6 +69,7 @@ struct lpfc_dmabuf {
struct list_head list;
void *virt; /* virtual address ptr */
dma_addr_t phys; /* mapped address */
+ uint32_t buffer_tag; /* used for tagged queue ring */
};
struct lpfc_dma_pool {
@@ -272,10 +274,16 @@ struct lpfc_vport {
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
-#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
+ uint32_t ct_flags;
+#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
+#define FC_CT_RNN_ID 0x2 /* RNN_ID accepted by switch */
+#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */
+#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
+#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
+
struct list_head fc_nodes;
/* Keep counters for the number of entries in each list. */
@@ -344,6 +352,7 @@ struct lpfc_vport {
uint32_t cfg_discovery_threads;
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;
+ uint32_t cfg_enable_da_id;
uint32_t dev_loss_tmo_changed;
@@ -360,6 +369,7 @@ struct lpfc_vport {
struct hbq_s {
uint16_t entry_count; /* Current number of HBQ slots */
+ uint16_t buffer_count; /* Current number of buffers posted */
uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
uint32_t hbqPutIdx; /* HBQ slot to use */
uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
@@ -377,6 +387,11 @@ struct hbq_s {
#define LPFC_ELS_HBQ 0
#define LPFC_EXTRA_HBQ 1
+enum hba_temp_state {
+ HBA_NORMAL_TEMP,
+ HBA_OVER_TEMP
+};
+
struct lpfc_hba {
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
@@ -457,7 +472,8 @@ struct lpfc_hba {
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
-
+ uint32_t cfg_enable_hba_reset;
+ uint32_t cfg_enable_hba_heartbeat;
lpfc_vpd_t vpd; /* vital product data */
@@ -544,8 +560,7 @@ struct lpfc_hba {
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
-#define LPFC_MAX_VPI 100 /* Max number of VPI supported */
-#define LPFC_MAX_VPORTS (LPFC_MAX_VPI+1)/* Max number of VPorts supported */
+#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
unsigned long *vpi_bmask; /* vpi allocation table */
/* Data structure used by fabric iocb scheduler */
@@ -563,16 +578,30 @@ struct lpfc_hba {
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
struct dentry *debug_hbqinfo;
- struct dentry *debug_dumpslim;
+ struct dentry *debug_dumpHostSlim;
+ struct dentry *debug_dumpHBASlim;
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
#endif
+ /* Used for deferred freeing of ELS data buffers */
+ struct list_head elsbuf;
+ int elsbuf_cnt;
+ int elsbuf_prev_cnt;
+
+ uint8_t temp_sensor_support;
/* Fields used for heart beat. */
unsigned long last_completion_time;
struct timer_list hb_tmofunc;
uint8_t hb_outstanding;
+ /*
+ * Following bit will be set for all buffer tags which are not
+ * associated with any HBQ.
+ */
+#define QUE_BUFTAG_BIT (1<<31)
+ uint32_t buffer_tag_count;
+ enum hba_temp_state over_temp_state;
};
static inline struct Scsi_Host *
@@ -598,5 +627,15 @@ lpfc_is_link_up(struct lpfc_hba *phba)
phba->link_state == LPFC_HBA_READY;
}
-#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
+#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
+#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature
+ event */
+struct temp_event {
+ uint32_t event_type;
+ uint32_t event_code;
+ uint32_t data;
+};
+#define LPFC_CRIT_TEMP 0x1
+#define LPFC_THRESHOLD_TEMP 0x2
+#define LPFC_NORMAL_TEMP 0x3
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 80a11218b9bb..4bae4a2ed2f1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -45,6 +45,10 @@
#define LPFC_MIN_DEVLOSS_TMO 1
#define LPFC_MAX_DEVLOSS_TMO 255
+#define LPFC_MAX_LINK_SPEED 8
+#define LPFC_LINK_SPEED_BITMAP 0x00000117
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
+
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
@@ -86,6 +90,15 @@ lpfc_serialnum_show(struct class_device *cdev, char *buf)
}
static ssize_t
+lpfc_temp_sensor_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
+}
+
+static ssize_t
lpfc_modeldesc_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -178,12 +191,9 @@ lpfc_state_show(struct class_device *cdev, char *buf)
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
- len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n");
+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
switch (vport->port_state) {
- len += snprintf(buf + len, PAGE_SIZE-len,
- "initializing\n");
- break;
case LPFC_LOCAL_CFG_LINK:
len += snprintf(buf + len, PAGE_SIZE-len,
"Configuring Link\n");
@@ -252,8 +262,7 @@ lpfc_issue_lip(struct Scsi_Host *shost)
int mbxstatus = MBXERR_ERROR;
if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
- (vport->port_state != LPFC_VPORT_READY))
+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
return -EPERM;
pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
@@ -305,12 +314,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
psli = &phba->sli;
+ /* Wait a little for things to settle down, but not
+ * long enough for dev loss timeout to expire.
+ */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- /* The linkdown event takes 30 seconds to timeout. */
while (pring->txcmplq_cnt) {
msleep(10);
- if (cnt++ > 3000) {
+ if (cnt++ > 500) { /* 5 secs */
lpfc_printf_log(phba,
KERN_WARNING, LOG_INIT,
"0466 Outstanding IO when "
@@ -336,6 +347,9 @@ lpfc_selective_reset(struct lpfc_hba *phba)
struct completion online_compl;
int status = 0;
+ if (!phba->cfg_enable_hba_reset)
+ return -EIO;
+
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
@@ -409,6 +423,8 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
struct completion online_compl;
int status=0;
+ if (!phba->cfg_enable_hba_reset)
+ return -EACCES;
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
@@ -908,6 +924,8 @@ static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
+static CLASS_DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show,
+ NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -971,6 +989,14 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
unsigned int i, j, cnt=count;
u8 wwpn[8];
+ if (!phba->cfg_enable_hba_reset)
+ return -EACCES;
+ spin_lock_irq(&phba->hbalock);
+ if (phba->over_temp_state == HBA_OVER_TEMP) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EACCES;
+ }
+ spin_unlock_irq(&phba->hbalock);
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
@@ -1102,7 +1128,13 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
-LPFC_ATTR_R(enable_npiv, 0, 0, 1, "Enable NPIV functionality");
+int lpfc_enable_npiv = 0;
+module_param(lpfc_enable_npiv, int, 0);
+MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
+lpfc_param_show(enable_npiv);
+lpfc_param_init(enable_npiv, 0, 0, 1);
+static CLASS_DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
+ lpfc_enable_npiv_show, NULL);
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -1248,6 +1280,13 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
"Verbose logging bit-mask");
/*
+# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
+# objects that have been registered with the nameserver after login.
+*/
+LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
+ "Deregister nameserver objects before LOGO");
+
+/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,128]. Default value is 30.
*/
@@ -1369,7 +1408,33 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
-LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology");
+static int
+lpfc_topology_set(struct lpfc_hba *phba, int val)
+{
+ int err;
+ uint32_t prev_val;
+ if (val >= 0 && val <= 6) {
+ prev_val = phba->cfg_topology;
+ phba->cfg_topology = val;
+ err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+ if (err)
+ phba->cfg_topology = prev_val;
+ return err;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0467 lpfc_topology attribute cannot be set to %d, "
+ "allowed range is [0, 6]\n",
+ phba->brd_no, val);
+ return -EINVAL;
+}
+static int lpfc_topology = 0;
+module_param(lpfc_topology, int, 0);
+MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+lpfc_param_show(topology)
+lpfc_param_init(topology, 0, 0, 6)
+lpfc_param_store(topology)
+static CLASS_DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
+ lpfc_topology_show, lpfc_topology_store);
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
@@ -1381,7 +1446,59 @@ LPFC_ATTR_RW(topology, 0, 0, 6, "Select Fibre Channel topology");
# 8 = 8 Gigabaud
# Value range is [0,8]. Default value is 0.
*/
-LPFC_ATTR_R(link_speed, 0, 0, 8, "Select link speed");
+static int
+lpfc_link_speed_set(struct lpfc_hba *phba, int val)
+{
+ int err;
+ uint32_t prev_val;
+
+ if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
+ ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
+ ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
+ ((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
+ ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)))
+ return -EINVAL;
+
+ if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED)
+ && (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
+ prev_val = phba->cfg_link_speed;
+ phba->cfg_link_speed = val;
+ err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+ if (err)
+ phba->cfg_link_speed = prev_val;
+ return err;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0469 lpfc_link_speed attribute cannot be set to %d, "
+ "allowed range is [0, 8]\n",
+ phba->brd_no, val);
+ return -EINVAL;
+}
+
+static int lpfc_link_speed = 0;
+module_param(lpfc_link_speed, int, 0);
+MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
+lpfc_param_show(link_speed)
+static int
+lpfc_link_speed_init(struct lpfc_hba *phba, int val)
+{
+ if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED)
+ && (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
+ phba->cfg_link_speed = val;
+ return 0;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0454 lpfc_link_speed attribute cannot "
+ "be set to %d, allowed values are "
+ "["LPFC_LINK_SPEED_STRING"]\n", val);
+ phba->cfg_link_speed = 0;
+ return -EINVAL;
+}
+
+lpfc_param_store(link_speed)
+static CLASS_DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
+ lpfc_link_speed_show, lpfc_link_speed_store);
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
@@ -1479,7 +1596,30 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
*/
LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
+/*
+# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
+# 0 = HBA resets disabled
+# 1 = HBA resets enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
+
+/*
+# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer.
+# 0 = HBA Heartbeat disabled
+# 1 = HBA Heartbeat enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
+/*
+ * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count
+ * This value can be set to values between 64 and 256. The default value is
+ * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
+ * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ */
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+ LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_info,
@@ -1494,6 +1634,7 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_state,
&class_device_attr_num_discovered_ports,
&class_device_attr_lpfc_drvr_version,
+ &class_device_attr_lpfc_temp_sensor,
&class_device_attr_lpfc_log_verbose,
&class_device_attr_lpfc_lun_queue_depth,
&class_device_attr_lpfc_hba_queue_depth,
@@ -1530,6 +1671,9 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_lpfc_soft_wwnn,
&class_device_attr_lpfc_soft_wwpn,
&class_device_attr_lpfc_soft_wwn_enable,
+ &class_device_attr_lpfc_enable_hba_reset,
+ &class_device_attr_lpfc_enable_hba_heartbeat,
+ &class_device_attr_lpfc_sg_seg_cnt,
NULL,
};
@@ -1552,6 +1696,7 @@ struct class_device_attribute *lpfc_vport_attrs[] = {
&class_device_attr_lpfc_max_luns,
&class_device_attr_nport_evt_cnt,
&class_device_attr_npiv_info,
+ &class_device_attr_lpfc_enable_da_id,
NULL,
};
@@ -1727,13 +1872,18 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
spin_lock_irq(&phba->hbalock);
+ if (phba->over_temp_state == HBA_OVER_TEMP) {
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(&phba->hbalock);
+ return -EACCES;
+ }
+
if (off == 0 &&
phba->sysfs_mbox.state == SMBOX_WRITING &&
phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
/* Offline only */
- case MBX_WRITE_NV:
case MBX_INIT_LINK:
case MBX_DOWN_LINK:
case MBX_CONFIG_LINK:
@@ -1744,9 +1894,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
case MBX_DUMP_CONTEXT:
case MBX_RUN_DIAGS:
case MBX_RESTART:
- case MBX_FLASH_WR_ULA:
case MBX_SET_MASK:
- case MBX_SET_SLIM:
case MBX_SET_DEBUG:
if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
printk(KERN_WARNING "mbox_read:Command 0x%x "
@@ -1756,6 +1904,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
spin_unlock_irq(&phba->hbalock);
return -EPERM;
}
+ case MBX_WRITE_NV:
+ case MBX_WRITE_VPARMS:
case MBX_LOAD_SM:
case MBX_READ_NV:
case MBX_READ_CONFIG:
@@ -1772,6 +1922,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
case MBX_LOAD_EXP_ROM:
case MBX_BEACON:
case MBX_DEL_LD_ENTRY:
+ case MBX_SET_VARIABLE:
+ case MBX_WRITE_WWN:
break;
case MBX_READ_SPARM64:
case MBX_READ_LA:
@@ -1793,6 +1945,17 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
return -EPERM;
}
+ /* If HBA encountered an error attention, allow only DUMP
+ * mailbox command until the HBA is restarted.
+ */
+ if ((phba->pport->stopped) &&
+ (phba->sysfs_mbox.mbox->mb.mbxCommand
+ != MBX_DUMP_MEMORY)) {
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(&phba->hbalock);
+ return -EPERM;
+ }
+
phba->sysfs_mbox.mbox->vport = vport;
if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
@@ -1993,7 +2156,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
- }
+ } else
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
spin_unlock_irq(shost->host_lock);
}
@@ -2013,7 +2177,7 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
- node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
+ node_name = 0;
spin_unlock_irq(shost->host_lock);
@@ -2337,8 +2501,6 @@ struct fc_function_template lpfc_transport_functions = {
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
- .vport_create = lpfc_vport_create,
- .vport_delete = lpfc_vport_delete,
.dd_fcvport_size = sizeof(struct lpfc_vport *),
};
@@ -2414,21 +2576,23 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_use_msi_init(phba, lpfc_use_msi);
+ lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
+ lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
- /*
- * The total number of segments is the configuration value plus 2
- * since the IOCB need a command and response bde.
- */
- phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2;
+ lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
+ /* Also reinitialize the host templates with new values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
- * used to create the sg_dma_buf_pool must be dynamically calculated
+ * used to create the sg_dma_buf_pool must be dynamically calculated.
+ * 2 segments are added since the IOCB needs a command and response bde.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- (phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64));
+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
return;
}
@@ -2448,5 +2612,6 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
lpfc_max_luns_init(vport, lpfc_max_luns);
lpfc_scan_down_init(vport, lpfc_scan_down);
+ lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a599e1510710..50fcb7c930bc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
struct lpfc_dmabuf *mp);
@@ -43,9 +45,9 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -66,15 +68,15 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
+int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
-void lpfc_disc_flush_list(struct lpfc_vport *);
void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
+void lpfc_cleanup(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
-struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
void lpfc_worker_wake_up(struct lpfc_hba *);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
@@ -82,17 +84,17 @@ int lpfc_do_work(void *);
int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
-void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
- struct lpfc_nodelist *);
void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
struct serv_parm *, uint32_t);
int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_more_plogi(struct lpfc_vport *);
+void lpfc_more_adisc(struct lpfc_vport *);
+void lpfc_end_rscn(struct lpfc_vport *);
int lpfc_els_chk_latt(struct lpfc_vport *);
int lpfc_els_abort_flogi(struct lpfc_hba *);
int lpfc_initial_flogi(struct lpfc_vport *);
int lpfc_initial_fdisc(struct lpfc_vport *);
-int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
@@ -112,7 +114,6 @@ int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_els_retry_delay(unsigned long);
void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
-void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *);
void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_els_handle_rscn(struct lpfc_vport *);
@@ -124,7 +125,6 @@ int lpfc_els_disc_adisc(struct lpfc_vport *);
int lpfc_els_disc_plogi(struct lpfc_vport *);
void lpfc_els_timeout(unsigned long);
void lpfc_els_timeout_handler(struct lpfc_vport *);
-void lpfc_hb_timeout(unsigned long);
void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -142,7 +142,6 @@ void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
-void lpfc_block_mgmt_io(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
void lpfc_offline_prep(struct lpfc_hba *);
void lpfc_offline(struct lpfc_hba *);
@@ -165,7 +164,6 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
-struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -178,7 +176,6 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba);
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
-void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
void lpfc_reset_barrier(struct lpfc_hba * phba);
@@ -204,11 +201,14 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
struct lpfc_sli_ring *,
dma_addr_t);
+
+uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *);
+struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *,
+ struct lpfc_sli_ring *, uint32_t );
+
int lpfc_sli_hbq_count(void);
-int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
-struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
@@ -219,9 +219,6 @@ int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
void lpfc_mbox_timeout(unsigned long);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
-struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
- void *);
-struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
struct lpfc_name *);
@@ -260,6 +257,7 @@ extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
extern int lpfc_sli_mode;
+extern int lpfc_enable_npiv;
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
void lpfc_terminate_rport_io(struct fc_rport *);
@@ -281,11 +279,8 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
/* Interface exported by fabric iocb scheduler */
-int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
-void lpfc_fabric_abort_vport(struct lpfc_vport *);
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
-void lpfc_fabric_abort_flogi(struct lpfc_hba *);
void lpfc_fabric_block_timeout(unsigned long);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_adjust_queue_depth(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c701e4d611a9..92441ce610ed 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -19,7 +19,7 @@
*******************************************************************/
/*
- * Fibre Channel SCSI LAN Device Driver CT support
+ * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
*/
#include <linux/blkdev.h>
@@ -57,45 +57,27 @@
static char *lpfc_release_version = LPFC_DRIVER_VERSION;
-/*
- * lpfc_ct_unsol_event
- */
static void
-lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
- struct lpfc_dmabuf *mp, uint32_t size)
+lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
{
if (!mp) {
- printk(KERN_ERR "%s (%d): Unsolited CT, no buffer, "
- "piocbq = %p, status = x%x, mp = %p, size = %d\n",
- __FUNCTION__, __LINE__,
- piocbq, piocbq->iocb.ulpStatus, mp, size);
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "0146 Ignoring unsolicted CT No HBQ "
+ "status = x%x\n",
+ piocbq->iocb.ulpStatus);
}
-
- printk(KERN_ERR "%s (%d): Ignoring unsolicted CT piocbq = %p, "
- "buffer = %p, size = %d, status = x%x\n",
- __FUNCTION__, __LINE__,
- piocbq, mp, size,
- piocbq->iocb.ulpStatus);
-
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "0145 Ignoring unsolicted CT HBQ Size:%d "
+ "status = x%x\n",
+ size, piocbq->iocb.ulpStatus);
}
static void
-lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
- struct lpfc_dmabuf *mp, uint32_t size)
+lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
{
- if (!mp) {
- printk(KERN_ERR "%s (%d): Unsolited CT, no "
- "HBQ buffer, piocbq = %p, status = x%x\n",
- __FUNCTION__, __LINE__,
- piocbq, piocbq->iocb.ulpStatus);
- } else {
- lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
- printk(KERN_ERR "%s (%d): Ignoring unsolicted CT "
- "piocbq = %p, buffer = %p, size = %d, "
- "status = x%x\n",
- __FUNCTION__, __LINE__,
- piocbq, mp, size, piocbq->iocb.ulpStatus);
- }
+ lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
}
void
@@ -109,11 +91,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *iocbq;
dma_addr_t paddr;
uint32_t size;
- struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
- struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
-
- piocbq->context2 = NULL;
- piocbq->context3 = NULL;
+ struct list_head head;
+ struct lpfc_dmabuf *bdeBuf;
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -122,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 0, 1);
+ lpfc_post_buffer(phba, pring, 2, 1);
return;
}
@@ -133,38 +112,34 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- list_for_each_entry(iocbq, &piocbq->list, list) {
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+ list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
- if (icmd->ulpBdeCount == 0) {
- printk(KERN_ERR "%s (%d): Unsolited CT, no "
- "BDE, iocbq = %p, status = x%x\n",
- __FUNCTION__, __LINE__,
- iocbq, iocbq->iocb.ulpStatus);
+ if (icmd->ulpBdeCount == 0)
continue;
- }
-
+ bdeBuf = iocbq->context2;
+ iocbq->context2 = NULL;
size = icmd->un.cont64[0].tus.f.bdeSize;
- lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size);
- lpfc_in_buf_free(phba, bdeBuf1);
+ lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
+ lpfc_in_buf_free(phba, bdeBuf);
if (icmd->ulpBdeCount == 2) {
- lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2,
- size);
- lpfc_in_buf_free(phba, bdeBuf2);
+ bdeBuf = iocbq->context3;
+ iocbq->context3 = NULL;
+ size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
+ size);
+ lpfc_in_buf_free(phba, bdeBuf);
}
}
+ list_del(&head);
} else {
struct lpfc_iocbq *next;
list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
icmd = &iocbq->iocb;
- if (icmd->ulpBdeCount == 0) {
- printk(KERN_ERR "%s (%d): Unsolited CT, no "
- "BDE, iocbq = %p, status = x%x\n",
- __FUNCTION__, __LINE__,
- iocbq, iocbq->iocb.ulpStatus);
- continue;
- }
-
+ if (icmd->ulpBdeCount == 0)
+ lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0);
for (i = 0; i < icmd->ulpBdeCount; i++) {
paddr = getPaddr(icmd->un.cont64[i].addrHigh,
icmd->un.cont64[i].addrLow);
@@ -176,6 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
list_del(&iocbq->list);
lpfc_sli_release_iocbq(phba, iocbq);
+ lpfc_post_buffer(phba, pring, i, 1);
}
}
}
@@ -203,7 +179,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
struct lpfc_dmabuf *mp;
int cnt, i = 0;
- /* We get chucks of FCELSSIZE */
+ /* We get chunks of FCELSSIZE */
cnt = size > FCELSSIZE ? FCELSSIZE: size;
while (size) {
@@ -426,6 +402,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
lpfc_set_disctmo(vport);
vport->num_disc_nodes = 0;
+ vport->fc_ns_retry = 0;
list_add_tail(&head, &mp->list);
@@ -458,7 +435,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
((lpfc_find_vport_by_did(phba, Did) == NULL) ||
vport->cfg_peer_port_login)) {
if ((vport->port_type != LPFC_NPIV_PORT) ||
- (vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
+ (!(vport->ct_flags & FC_CT_RFF_ID)) ||
(!vport->cfg_restrict_login)) {
ndlp = lpfc_setup_disc_node(vport, Did);
if (ndlp) {
@@ -506,7 +483,17 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
- if (lpfc_ns_cmd(vport,
+ /* This NPortID was previously
+ * a FCP target. Don't even
+ * bother to send GFF_ID.
+ */
+ ndlp = lpfc_findnode_did(vport,
+ Did);
+ if (ndlp && (ndlp->nlp_type &
+ NLP_FCP_TARGET))
+ lpfc_setup_disc_node
+ (vport, Did);
+ else if (lpfc_ns_cmd(vport,
SLI_CTNS_GFF_ID,
0, Did) == 0)
vport->num_disc_nodes++;
@@ -554,7 +541,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
- int rc;
+ int rc, retry;
/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
@@ -574,7 +561,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (vport->load_flag & FC_UNLOADING)
goto out;
-
if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n");
@@ -585,14 +571,35 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES))
+ retry = 1;
+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (irsp->un.ulpWord[4]) {
+ case IOERR_NO_RESOURCES:
+ /* We don't increment the retry
+ * count for this case.
+ */
+ break;
+ case IOERR_LINK_DOWN:
+ case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ retry = 0;
+ break;
+ default:
+ vport->fc_ns_retry++;
+ }
+ }
+ else
vport->fc_ns_retry++;
- /* CT command is being retried */
- rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+
+ if (retry) {
+ /* CT command is being retried */
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
vport->fc_ns_retry, 0);
- if (rc == 0)
- goto out;
+ if (rc == 0) {
+ /* success */
+ goto out;
+ }
+ }
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -698,7 +705,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp;
- int did;
+ int did, rc, retry;
uint8_t fbits;
struct lpfc_nodelist *ndlp;
@@ -729,6 +736,39 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
else {
+ /* Check for retry */
+ if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
+ retry = 1;
+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (irsp->un.ulpWord[4]) {
+ case IOERR_NO_RESOURCES:
+ /* We don't increment the retry
+ * count for this case.
+ */
+ break;
+ case IOERR_LINK_DOWN:
+ case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ retry = 0;
+ break;
+ default:
+ cmdiocb->retry++;
+ }
+ }
+ else
+ cmdiocb->retry++;
+
+ if (retry) {
+ /* CT command is being retried */
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+ cmdiocb->retry, did);
+ if (rc == 0) {
+ /* success */
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+ }
+ }
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
@@ -778,8 +818,8 @@ out:
static void
-lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp;
@@ -809,7 +849,7 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0209 RFT request completes, latt %d, "
+ "0209 CT Request completes, latt %d, "
"ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
latt, irsp->ulpStatus,
CTrsp->CommandResponse.bits.CmdRsp,
@@ -848,10 +888,44 @@ out:
}
static void
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RFT_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RNN_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
return;
}
@@ -859,7 +933,20 @@ static void
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RSPN_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
return;
}
@@ -867,7 +954,32 @@ static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
+
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RSNN_NN;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+ return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+
+ /* even if it fails we will act as though it succeeded. */
+ vport->ct_flags = 0;
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
return;
}
@@ -878,10 +990,17 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
- if (irsp->ulpStatus != IOSTAT_SUCCESS)
- vport->fc_flag |= FC_RFF_NOT_SUPPORTED;
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ struct lpfc_dmabuf *outp;
+ struct lpfc_sli_ct_request *CTrsp;
- lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+ vport->ct_flags |= FC_CT_RFF_ID;
+ }
+ lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
return;
}
@@ -1001,6 +1120,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_DA_ID)
+ bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
else
@@ -1029,31 +1150,34 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_GFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_GFF_ID);
- CtReq->un.gff.PortId = be32_to_cpu(context);
+ CtReq->un.gff.PortId = cpu_to_be32(context);
cmpl = lpfc_cmpl_ct_cmd_gff_id;
break;
case SLI_CTNS_RFT_ID:
+ vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFT_ID);
- CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID);
+ CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rft.fcpReg = 1;
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
case SLI_CTNS_RNN_ID:
+ vport->ct_flags &= ~FC_CT_RNN_ID;
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RNN_ID);
- CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID);
+ CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
cmpl = lpfc_cmpl_ct_cmd_rnn_id;
break;
case SLI_CTNS_RSPN_ID:
+ vport->ct_flags &= ~FC_CT_RSPN_ID;
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RSPN_ID);
- CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID);
+ CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
size = sizeof(CtReq->un.rspn.symbname);
CtReq->un.rspn.len =
lpfc_vport_symbolic_port_name(vport,
@@ -1061,6 +1185,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cmpl = lpfc_cmpl_ct_cmd_rspn_id;
break;
case SLI_CTNS_RSNN_NN:
+ vport->ct_flags &= ~FC_CT_RSNN_NN;
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RSNN_NN);
memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
@@ -1071,11 +1196,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
CtReq->un.rsnn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
break;
+ case SLI_CTNS_DA_ID:
+ /* Implement DA_ID Nameserver request */
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_DA_ID);
+ CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
+ cmpl = lpfc_cmpl_ct_cmd_da_id;
+ break;
case SLI_CTNS_RFF_ID:
- vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED;
+ vport->ct_flags &= ~FC_CT_RFF_ID;
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFF_ID);
- CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);;
+ CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rff.fbits = FC4_FEATURE_INIT;
CtReq->un.rff.type_code = FC_FCP_DATA;
cmpl = lpfc_cmpl_ct_cmd_rff_id;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index d6a98bc970ff..783d1eea13ef 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -43,6 +43,7 @@
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
+#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#ifdef CONFIG_LPFC_DEBUG_FS
@@ -75,18 +76,18 @@ module_param(lpfc_debugfs_enable, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
/* This MUST be a power of 2 */
-static int lpfc_debugfs_max_disc_trc = 0;
+static int lpfc_debugfs_max_disc_trc;
module_param(lpfc_debugfs_max_disc_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
"Set debugfs discovery trace depth");
/* This MUST be a power of 2 */
-static int lpfc_debugfs_max_slow_ring_trc = 0;
+static int lpfc_debugfs_max_slow_ring_trc;
module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
-static int lpfc_debugfs_mask_disc_trc = 0;
+int lpfc_debugfs_mask_disc_trc;
module_param(lpfc_debugfs_mask_disc_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
"Set debugfs discovery trace mask");
@@ -100,8 +101,11 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
#define LPFC_NODELIST_SIZE 8192
#define LPFC_NODELIST_ENTRY_SIZE 120
-/* dumpslim output buffer size */
-#define LPFC_DUMPSLIM_SIZE 4096
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
@@ -243,16 +247,17 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
raw_index = phba->hbq_get[i];
getidx = le32_to_cpu(raw_index);
len += snprintf(buf+len, size-len,
- "entrys:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
- hbqs->entry_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx,
- hbqs->local_hbqGetIdx, getidx);
+ "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+ hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
+ hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
for (j=0; j<hbqs->entry_count; j++) {
len += snprintf(buf+len, size-len,
"%03d: %08x %04x %05x ", j,
- hbqe->bde.addrLow, hbqe->bde.tus.w, hbqe->buffer_tag);
-
+ le32_to_cpu(hbqe->bde.addrLow),
+ le32_to_cpu(hbqe->bde.tus.w),
+ le32_to_cpu(hbqe->buffer_tag));
i = 0;
found = 0;
@@ -276,7 +281,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
- if (phys == hbqe->bde.addrLow) {
+ if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
len += snprintf(buf+len, size-len,
"Buf%d: %p %06x\n", i,
hbq_buf->dbuf.virt, hbq_buf->tag);
@@ -297,18 +302,58 @@ skipit:
return len;
}
+static int lpfc_debugfs_last_hba_slim_off;
+
+static int
+lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ int len = 0;
+ int i, off;
+ uint32_t *ptr;
+ char buffer[1024];
+
+ off = 0;
+ spin_lock_irq(&phba->hbalock);
+
+ len += snprintf(buf+len, size-len, "HBA SLIM\n");
+ lpfc_memcpy_from_slim(buffer,
+ ((uint8_t *)phba->MBslimaddr) + lpfc_debugfs_last_hba_slim_off,
+ 1024);
+
+ ptr = (uint32_t *)&buffer[0];
+ off = lpfc_debugfs_last_hba_slim_off;
+
+ /* Set it up for the next time */
+ lpfc_debugfs_last_hba_slim_off += 1024;
+ if (lpfc_debugfs_last_hba_slim_off >= 4096)
+ lpfc_debugfs_last_hba_slim_off = 0;
+
+ i = 1024;
+ while (i > 0) {
+ len += snprintf(buf+len, size-len,
+ "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+ *(ptr+5), *(ptr+6), *(ptr+7));
+ ptr += 8;
+ i -= (8 * sizeof(uint32_t));
+ off += (8 * sizeof(uint32_t));
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ return len;
+}
+
static int
-lpfc_debugfs_dumpslim_data(struct lpfc_hba *phba, char *buf, int size)
+lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
{
int len = 0;
- int cnt, i, off;
+ int i, off;
uint32_t word0, word1, word2, word3;
uint32_t *ptr;
struct lpfc_pgp *pgpp;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- cnt = LPFC_DUMPSLIM_SIZE;
off = 0;
spin_lock_irq(&phba->hbalock);
@@ -620,7 +665,34 @@ out:
}
static int
-lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file)
+lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
+ LPFC_DUMPHBASLIM_SIZE);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int
+lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
{
struct lpfc_hba *phba = inode->i_private;
struct lpfc_debug *debug;
@@ -631,14 +703,14 @@ lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundry */
- debug->buffer = kmalloc(LPFC_DUMPSLIM_SIZE, GFP_KERNEL);
+ debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = lpfc_debugfs_dumpslim_data(phba, debug->buffer,
- LPFC_DUMPSLIM_SIZE);
+ debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
+ LPFC_DUMPHOSTSLIM_SIZE);
file->private_data = debug;
rc = 0;
@@ -741,10 +813,19 @@ static struct file_operations lpfc_debugfs_op_hbqinfo = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_dumpslim
-static struct file_operations lpfc_debugfs_op_dumpslim = {
+#undef lpfc_debugfs_op_dumpHBASlim
+static struct file_operations lpfc_debugfs_op_dumpHBASlim = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dumpHBASlim_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpHostSlim
+static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_dumpslim_open,
+ .open = lpfc_debugfs_dumpHostSlim_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.release = lpfc_debugfs_release,
@@ -812,15 +893,27 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
- /* Setup dumpslim */
- snprintf(name, sizeof(name), "dumpslim");
- phba->debug_dumpslim =
+ /* Setup dumpHBASlim */
+ snprintf(name, sizeof(name), "dumpHBASlim");
+ phba->debug_dumpHBASlim =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHBASlim);
+ if (!phba->debug_dumpHBASlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0409 Cannot create debugfs dumpHBASlim\n");
+ goto debug_failed;
+ }
+
+ /* Setup dumpHostSlim */
+ snprintf(name, sizeof(name), "dumpHostSlim");
+ phba->debug_dumpHostSlim =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpslim);
- if (!phba->debug_dumpslim) {
+ phba, &lpfc_debugfs_op_dumpHostSlim);
+ if (!phba->debug_dumpHostSlim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs dumpslim\n");
+ "0409 Cannot create debugfs dumpHostSlim\n");
goto debug_failed;
}
@@ -970,9 +1063,13 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
}
- if (phba->debug_dumpslim) {
- debugfs_remove(phba->debug_dumpslim); /* dumpslim */
- phba->debug_dumpslim = NULL;
+ if (phba->debug_dumpHBASlim) {
+ debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
+ phba->debug_dumpHBASlim = NULL;
+ }
+ if (phba->debug_dumpHostSlim) {
+ debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
+ phba->debug_dumpHostSlim = NULL;
}
if (phba->slow_ring_trc) {
kfree(phba->slow_ring_trc);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index aacac9ac5381..cfe81c50529a 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -36,7 +36,6 @@ enum lpfc_work_type {
LPFC_EVT_WARM_START,
LPFC_EVT_KILL,
LPFC_EVT_ELS_RETRY,
- LPFC_EVT_DEV_LOSS_DELAY,
LPFC_EVT_DEV_LOSS,
};
@@ -92,6 +91,7 @@ struct lpfc_nodelist {
#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
+#define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */
#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8085900635d4..c6b739dc6bc3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -18,7 +18,7 @@
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
-
+/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -42,6 +42,14 @@ static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
+static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, uint8_t retry);
+static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
+ struct lpfc_iocbq *iocb);
+static void lpfc_register_new_vport(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
static int lpfc_max_els_tries = 3;
@@ -109,14 +117,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
- ((pcmd->virt = lpfc_mbuf_alloc(phba,
- MEM_PRI, &(pcmd->phys))) == 0)) {
- kfree(pcmd);
-
- lpfc_sli_release_iocbq(phba, elsiocb);
- return NULL;
- }
+ pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (pcmd)
+ pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
+ if (!pcmd || !pcmd->virt)
+ goto els_iocb_free_pcmb_exit;
INIT_LIST_HEAD(&pcmd->list);
@@ -126,13 +131,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
- if (prsp == 0 || prsp->virt == 0) {
- kfree(prsp);
- lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
- kfree(pcmd);
- lpfc_sli_release_iocbq(phba, elsiocb);
- return NULL;
- }
+ if (!prsp || !prsp->virt)
+ goto els_iocb_free_prsp_exit;
INIT_LIST_HEAD(&prsp->list);
} else {
prsp = NULL;
@@ -143,15 +143,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
if (pbuflist)
pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&pbuflist->phys);
- if (pbuflist == 0 || pbuflist->virt == 0) {
- lpfc_sli_release_iocbq(phba, elsiocb);
- lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
- lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
- kfree(pcmd);
- kfree(prsp);
- kfree(pbuflist);
- return NULL;
- }
+ if (!pbuflist || !pbuflist->virt)
+ goto els_iocb_free_pbuf_exit;
INIT_LIST_HEAD(&pbuflist->list);
@@ -196,7 +189,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
bpl->tus.w = le32_to_cpu(bpl->tus.w);
}
+ /* prevent preparing iocb with NULL ndlp reference */
elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1)
+ goto els_iocb_free_pbuf_exit;
elsiocb->context2 = pcmd;
elsiocb->context3 = pbuflist;
elsiocb->retry = retry;
@@ -222,8 +218,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
cmdSize);
}
return elsiocb;
-}
+els_iocb_free_pbuf_exit:
+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+ kfree(pbuflist);
+
+els_iocb_free_prsp_exit:
+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+ kfree(prsp);
+
+els_iocb_free_pcmb_exit:
+ kfree(pcmd);
+ lpfc_sli_release_iocbq(phba, elsiocb);
+ return NULL;
+}
static int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
@@ -234,40 +242,53 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
struct serv_parm *sp;
int rc;
+ int err = 0;
sp = &phba->fc_fabparam;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp)
+ if (!ndlp) {
+ err = 1;
goto fail;
+ }
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ if (!mbox) {
+ err = 2;
goto fail;
+ }
vport->port_state = LPFC_FABRIC_CFG_LINK;
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
- if (rc == MBX_NOT_FINISHED)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ err = 3;
goto fail_free_mbox;
+ }
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ if (!mbox) {
+ err = 4;
goto fail;
+ }
rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
0);
- if (rc)
+ if (rc) {
+ err = 5;
goto fail_free_mbox;
+ }
mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
mbox->vport = vport;
mbox->context2 = lpfc_nlp_get(ndlp);
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
- if (rc == MBX_NOT_FINISHED)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ err = 6;
goto fail_issue_reg_login;
+ }
return 0;
@@ -282,7 +303,7 @@ fail_free_mbox:
fail:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0249 Cannot issue Register Fabric login\n");
+ "0249 Cannot issue Register Fabric login: Err %d\n", err);
return -ENXIO;
}
@@ -370,11 +391,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
}
}
- ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
@@ -429,8 +451,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- MBX_NOWAIT | MBX_STOP_IOCB);
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
@@ -463,6 +484,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_put(ndlp);
}
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
@@ -488,6 +512,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
+ /* One additional decrement on node reference count to
+ * trigger the release of the node
+ */
lpfc_nlp_put(ndlp);
goto out;
}
@@ -562,8 +589,13 @@ flogifail:
/* Start discovery */
lpfc_disc_start(vport);
+ } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+ ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
+ (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
+ (phba->link_state != LPFC_CLEAR_LA)) {
+ /* If FLOGI failed enable link interrupt. */
+ lpfc_issue_clear_la(phba, vport);
}
-
out:
lpfc_els_free_iocb(phba, cmdiocb);
}
@@ -685,6 +717,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+
/* First look for the Fabric ndlp */
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
@@ -696,7 +731,11 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
} else {
lpfc_dequeue_node(vport, ndlp);
}
+
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
+ /* This decrement of the node reference count shall kick off
+ * the release of the node.
+ */
lpfc_nlp_put(ndlp);
}
return 1;
@@ -720,11 +759,16 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
lpfc_dequeue_node(vport, ndlp);
}
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
+ /* decrement node reference count to trigger the release of
+ * the node.
+ */
lpfc_nlp_put(ndlp);
+ return 0;
}
return 1;
}
-static void
+
+void
lpfc_more_plogi(struct lpfc_vport *vport)
{
int sentplogi;
@@ -752,6 +796,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
{
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_nodelist *new_ndlp;
+ struct lpfc_rport_data *rdata;
+ struct fc_rport *rport;
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc;
@@ -788,11 +834,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID;
new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
+ new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
/* Move this back to NPR state */
- if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0)
+ if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
+ /* The new_ndlp is replacing ndlp totally, so we need
+ * to put ndlp on UNUSED list and try to free it.
+ */
+
+ /* Fix up the rport accordingly */
+ rport = ndlp->rport;
+ if (rport) {
+ rdata = rport->dd_data;
+ if (rdata->pnode == ndlp) {
+ lpfc_nlp_put(ndlp);
+ ndlp->rport = NULL;
+ rdata->pnode = lpfc_nlp_get(new_ndlp);
+ new_ndlp->rport = rport;
+ }
+ new_ndlp->nlp_type = ndlp->nlp_type;
+ }
+
lpfc_drop_node(vport, ndlp);
+ }
else {
lpfc_unreg_rpi(vport, ndlp);
ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
@@ -801,6 +870,27 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
return new_ndlp;
}
+void
+lpfc_end_rscn(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ /*
+ * Check to see if more RSCNs came in while we were
+ * processing this one.
+ */
+ if (vport->fc_rscn_id_cnt ||
+ (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+ lpfc_els_handle_rscn(vport);
+ else {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ }
+}
+
static void
lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -871,13 +961,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
/* PLOGI failed */
- if (ndlp->nlp_DID == NameServer_DID) {
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0250 Nameserver login error: "
- "0x%x / 0x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
- }
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (lpfc_error_lost_link(irsp)) {
rc = NLP_STE_FREED_NODE;
@@ -905,20 +988,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
- if (vport->fc_flag & FC_RSCN_MODE) {
- /*
- * Check to see if more RSCNs came in while
- * we were processing this one.
- */
- if ((vport->fc_rscn_id_cnt == 0) &&
- (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
- } else {
- lpfc_els_handle_rscn(vport);
- }
- }
+ lpfc_end_rscn(vport);
}
}
@@ -933,6 +1003,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
IOCB_t *icmd;
+ struct lpfc_nodelist *ndlp;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
@@ -943,8 +1014,11 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ ndlp = lpfc_findnode_did(vport, did);
+ /* If ndlp is not NULL, we will bump the reference count on it */
+
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
- elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did,
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
ELS_CMD_PLOGI);
if (!elsiocb)
return 1;
@@ -1109,7 +1183,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
-static void
+void
lpfc_more_adisc(struct lpfc_vport *vport)
{
int sentadisc;
@@ -1134,8 +1208,6 @@ lpfc_more_adisc(struct lpfc_vport *vport)
static void
lpfc_rscn_disc(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
lpfc_can_disctmo(vport);
/* RSCN discovery */
@@ -1144,19 +1216,7 @@ lpfc_rscn_disc(struct lpfc_vport *vport)
if (lpfc_els_disc_plogi(vport))
return;
- if (vport->fc_flag & FC_RSCN_MODE) {
- /* Check to see if more RSCNs came in while we were
- * processing this one.
- */
- if ((vport->fc_rscn_id_cnt == 0) &&
- (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
- } else {
- lpfc_els_handle_rscn(vport);
- }
- }
+ lpfc_end_rscn(vport);
}
static void
@@ -1413,6 +1473,13 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
+ spin_lock_irq(shost->host_lock);
+ if (ndlp->nlp_flag & NLP_LOGO_SND) {
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+ }
+ spin_unlock_irq(shost->host_lock);
+
cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_LOGO);
@@ -1499,6 +1566,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp->nlp_DID, ELS_CMD_SCR);
if (!elsiocb) {
+ /* This will trigger the release of the node just
+ * allocated
+ */
lpfc_nlp_put(ndlp);
return 1;
}
@@ -1520,10 +1590,17 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
+ /* This will cause the callback-function lpfc_cmpl_els_cmd to
+ * trigger the release of the node.
+ */
lpfc_nlp_put(ndlp);
return 0;
}
@@ -1555,6 +1632,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RNID);
if (!elsiocb) {
+ /* This will trigger the release of the node just
+ * allocated
+ */
lpfc_nlp_put(ndlp);
return 1;
}
@@ -1591,35 +1671,21 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
phba->fc_stat.elsXmitFARPR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
+ /* This will cause the callback-function lpfc_cmpl_els_cmd to
+ * trigger the release of the node.
+ */
lpfc_nlp_put(ndlp);
return 0;
}
-static void
-lpfc_end_rscn(struct lpfc_vport *vport)
-{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- if (vport->fc_flag & FC_RSCN_MODE) {
- /*
- * Check to see if more RSCNs came in while we were
- * processing this one.
- */
- if (vport->fc_rscn_id_cnt ||
- (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
- lpfc_els_handle_rscn(vport);
- else {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
- }
- }
-}
-
void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
@@ -1675,7 +1741,10 @@ lpfc_els_retry_delay(unsigned long ptr)
return;
}
- evtp->evt_arg1 = ndlp;
+ /* We need to hold the node by incrementing the reference
+ * count until the queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
evtp->evt = LPFC_EVT_ELS_RETRY;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
@@ -1759,6 +1828,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint32_t *elscmd;
struct ls_rjt stat;
int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
+ int logerr = 0;
uint32_t cmd = 0;
uint32_t did;
@@ -1815,6 +1885,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOERR_NO_RESOURCES:
+ logerr = 1; /* HBA out of resources */
retry = 1;
if (cmdiocb->retry > 100)
delay = 100;
@@ -1843,6 +1914,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
+ logerr = 1; /* Fabric / Remote NPort out of resources */
retry = 1;
break;
@@ -1923,6 +1995,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (did == FDMI_DID)
retry = 1;
+ if ((cmd == ELS_CMD_FLOGI) &&
+ (phba->fc_topology != TOPOLOGY_LOOP)) {
+ /* FLOGI retry policy */
+ retry = 1;
+ maxretry = 48;
+ if (cmdiocb->retry >= 32)
+ delay = 1000;
+ }
+
if ((++cmdiocb->retry) >= maxretry) {
phba->fc_stat.elsRetryExceeded++;
retry = 0;
@@ -2006,11 +2087,46 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
/* No retry ELS command <elsCmd> to remote NPORT <did> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ if (logerr) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0137 No retry ELS command x%x to remote "
+ "NPORT x%x: Out of Resources: Error:x%x/%x\n",
+ cmd, did, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ }
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0108 No retry ELS command x%x to remote "
"NPORT x%x Retried:%d Error:x%x/%x\n",
cmd, did, cmdiocb->retry, irsp->ulpStatus,
irsp->un.ulpWord[4]);
+ }
+ return 0;
+}
+
+static int
+lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
+{
+ struct lpfc_dmabuf *buf_ptr;
+
+ /* Free the response before processing the command. */
+ if (!list_empty(&buf_ptr1->list)) {
+ list_remove_head(&buf_ptr1->list, buf_ptr,
+ struct lpfc_dmabuf,
+ list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+ kfree(buf_ptr1);
+ return 0;
+}
+
+static int
+lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
+{
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
return 0;
}
@@ -2018,30 +2134,63 @@ int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+ struct lpfc_nodelist *ndlp;
- if (elsiocb->context1) {
- lpfc_nlp_put(elsiocb->context1);
+ ndlp = (struct lpfc_nodelist *)elsiocb->context1;
+ if (ndlp) {
+ if (ndlp->nlp_flag & NLP_DEFER_RM) {
+ lpfc_nlp_put(ndlp);
+
+ /* If the ndlp is not being used by another discovery
+ * thread, free it.
+ */
+ if (!lpfc_nlp_not_used(ndlp)) {
+ /* If ndlp is being used by another discovery
+ * thread, just clear NLP_DEFER_RM
+ */
+ ndlp->nlp_flag &= ~NLP_DEFER_RM;
+ }
+ }
+ else
+ lpfc_nlp_put(ndlp);
elsiocb->context1 = NULL;
}
/* context2 = cmd, context2->next = rsp, context3 = bpl */
if (elsiocb->context2) {
- buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
- /* Free the response before processing the command. */
- if (!list_empty(&buf_ptr1->list)) {
- list_remove_head(&buf_ptr1->list, buf_ptr,
- struct lpfc_dmabuf,
- list);
- lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
- kfree(buf_ptr);
+ if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+ /* Firmware could still be in the process of DMAing the
+ * payload, so don't free the data buffer until after
+ * a heartbeat.
+ */
+ elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
+ buf_ptr = elsiocb->context2;
+ elsiocb->context2 = NULL;
+ if (buf_ptr) {
+ buf_ptr1 = NULL;
+ spin_lock_irq(&phba->hbalock);
+ if (!list_empty(&buf_ptr->list)) {
+ list_remove_head(&buf_ptr->list,
+ buf_ptr1, struct lpfc_dmabuf,
+ list);
+ INIT_LIST_HEAD(&buf_ptr1->list);
+ list_add_tail(&buf_ptr1->list,
+ &phba->elsbuf);
+ phba->elsbuf_cnt++;
+ }
+ INIT_LIST_HEAD(&buf_ptr->list);
+ list_add_tail(&buf_ptr->list, &phba->elsbuf);
+ phba->elsbuf_cnt++;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ } else {
+ buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ lpfc_els_free_data(phba, buf_ptr1);
}
- lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
- kfree(buf_ptr1);
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
- lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
- kfree(buf_ptr);
+ lpfc_els_free_bpl(phba, buf_ptr);
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
@@ -2065,15 +2214,20 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- switch (ndlp->nlp_state) {
- case NLP_STE_UNUSED_NODE: /* node is just allocated */
- lpfc_drop_node(vport, ndlp);
- break;
- case NLP_STE_NPR_NODE: /* NPort Recovery mode */
- lpfc_unreg_rpi(vport, ndlp);
- break;
- default:
- break;
+
+ if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+ /* NPort Recovery mode or node is just allocated */
+ if (!lpfc_nlp_not_used(ndlp)) {
+ /* If the ndlp is being used by another discovery
+ * thread, just unregister the RPI.
+ */
+ lpfc_unreg_rpi(vport, ndlp);
+ } else {
+ /* Indicate the node has already been released; do
+ * not reference it from within lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
}
lpfc_els_free_iocb(phba, cmdiocb);
return;
@@ -2089,7 +2243,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_nlp_put(ndlp);
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ /* This is the end of the default RPI cleanup logic for this
+ * ndlp. If no other discovery threads are using this ndlp,
+ * we should free all resources associated with it.
+ */
+ lpfc_nlp_not_used(ndlp);
+ }
return;
}
@@ -2100,15 +2261,29 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
- IOCB_t *irsp;
+ IOCB_t *irsp;
+ uint8_t *pcmd;
LPFC_MBOXQ_t *mbox = NULL;
struct lpfc_dmabuf *mp = NULL;
+ uint32_t ls_rjt = 0;
irsp = &rspiocb->iocb;
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
+ /* First determine if this is a LS_RJT cmpl. Note that this callback
+ * function can have the cmdiocb->context1 (ndlp) field set to NULL.
+ */
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+ if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+ /* A LS_RJT associated with Default RPI cleanup has its own
+ * separate code path.
+ */
+ if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+ ls_rjt = 1;
+ }
+
/* Check to see if link went down during discovery */
if (!ndlp || lpfc_els_chk_latt(vport)) {
if (mbox) {
@@ -2119,6 +2294,15 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
mempool_free(mbox, phba->mbox_mem_pool);
}
+ if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+ if (lpfc_nlp_not_used(ndlp)) {
+ ndlp = NULL;
+ /* Indicate the node has already been released;
+ * do not reference it from within
+ * the routine lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
goto out;
}
@@ -2150,20 +2334,39 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_REG_LOGIN_ISSUE);
}
- if (lpfc_sli_issue_mbox(phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB))
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED) {
goto out;
}
- lpfc_nlp_put(ndlp);
- /* NOTE: we should have messages for unsuccessful
- reglogin */
+
+ /* ELS rsp: Cannot issue reg_login for <NPortid> */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0138 ELS rsp: Cannot issue reg_login for x%x "
+ "Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+
+ if (lpfc_nlp_not_used(ndlp)) {
+ ndlp = NULL;
+ /* Indicate the node has already been released;
+ * do not reference it from within
+ * the routine lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
} else {
/* Do not drop node for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(irsp) &&
ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
- lpfc_drop_node(vport, ndlp);
- ndlp = NULL;
+ if (lpfc_nlp_not_used(ndlp)) {
+ ndlp = NULL;
+ /* Indicate the node has already been
+ * released; do not reference
+ * it from within the routine
+ * lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
+ }
}
}
mp = (struct lpfc_dmabuf *) mbox->context1;
@@ -2178,7 +2381,21 @@ out:
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
+
+ /* If the node is not being used by another discovery thread,
+ * and we are sending a reject, we are done with it.
+ * Release driver reference count here and free associated
+ * resources.
+ */
+ if (ls_rjt)
+ if (lpfc_nlp_not_used(ndlp))
+ /* Indicate the node has already been released;
+ * do not reference it from within
+ * the routine lpfc_els_free_iocb.
+ */
+ cmdiocb->context1 = NULL;
}
+
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
@@ -2349,14 +2566,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- /* If the node is in the UNUSED state, and we are sending
- * a reject, we are done with it. Release driver reference
- * count here. The outstanding els will release its reference on
- * completion and the node can be freed then.
- */
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- lpfc_nlp_put(ndlp);
-
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2642,7 +2851,10 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
}
}
}
- if (sentplogi == 0) {
+ if (sentplogi) {
+ lpfc_set_disctmo(vport);
+ }
+ else {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NLP_MORE;
spin_unlock_irq(shost->host_lock);
@@ -2830,10 +3042,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DEFERRED;
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
- spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE;
spin_unlock_irq(shost->host_lock);
if (rscn_cnt) {
@@ -2862,7 +3074,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
} else {
- spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DISCOVERY;
spin_unlock_irq(shost->host_lock);
/* ReDiscovery RSCN */
@@ -2877,7 +3088,9 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
return 0;
}
@@ -2929,6 +3142,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* To process RSCN, first compare RSCN data with NameServer */
vport->fc_ns_retry = 0;
+ vport->num_disc_nodes = 0;
+
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer */
@@ -3022,8 +3237,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
- rc = lpfc_sli_issue_mbox
- (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
lpfc_set_loopback_flag(phba);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@@ -3140,7 +3354,10 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
lpfc_max_els_tries, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
+
+ /* Decrement the ndlp reference count from previous mbox command */
lpfc_nlp_put(ndlp);
+
if (!elsiocb)
return;
@@ -3160,13 +3377,13 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
status |= 0x4;
rps_rsp->rsvd1 = 0;
- rps_rsp->portStatus = be16_to_cpu(status);
- rps_rsp->linkFailureCnt = be32_to_cpu(mb->un.varRdLnk.linkFailureCnt);
- rps_rsp->lossSyncCnt = be32_to_cpu(mb->un.varRdLnk.lossSyncCnt);
- rps_rsp->lossSignalCnt = be32_to_cpu(mb->un.varRdLnk.lossSignalCnt);
- rps_rsp->primSeqErrCnt = be32_to_cpu(mb->un.varRdLnk.primSeqErrCnt);
- rps_rsp->invalidXmitWord = be32_to_cpu(mb->un.varRdLnk.invalidXmitWord);
- rps_rsp->crcCnt = be32_to_cpu(mb->un.varRdLnk.crcCnt);
+ rps_rsp->portStatus = cpu_to_be16(status);
+ rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+ rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+ rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+ rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+ rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+ rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
/* Xmit ELS RPS ACC response tag <ulpIoTag> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
"0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
@@ -3223,11 +3440,13 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
- if (lpfc_sli_issue_mbox (phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED)
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
/* Mbox completion will send ELS Response */
return 0;
-
+ /* Decrement reference count used for the failed mbox
+ * command.
+ */
lpfc_nlp_put(ndlp);
mempool_free(mbox, phba->mbox_mem_pool);
}
@@ -3461,6 +3680,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* other NLP_FABRIC logins
*/
lpfc_drop_node(vport, ndlp);
+
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/* Fail outstanding I/O now since this
* device is marked for PLOGI
@@ -3469,8 +3689,6 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
}
- vport->port_state = LPFC_FLOGI;
- lpfc_set_disctmo(vport);
lpfc_initial_flogi(vport);
return 0;
}
@@ -3711,6 +3929,7 @@ static void
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
{
+ struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
uint32_t *payload;
@@ -3750,11 +3969,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
lpfc_nlp_init(vport, ndlp, did);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
ndlp->nlp_type |= NLP_FABRIC;
}
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ }
+ else {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* This is similar to the new node path */
+ lpfc_nlp_get(ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ newnode = 1;
+ }
}
phba->fc_stat.elsRcvFrame++;
@@ -3783,6 +4010,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_err = LSRJT_UNABLE_TPC;
break;
}
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@@ -3795,7 +4028,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvFLOGI++;
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(vport, ndlp);
+ lpfc_nlp_put(ndlp);
break;
case ELS_CMD_LOGO:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3825,7 +4058,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRSCN++;
lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(vport, ndlp);
+ lpfc_nlp_put(ndlp);
break;
case ELS_CMD_ADISC:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3897,7 +4130,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvLIRR++;
lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(vport, ndlp);
+ lpfc_nlp_put(ndlp);
break;
case ELS_CMD_RPS:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3907,7 +4140,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRPS++;
lpfc_els_rcv_rps(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(vport, ndlp);
+ lpfc_nlp_put(ndlp);
break;
case ELS_CMD_RPL:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3917,7 +4150,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRPL++;
lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(vport, ndlp);
+ lpfc_nlp_put(ndlp);
break;
case ELS_CMD_RNID:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3927,7 +4160,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvRNID++;
lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(vport, ndlp);
+ lpfc_nlp_put(ndlp);
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -3942,7 +4175,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"0115 Unknown ELS command x%x "
"received from NPORT x%x\n", cmd, did);
if (newnode)
- lpfc_drop_node(vport, ndlp);
+ lpfc_nlp_put(ndlp);
break;
}
@@ -3958,10 +4191,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return;
dropit:
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ if (vport && !(vport->load_flag & FC_UNLOADING))
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"(%d):0111 Dropping received ELS cmd "
"Data: x%x x%x x%x\n",
- vport ? vport->vpi : 0xffff, icmd->ulpStatus,
+ vport->vpi, icmd->ulpStatus,
icmd->un.ulpWord[4], icmd->ulpTimeout);
phba->fc_stat.elsRcvDrop++;
}
@@ -4114,8 +4348,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
MAILBOX_t *mb = &pmb->mb;
+ spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- lpfc_nlp_put(ndlp);
+ spin_unlock_irq(shost->host_lock);
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -4135,7 +4370,9 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
default:
/* Try to recover from this error */
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
lpfc_initial_fdisc(vport);
break;
}
@@ -4146,14 +4383,21 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else
lpfc_do_scr_ns_plogi(phba, vport);
}
+
+ /* Now, we decrement the ndlp reference count held for this
+ * callback function
+ */
+ lpfc_nlp_put(ndlp);
+
mempool_free(pmb, phba->mbox_mem_pool);
return;
}
-void
+static void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LPFC_MBOXQ_t *mbox;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4162,25 +4406,31 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
mbox->vport = vport;
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
- if (lpfc_sli_issue_mbox(phba, mbox,
- MBX_NOWAIT | MBX_STOP_IOCB)
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
+ /* The mailbox command was not successful; decrement the ndlp
+ * reference count for this command
+ */
+ lpfc_nlp_put(ndlp);
mempool_free(mbox, phba->mbox_mem_pool);
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0253 Register VPI: Can't send mbox\n");
+ goto mbox_err_exit;
}
} else {
- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0254 Register VPI: no memory\n");
-
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- lpfc_nlp_put(ndlp);
+ goto mbox_err_exit;
}
+ return;
+
+mbox_err_exit:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ return;
}
static void
@@ -4251,7 +4501,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_unreg_rpi(vport, np);
}
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
}
if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
@@ -4259,14 +4511,15 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
lpfc_do_scr_ns_plogi(phba, vport);
- lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */
+ /* Unconditionally kick off releasing fabric node for vports */
+ lpfc_nlp_put(ndlp);
}
out:
lpfc_els_free_iocb(phba, cmdiocb);
}
-int
+static int
lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
@@ -4539,7 +4792,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
-int
+static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
unsigned long iflags;
@@ -4583,7 +4836,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
}
-void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
struct lpfc_hba *phba = vport->phba;
@@ -4663,6 +4916,7 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
}
+#if 0
void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
@@ -4693,5 +4947,6 @@ void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
(piocb->iocb_cmpl) (phba, piocb, piocb);
}
}
+#endif /* 0 */
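
A note on the pattern running through the lpfc_els.c changes above: every path that hands an ndlp to asynchronous work (an ELS iocb, a queued retry event) now takes a reference with lpfc_nlp_get() up front, and the completion or free path drops it with lpfc_nlp_put(), so the node is only released by whichever put turns out to be the last one. The standalone C sketch below is illustrative only (hypothetical names, a plain counter instead of the kernel kref API, no locking) and is not driver code; it just shows why holding a reference across the queued work prevents a use-after-free.

/* Minimal userspace model of the lpfc_nlp_get()/lpfc_nlp_put() discipline. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcount;
};

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcount++;		/* caller now holds a reference */
	return n;
}

static void node_put(struct node *n)
{
	if (n && --n->refcount == 0) {	/* last reference frees the node */
		printf("node released\n");
		free(n);
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->refcount = 1;	/* discovery list reference */

	node_get(n);		/* reference taken before queueing work */
	/* ...queued work runs and completes... */
	node_put(n);		/* completion drops the work reference */

	node_put(n);		/* final put actually frees the node */
	return 0;
}
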
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c81c2b3228d6..dc042bd97baa 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -57,6 +57,7 @@ static uint8_t lpfcAlpaArray[] = {
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
+static void lpfc_disc_flush_list(struct lpfc_vport *vport);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -107,20 +108,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_nodelist * ndlp;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct completion devloss_compl;
struct lpfc_work_evt *evtp;
+ int put_node;
+ int put_rport;
rdata = rport->dd_data;
ndlp = rdata->pnode;
-
- if (!ndlp) {
- if (rport->scsi_target_id != -1) {
- printk(KERN_ERR "Cannot find remote node"
- " for rport in dev_loss_tmo_callbk x%x\n",
- rport->port_id);
- }
+ if (!ndlp)
return;
- }
vport = ndlp->vport;
phba = vport->phba;
@@ -129,15 +124,35 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
"rport devlosscb: sid:x%x did:x%x flg:x%x",
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
- init_completion(&devloss_compl);
+ /* Don't defer this if we are in the process of deleting the vport
+ * or unloading the driver. The unload will clean up the node
+ * appropriately; we just need to clean up the ndlp rport info here.
+ */
+ if (vport->load_flag & FC_UNLOADING) {
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return;
+ }
+
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ return;
+
evtp = &ndlp->dev_loss_evt;
if (!list_empty(&evtp->evt_listp))
return;
spin_lock_irq(&phba->hbalock);
- evtp->evt_arg1 = ndlp;
- evtp->evt_arg2 = &devloss_compl;
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
@@ -145,8 +160,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_unlock_irq(&phba->hbalock);
- wait_for_completion(&devloss_compl);
-
return;
}
@@ -154,7 +167,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
* This function is called from the worker thread when dev_loss_tmo
* expire.
*/
-void
+static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
struct lpfc_rport_data *rdata;
@@ -162,6 +175,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
struct lpfc_vport *vport;
struct lpfc_hba *phba;
uint8_t *name;
+ int put_node;
+ int put_rport;
int warn_on = 0;
rport = ndlp->rport;
@@ -178,14 +193,32 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
"rport devlosstmo:did:x%x type:x%x id:x%x",
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
- if (!(vport->load_flag & FC_UNLOADING) &&
- ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ /* Don't defer this if we are in the process of deleting the vport
+ * or unloading the driver. The unload will clean up the node
+ * appropriately; we just need to clean up the ndlp rport info here.
+ */
+ if (vport->load_flag & FC_UNLOADING) {
+ if (ndlp->nlp_sid != NLP_NO_SID) {
+ /* flush the target */
+ lpfc_sli_abort_iocb(vport,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ }
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
return;
+ }
- if (ndlp->nlp_type & NLP_FABRIC) {
- int put_node;
- int put_rport;
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ return;
+ if (ndlp->nlp_type & NLP_FABRIC) {
/* We will clean up these Nodes in linkup */
put_node = rdata->pnode != NULL;
put_rport = ndlp->rport != NULL;
@@ -227,23 +260,20 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+
if (!(vport->load_flag & FC_UNLOADING) &&
!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
+ (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
- else {
- int put_node;
- int put_rport;
-
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
}
}
@@ -260,7 +290,6 @@ lpfc_work_list_done(struct lpfc_hba *phba)
{
struct lpfc_work_evt *evtp = NULL;
struct lpfc_nodelist *ndlp;
- struct lpfc_vport *vport;
int free_evt;
spin_lock_irq(&phba->hbalock);
@@ -270,35 +299,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
free_evt = 1;
switch (evtp->evt) {
- case LPFC_EVT_DEV_LOSS_DELAY:
- free_evt = 0; /* evt is part of ndlp */
- ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- vport = ndlp->vport;
- if (!vport)
- break;
-
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport devlossdly:did:x%x flg:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, 0);
-
- if (!(vport->load_flag & FC_UNLOADING) &&
- !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
- !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RM);
- }
- break;
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
lpfc_els_retry_delay_handler(ndlp);
free_evt = 0; /* evt is part of ndlp */
+ /* decrement the node reference count held
+ * for this queued work
+ */
+ lpfc_nlp_put(ndlp);
break;
case LPFC_EVT_DEV_LOSS:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_nlp_get(ndlp);
lpfc_dev_loss_tmo_handler(ndlp);
free_evt = 0;
- complete((struct completion *)(evtp->evt_arg2));
+ /* decrement the node reference count held for
+ * this queued work
+ */
lpfc_nlp_put(ndlp);
break;
case LPFC_EVT_ONLINE:
@@ -373,7 +389,7 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_latt(phba);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS; i++) {
+ for(i = 0; i <= phba->max_vpi; i++) {
/*
* We could have no vports in array if unloading, so if
* this happens then just use the pport
@@ -405,14 +421,14 @@ lpfc_work_done(struct lpfc_hba *phba)
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
pring = &phba->sli.ring[LPFC_ELS_RING];
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
if ((status & HA_RXMASK)
|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
- if (pring->flag & LPFC_STOP_IOCB_MASK) {
+ if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
} else {
lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -544,6 +560,7 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
int rc;
@@ -552,7 +569,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
- if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
+ if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+ ((vport->port_type == LPFC_NPIV_PORT) &&
+ (ndlp->nlp_DID == NameServer_DID)))
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
@@ -565,14 +584,30 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
}
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
}
}
+void
+lpfc_port_link_failure(struct lpfc_vport *vport)
+{
+ /* Cleanup any outstanding RSCN activity */
+ lpfc_els_flush_rscn(vport);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(vport);
+
+ lpfc_cleanup_rpis(vport, 0);
+
+ /* Turn off discovery timer if its running */
+ lpfc_can_disctmo(vport);
+}
+
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
- struct lpfc_nodelist *ndlp, *next_ndlp;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
@@ -581,21 +616,8 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
"Link Down: state:x%x rtry:x%x flg:x%x",
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
- /* Cleanup any outstanding RSCN activity */
- lpfc_els_flush_rscn(vport);
-
- /* Cleanup any outstanding ELS commands */
- lpfc_els_flush_cmd(vport);
+ lpfc_port_link_failure(vport);
- lpfc_cleanup_rpis(vport, 0);
-
- /* free any ndlp's on unused list */
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- lpfc_drop_node(vport, ndlp);
-
- /* Turn off discovery timer if its running */
- lpfc_can_disctmo(vport);
}
int
@@ -618,18 +640,18 @@ lpfc_linkdown(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(vports[i]);
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
mb->vport = vport;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
mempool_free(mb, phba->mbox_mem_pool);
}
@@ -643,8 +665,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_config_link(phba, mb);
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mb->vport = vport;
- if (lpfc_sli_issue_mbox(phba, mb,
- (MBX_NOWAIT | MBX_STOP_IOCB))
+ if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
mempool_free(mb, phba->mbox_mem_pool);
}
@@ -686,7 +707,6 @@ static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_hba *phba = vport->phba;
if ((vport->load_flag & FC_UNLOADING) != 0)
@@ -713,11 +733,6 @@ lpfc_linkup_port(struct lpfc_vport *vport)
if (vport->fc_flag & FC_LBIT)
lpfc_linkup_cleanup_nodes(vport);
- /* free any ndlp's in unused state */
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
- nlp_listp)
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- lpfc_drop_node(vport, ndlp);
}
static int
@@ -734,9 +749,9 @@ lpfc_linkup(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
lpfc_linkup_port(vports[i]);
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_clear_la(phba, phba->pport);
@@ -749,7 +764,7 @@ lpfc_linkup(struct lpfc_hba *phba)
* as the completion routine when the command is
* handed off to the SLI layer.
*/
-void
+static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -852,8 +867,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
if (vport->port_state != LPFC_FLOGI) {
- vport->port_state = LPFC_FLOGI;
- lpfc_set_disctmo(vport);
lpfc_initial_flogi(vport);
}
return;
@@ -1022,8 +1035,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
lpfc_read_sparam(phba, sparam_mbox, 0);
sparam_mbox->vport = vport;
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
- rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1040,8 +1052,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
lpfc_config_link(phba, cfglink_mbox);
cfglink_mbox->vport = vport;
cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
@@ -1174,6 +1185,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
+ /* decrement the node reference count held for this callback
+ * function.
+ */
lpfc_nlp_put(ndlp);
return;
@@ -1219,7 +1233,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
lpfc_unreg_vpi(phba, vport->vpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
- rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1800 Could not issue unreg_vpi\n");
@@ -1319,7 +1333,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0;
- i < LPFC_MAX_VPORTS && vports[i] != NULL;
+ i <= phba->max_vpi && vports[i] != NULL;
i++) {
if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
continue;
@@ -1335,7 +1349,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
"Fabric support\n");
}
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -1361,11 +1375,16 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (mb->mbxStatus) {
out:
+ /* decrement the node reference count held for this
+ * callback function.
+ */
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_drop_node(vport, ndlp);
+
+ /* If no other thread is using the ndlp, free it */
+ lpfc_nlp_not_used(ndlp);
if (phba->fc_topology == TOPOLOGY_LOOP) {
/*
@@ -1410,6 +1429,9 @@ out:
goto out;
}
+ /* decrement the node reference count held for this
+ * callback function.
+ */
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -1656,8 +1678,18 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ /*
+ * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
+ * be used if we wish to issue the "last" lpfc_nlp_put() to remove
+ * the ndlp from the vport. The ndlp is kept on the list, marked as
+ * UNUSED, until ALL other outstanding threads have completed. We check
+ * that the ndlp is not already in the UNUSED state before we proceed.
+ */
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
lpfc_nlp_put(ndlp);
+ return;
}
/*
@@ -1868,8 +1900,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
mempool_free(mbox, phba->mbox_mem_pool);
}
@@ -1892,8 +1923,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
}
@@ -1912,8 +1943,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1815 Could not issue "
@@ -1981,11 +2012,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
list_del_init(&ndlp->dev_loss_evt.evt_listp);
- if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
- list_del_init(&ndlp->dev_loss_evt.evt_listp);
- complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
- }
-
lpfc_unreg_rpi(vport, ndlp);
return 0;
@@ -1999,12 +2025,39 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
+ if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
+ /* For this case we need to cleanup the default rpi
+ * allocated by the firmware.
+ */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
+ != NULL) {
+ rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
+ (uint8_t *) &vport->fc_sparam, mbox, 0);
+ if (rc) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ else {
+ mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ mbox->vport = vport;
+ mbox->context2 = NULL;
+ rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+ }
+ }
+
lpfc_cleanup_node(vport, ndlp);
/*
@@ -2132,6 +2185,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
}
if (vport->fc_flag & FC_RSCN_MODE) {
if (lpfc_rscn_payload_check(vport, did)) {
+ /* If we've already received a PLOGI from this NPort,
+ * we don't need to try to discover it again.
+ */
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ return NULL;
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -2144,8 +2203,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
} else
ndlp = NULL;
} else {
+ /* If we've already received a PLOGI from this NPort,
+ * or we are already in the process of discovery on it,
+ * we don't need to try to discover it again.
+ */
if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
- ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
+ ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_flag & NLP_RCV_PLOGI)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
@@ -2220,8 +2284,7 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
lpfc_clear_la(phba, mbox);
mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
- MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_disc_flush_list(vport);
@@ -2244,8 +2307,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
regvpimbox->vport = vport;
- if (lpfc_sli_issue_mbox(phba, regvpimbox,
- (MBX_NOWAIT | MBX_STOP_IOCB))
+ if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
== MBX_NOT_FINISHED) {
mempool_free(regvpimbox, phba->mbox_mem_pool);
}
@@ -2414,7 +2476,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
}
-void
+static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
@@ -2426,7 +2488,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
lpfc_free_tx(phba, ndlp);
- lpfc_nlp_put(ndlp);
}
}
}
@@ -2516,6 +2577,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
if (ndlp->nlp_type & NLP_FABRIC) {
/* Clean up the ndlp on Fabric connections */
lpfc_drop_node(vport, ndlp);
+
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/* Fail outstanding IO now since device
* is marked for PLOGI.
@@ -2524,9 +2586,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
}
}
if (vport->port_state != LPFC_FLOGI) {
- vport->port_state = LPFC_FLOGI;
- lpfc_set_disctmo(vport);
lpfc_initial_flogi(vport);
+ return;
}
break;
@@ -2536,7 +2597,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Initial FLOGI timeout */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0222 Initial %s timeout\n",
- vport->vpi ? "FLOGI" : "FDISC");
+ vport->vpi ? "FDISC" : "FLOGI");
/* Assume no Fabric and go on with discovery.
* Check for outstanding ELS FLOGI to abort.
@@ -2558,10 +2619,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp)
- lpfc_nlp_put(ndlp);
- /* Start discovery */
- lpfc_disc_start(vport);
- break;
+ lpfc_els_abort(phba, ndlp);
+
+ /* ReStart discovery */
+ goto restart_disc;
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
@@ -2580,6 +2641,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
}
vport->fc_ns_retry = 0;
+restart_disc:
/*
* Discovery is over.
* set port_state to PORT_READY if SLI2.
@@ -2608,8 +2670,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
initlinkmbox->vport = vport;
initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
lpfc_set_loopback_flag(phba);
if (rc == MBX_NOT_FINISHED)
mempool_free(initlinkmbox, phba->mbox_mem_pool);
@@ -2664,12 +2725,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
clrlaerr = 1;
break;
+ case LPFC_LINK_UP:
+ lpfc_issue_clear_la(phba, vport);
+ /* Drop thru */
case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
- case LPFC_LINK_UP:
case LPFC_HBA_ERROR:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0230 Unexpected timeout, hba link "
@@ -2723,7 +2786,9 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else
mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
- /* Mailbox took a reference to the node */
+ /* decrement the node reference count held for this callback
+ * function.
+ */
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -2747,19 +2812,19 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
sizeof(ndlp->nlp_portname)) == 0;
}
-struct lpfc_nodelist *
+static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
struct lpfc_nodelist *ndlp;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
- filter(ndlp, param))
+ if (filter(ndlp, param))
return ndlp;
}
return NULL;
}
+#if 0
/*
* Search node lists for a remote port matching filter criteria
* Caller needs to hold host_lock before calling this routine.
@@ -2775,6 +2840,7 @@ lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
spin_unlock_irq(shost->host_lock);
return ndlp;
}
+#endif /* 0 */
/*
* This routine looks up the ndlp lists for the given RPI. If rpi found it
@@ -2786,6 +2852,7 @@ __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
+#if 0
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
@@ -2797,6 +2864,7 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
spin_unlock_irq(shost->host_lock);
return ndlp;
}
+#endif /* 0 */
/*
* This routine looks up the ndlp lists for the given WWPN. If WWPN found it
@@ -2837,6 +2905,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return;
}
+/* This routine releases all resources associated with a specific NPort's ndlp
+ * and mempool_free's the nodelist.
+ */
static void
lpfc_nlp_release(struct kref *kref)
{
@@ -2851,16 +2922,57 @@ lpfc_nlp_release(struct kref *kref)
mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}
+/* This routine bumps the reference count for a ndlp structure to ensure
+ * that one discovery thread won't free a ndlp while another discovery thread
+ * is using it.
+ */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
- if (ndlp)
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node get: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount));
kref_get(&ndlp->kref);
+ }
return ndlp;
}
+
+/* This routine decrements the reference count for a ndlp structure. If the
+ * count goes to 0, this indicates that the associated nodelist should be freed.
+ */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount));
+ }
return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
+
+/* This routine frees the specified nodelist if it is not in use
+ * by any other discovery thread. This routine returns 1 if the ndlp
+ * is not being used by anyone and has been freed. A return value of
+ * 0 indicates it is being used by another discovery thread and the
+ * refcount is left unchanged.
+ */
+int
+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+{
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node not used: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount));
+
+ if (atomic_read(&ndlp->kref.refcount) == 1) {
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+ return 0;
+}
+
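
The new lpfc_nlp_not_used() helper at the end of this file frees the node only when the caller holds the sole remaining reference (kref count of 1); otherwise it leaves the count untouched so another discovery thread's reference stays valid. A minimal sketch of that "free only if I am the last user" check, again with hypothetical names and a plain counter standing in for the kernel kref API:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcount;
};

static void node_put(struct node *n)
{
	if (--n->refcount == 0)
		free(n);
}

/* Free the node only if no other thread still holds a reference.
 * Returns 1 if the node was freed, 0 if it is still in use.
 */
static int node_not_used(struct node *n)
{
	if (n->refcount == 1) {
		node_put(n);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->refcount = 2;			/* two users hold references */
	printf("freed: %d\n", node_not_used(n));	/* 0: another user remains */
	node_put(n);				/* the other user drops its reference */
	printf("freed: %d\n", node_not_used(n));	/* 1: last user, node freed */
	return 0;
}
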
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 451accd5564b..041f83e7634a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -139,6 +139,9 @@ struct lpfc_sli_ct_request {
uint8_t len;
uint8_t symbname[255];
} rsnn;
+ struct da_id { /* For DA_ID requests */
+ uint32_t port_id;
+ } da_id;
struct rspn { /* For RSPN_ID requests */
uint32_t PortId;
uint8_t len;
@@ -150,11 +153,7 @@ struct lpfc_sli_ct_request {
struct gff_acc {
uint8_t fbits[128];
} gff_acc;
-#ifdef __BIG_ENDIAN_BITFIELD
#define FCP_TYPE_FEATURE_OFFSET 7
-#else /* __LITTLE_ENDIAN_BITFIELD */
-#define FCP_TYPE_FEATURE_OFFSET 4
-#endif
struct rff {
uint32_t PortId;
uint8_t reserved[2];
@@ -177,6 +176,8 @@ struct lpfc_sli_ct_request {
sizeof(struct rnn))
#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rsnn))
+#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct da_id))
#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rspn))
@@ -1228,7 +1229,8 @@ typedef struct { /* FireFly BIU registers */
#define HS_FFER3 0x20000000 /* Bit 29 */
#define HS_FFER2 0x40000000 /* Bit 30 */
#define HS_FFER1 0x80000000 /* Bit 31 */
-#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */
+#define HS_CRIT_TEMP 0x00000100 /* Bit 8 */
+#define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */
/* Host Control Register */
@@ -1277,12 +1279,14 @@ typedef struct { /* FireFly BIU registers */
#define MBX_DEL_LD_ENTRY 0x1D
#define MBX_RUN_PROGRAM 0x1E
#define MBX_SET_MASK 0x20
-#define MBX_SET_SLIM 0x21
+#define MBX_SET_VARIABLE 0x21
#define MBX_UNREG_D_ID 0x23
#define MBX_KILL_BOARD 0x24
#define MBX_CONFIG_FARP 0x25
#define MBX_BEACON 0x2A
#define MBX_HEARTBEAT 0x31
+#define MBX_WRITE_VPARMS 0x32
+#define MBX_ASYNCEVT_ENABLE 0x33
#define MBX_CONFIG_HBQ 0x7C
#define MBX_LOAD_AREA 0x81
@@ -1297,7 +1301,7 @@ typedef struct { /* FireFly BIU registers */
#define MBX_REG_VNPID 0x96
#define MBX_UNREG_VNPID 0x97
-#define MBX_FLASH_WR_ULA 0x98
+#define MBX_WRITE_WWN 0x98
#define MBX_SET_DEBUG 0x99
#define MBX_LOAD_EXP_ROM 0x9C
@@ -1344,6 +1348,7 @@ typedef struct { /* FireFly BIU registers */
/* SLI_2 IOCB Command Set */
+#define CMD_ASYNC_STATUS 0x7C
#define CMD_RCV_SEQUENCE64_CX 0x81
#define CMD_XMIT_SEQUENCE64_CR 0x82
#define CMD_XMIT_SEQUENCE64_CX 0x83
@@ -1368,6 +1373,7 @@ typedef struct { /* FireFly BIU registers */
#define CMD_FCP_TRECEIVE64_CX 0xA1
#define CMD_FCP_TRSP64_CX 0xA3
+#define CMD_QUE_XRI64_CX 0xB3
#define CMD_IOCB_RCV_SEQ64_CX 0xB5
#define CMD_IOCB_RCV_ELS64_CX 0xB7
#define CMD_IOCB_RCV_CONT64_CX 0xBB
@@ -1406,6 +1412,8 @@ typedef struct { /* FireFly BIU registers */
#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */
+#define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */
+
/*
* Begin Structure Definitions for Mailbox Commands
*/
@@ -2606,6 +2614,18 @@ typedef struct {
uint32_t IPAddress;
} CONFIG_FARP_VAR;
+/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd:30;
+ uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/
+#else /* __LITTLE_ENDIAN */
+ uint32_t ring:2; /* Ring for ASYNC_EVENT iocb Bits 0-1*/
+ uint32_t rsvd:30;
+#endif
+} ASYNCEVT_ENABLE_VAR;
+
/* Union of all Mailbox Command types */
#define MAILBOX_CMD_WSIZE 32
#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
@@ -2645,6 +2665,7 @@ typedef union {
CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
+ ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
} MAILVARIANTS;
/*
@@ -2973,6 +2994,34 @@ typedef struct {
#endif
} RCV_ELS_REQ64;
+/* IOCB Command template for RCV_SEQ64 */
+struct rcv_seq64 {
+ struct ulp_bde64 elsReq;
+ uint32_t hbq_1;
+ uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rctl:8;
+ uint32_t type:8;
+ uint32_t dfctl:8;
+ uint32_t ls:1;
+ uint32_t fs:1;
+ uint32_t rsvd2:3;
+ uint32_t si:1;
+ uint32_t bc:1;
+ uint32_t rsvd3:1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t rsvd3:1;
+ uint32_t bc:1;
+ uint32_t si:1;
+ uint32_t rsvd2:3;
+ uint32_t fs:1;
+ uint32_t ls:1;
+ uint32_t dfctl:8;
+ uint32_t type:8;
+ uint32_t rctl:8;
+#endif
+};
+
/* IOCB Command template for all 64 bit FCP Initiator commands */
typedef struct {
ULP_BDL bdl;
@@ -2987,6 +3036,21 @@ typedef struct {
uint32_t fcpt_Length; /* transfer ready for IWRITE */
} FCPT_FIELDS64;
+/* IOCB Command template for Async Status iocb commands */
+typedef struct {
+ uint32_t rsvd[4];
+ uint32_t param;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t evt_code; /* High order bits word 5 */
+ uint16_t sub_ctxt_tag; /* Low order bits word 5 */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t sub_ctxt_tag; /* High order bits word 5 */
+ uint16_t evt_code; /* Low order bits word 5 */
+#endif
+} ASYNCSTAT_FIELDS;
+#define ASYNC_TEMP_WARN 0x100
+#define ASYNC_TEMP_SAFE 0x101
+
/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
@@ -3004,7 +3068,26 @@ struct rcv_sli3 {
struct ulp_bde64 bde2;
};
+/* Structure used for a single HBQ entry */
+struct lpfc_hbq_entry {
+ struct ulp_bde64 bde;
+ uint32_t buffer_tag;
+};
+/* IOCB Command template for QUE_XRI64_CX (0xB3) command */
+typedef struct {
+ struct lpfc_hbq_entry buff;
+ uint32_t rsvd;
+ uint32_t rsvd1;
+} QUE_XRI64_CX_FIELDS;
+
+struct que_xri64cx_ext_fields {
+ uint32_t iotag64_low;
+ uint32_t iotag64_high;
+ uint32_t ebde_count;
+ uint32_t rsvd;
+ struct lpfc_hbq_entry buff[5];
+};
typedef struct _IOCB { /* IOCB structure */
union {
@@ -3028,6 +3111,9 @@ typedef struct _IOCB { /* IOCB structure */
XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */
FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */
FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */
+ ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
+ QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
+ struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
} un;
@@ -3085,6 +3171,10 @@ typedef struct _IOCB { /* IOCB structure */
union {
struct rcv_sli3 rcvsli3; /* words 8 - 15 */
+
+ /* words 8-31 used for que_xri_cx iocb */
+ struct que_xri64cx_ext_fields que_xri64cx_ext_words;
+
uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
} unsli3;
@@ -3124,12 +3214,6 @@ typedef struct _IOCB { /* IOCB structure */
} IOCB_t;
-/* Structure used for a single HBQ entry */
-struct lpfc_hbq_entry {
- struct ulp_bde64 bde;
- uint32_t buffer_tag;
-};
-
#define SLI1_SLIM_SIZE (4 * 1024)
@@ -3172,6 +3256,8 @@ lpfc_is_LC_HBA(unsigned short device)
(device == PCI_DEVICE_ID_BSMB) ||
(device == PCI_DEVICE_ID_ZMID) ||
(device == PCI_DEVICE_ID_ZSMB) ||
+ (device == PCI_DEVICE_ID_SAT_MID) ||
+ (device == PCI_DEVICE_ID_SAT_SMB) ||
(device == PCI_DEVICE_ID_RFLY))
return 1;
else
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ecebdfa00470..3205f7488d1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -212,6 +212,18 @@ out_free_mbox:
return 0;
}
+/* Completion handler for config async event mailbox command. */
+static void
+lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+ if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
+ phba->temp_sensor_support = 1;
+ else
+ phba->temp_sensor_support = 0;
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+}
+
/************************************************************************/
/* */
/* lpfc_config_port_post */
@@ -234,6 +246,15 @@ lpfc_config_port_post(struct lpfc_hba *phba)
int i, j;
int rc;
+ spin_lock_irq(&phba->hbalock);
+ /*
+ * If the Config port completed correctly, the HBA is not
+ * overheated any more.
+ */
+ if (phba->over_temp_state == HBA_OVER_TEMP)
+ phba->over_temp_state = HBA_NORMAL_TEMP;
+ spin_unlock_irq(&phba->hbalock);
+
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
phba->link_state = LPFC_HBA_ERROR;
@@ -343,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->link_state = LPFC_LINK_DOWN;
- /* Only process IOCBs on ring 0 till hba_state is READY */
+ /* Only process IOCBs on ELS ring till hba_state is READY */
if (psli->ring[psli->extra_ring].cmdringaddr)
psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
if (psli->ring[psli->fcp_ring].cmdringaddr)
@@ -409,7 +430,21 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
/* MBOX buffer will be freed in mbox compl */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ lpfc_config_async(phba, pmb, LPFC_ELS_RING);
+ pmb->mbox_cmpl = lpfc_config_async_cmpl;
+ pmb->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_INIT,
+ "0456 Adapter failed to issue "
+ "ASYNCEVT_ENABLE mbox status x%x \n.",
+ rc);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
return (0);
}
@@ -449,6 +484,9 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_dmabuf *mp, *next_mp;
+ struct lpfc_iocbq *iocb;
+ IOCB_t *cmd = NULL;
+ LIST_HEAD(completions);
int i;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
@@ -464,16 +502,42 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
}
}
+ spin_lock_irq(&phba->hbalock);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
+
+ /* At this point in time the HBA is either reset or DOA. Either
+ * way, nothing should be on txcmplq as it will NEVER complete.
+ */
+ list_splice_init(&pring->txcmplq, &completions);
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ iocb = list_get_first(&completions, struct lpfc_iocbq,
+ list);
+ cmd = &iocb->iocb;
+ list_del_init(&iocb->list);
+
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ }
+ }
+
lpfc_sli_abort_iocb_ring(phba, pring);
+ spin_lock_irq(&phba->hbalock);
}
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
/* HBA heart beat timeout handler */
-void
+static void
lpfc_hb_timeout(unsigned long ptr)
{
struct lpfc_hba *phba;
@@ -512,8 +576,10 @@ void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmboxq;
+ struct lpfc_dmabuf *buf_ptr;
int retval;
struct lpfc_sli *psli = &phba->sli;
+ LIST_HEAD(completions);
if ((phba->link_state == LPFC_HBA_ERROR) ||
(phba->pport->load_flag & FC_UNLOADING) ||
@@ -540,49 +606,88 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->pport->work_port_lock);
- /* If there is no heart beat outstanding, issue a heartbeat command */
- if (!phba->hb_outstanding) {
- pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
- if (!pmboxq) {
- mod_timer(&phba->hb_tmofunc,
- jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
- return;
+ if (phba->elsbuf_cnt &&
+ (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->elsbuf, &completions);
+ phba->elsbuf_cnt = 0;
+ phba->elsbuf_prev_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, buf_ptr,
+ struct lpfc_dmabuf, list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
}
+ }
+ phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
- lpfc_heart_beat(phba, pmboxq);
- pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
- pmboxq->vport = phba->pport;
- retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ /* If there is no heart beat outstanding, issue a heartbeat command */
+ if (phba->cfg_enable_hba_heartbeat) {
+ if (!phba->hb_outstanding) {
+ pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
+ if (!pmboxq) {
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
- if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
- mempool_free(pmboxq, phba->mbox_mem_pool);
+ lpfc_heart_beat(phba, pmboxq);
+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+ pmboxq->vport = phba->pport;
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+ if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
mod_timer(&phba->hb_tmofunc,
- jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+ phba->hb_outstanding = 1;
return;
+ } else {
+ /*
+ * If the heartbeat timeout is called with hb_outstanding set,
+ * we need to take the HBA offline.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0459 Adapter heartbeat failure, "
+ "taking this port offline.\n");
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ lpfc_unblock_mgmt_io(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+ lpfc_hba_down_post(phba);
}
- mod_timer(&phba->hb_tmofunc,
- jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
- phba->hb_outstanding = 1;
- return;
- } else {
- /*
- * If heart beat timeout called with hb_outstanding set we
- * need to take the HBA offline.
- */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0459 Adapter heartbeat failure, taking "
- "this port offline.\n");
+ }
+}
- spin_lock_irq(&phba->hbalock);
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(&phba->hbalock);
+static void
+lpfc_offline_eratt(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
- lpfc_offline_prep(phba);
- lpfc_offline(phba);
- lpfc_unblock_mgmt_io(phba);
- phba->link_state = LPFC_HBA_ERROR;
- lpfc_hba_down_post(phba);
- }
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_offline_prep(phba);
+
+ lpfc_offline(phba);
+ lpfc_reset_barrier(phba);
+ lpfc_sli_brdreset(phba);
+ lpfc_hba_down_post(phba);
+ lpfc_sli_brdready(phba, HS_MBRDY);
+ lpfc_unblock_mgmt_io(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+ return;
}
/************************************************************************/
@@ -601,6 +706,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
struct lpfc_vport **vports;
uint32_t event_data;
+ unsigned long temperature;
+ struct temp_event temp_event_data;
struct Scsi_Host *shost;
int i;
@@ -608,6 +715,9 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
* since we cannot communicate with the pci card anyway. */
if (pci_channel_offline(phba->pcidev))
return;
+ /* If resets are disabled then leave the HBA alone and return */
+ if (!phba->cfg_enable_hba_reset)
+ return;
if (phba->work_hs & HS_FFER6 ||
phba->work_hs & HS_FFER5) {
@@ -620,14 +730,14 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0;
- i < LPFC_MAX_VPORTS && vports[i] != NULL;
+ i <= phba->max_vpi && vports[i] != NULL;
i++){
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(shost->host_lock);
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
@@ -655,6 +765,31 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
return;
}
lpfc_unblock_mgmt_io(phba);
+ } else if (phba->work_hs & HS_CRIT_TEMP) {
+ temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ temp_event_data.event_code = LPFC_CRIT_TEMP;
+ temp_event_data.data = (uint32_t)temperature;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0459 Adapter maximum temperature exceeded "
+ "(%ld), taking this port offline "
+ "Data: x%x x%x x%x\n",
+ temperature, phba->work_hs,
+ phba->work_status[0], phba->work_status[1]);
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *) &temp_event_data,
+ SCSI_NL_VID_TYPE_PCI
+ | PCI_VENDOR_ID_EMULEX);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->over_temp_state = HBA_OVER_TEMP;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_offline_eratt(phba);
+
} else {
/* The if clause above forces this code path when the status
* failure is a value other than FFER6. Do not call the offline
@@ -672,14 +807,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
sizeof(event_data), (char *) &event_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
- spin_lock_irq(&phba->hbalock);
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(&phba->hbalock);
- lpfc_offline_prep(phba);
- lpfc_offline(phba);
- lpfc_unblock_mgmt_io(phba);
- phba->link_state = LPFC_HBA_ERROR;
- lpfc_hba_down_post(phba);
+ lpfc_offline_eratt(phba);
}
}
@@ -699,21 +827,25 @@ lpfc_handle_latt(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
struct lpfc_dmabuf *mp;
- int rc = -ENOMEM;
+ int rc = 0;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!pmb)
+ if (!pmb) {
+ rc = 1;
goto lpfc_handle_latt_err_exit;
+ }
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ if (!mp) {
+ rc = 2;
goto lpfc_handle_latt_free_pmb;
+ }
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt)
+ if (!mp->virt) {
+ rc = 3;
goto lpfc_handle_latt_free_mp;
-
- rc = -EIO;
+ }
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
@@ -722,9 +854,11 @@ lpfc_handle_latt(struct lpfc_hba *phba)
lpfc_read_la(phba, pmb, mp);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
pmb->vport = vport;
- rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED)
+ rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = 4;
goto lpfc_handle_latt_free_mbuf;
+ }
/* Clear Link Attention in HA REG */
spin_lock_irq(&phba->hbalock);
@@ -756,10 +890,8 @@ lpfc_handle_latt_err_exit:
lpfc_linkdown(phba);
phba->link_state = LPFC_HBA_ERROR;
- /* The other case is an error from issue_mbox */
- if (rc == -ENOMEM)
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "0300 READ_LA: no buffers\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
return;
}
@@ -1088,9 +1220,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
/* Allocate buffer to post */
mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
if (mp1)
- mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
- &mp1->phys);
- if (mp1 == 0 || mp1->virt == 0) {
+ mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
+ if (!mp1 || !mp1->virt) {
kfree(mp1);
lpfc_sli_release_iocbq(phba, iocb);
pring->missbufcnt = cnt;
@@ -1104,7 +1235,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
if (mp2)
mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&mp2->phys);
- if (mp2 == 0 || mp2->virt == 0) {
+ if (!mp2 || !mp2->virt) {
kfree(mp2);
lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
kfree(mp1);
@@ -1280,15 +1411,39 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
kfree(HashWorking);
}
-static void
+void
lpfc_cleanup(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
+ int i = 0;
- /* clean up phba - lpfc specific */
- lpfc_can_disctmo(vport);
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
- lpfc_nlp_put(ndlp);
+ if (phba->link_state > LPFC_LINK_DOWN)
+ lpfc_port_link_failure(vport);
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_type & NLP_FABRIC)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+
+ /* At this point, ALL ndlp's should be gone
+ * because of the previous NLP_EVT_DEVICE_RM.
+ * Let's wait for this to happen, if needed.
+ */
+ while (!list_empty(&vport->fc_nodes)) {
+
+ if (i++ > 3000) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0233 Nodelist not empty\n");
+ break;
+ }
+
+ /* Wait for any activity on ndlps to settle */
+ msleep(10);
+ }
return;
}
@@ -1307,14 +1462,14 @@ lpfc_establish_link_tmo(unsigned long ptr)
phba->pport->fc_flag, phba->pport->port_state);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irqsave(shost->host_lock, iflag);
vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
spin_unlock_irqrestore(shost->host_lock, iflag);
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
}
void
@@ -1339,6 +1494,16 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba)
return;
}
+static void
+lpfc_block_mgmt_io(struct lpfc_hba * phba)
+{
+ unsigned long iflag;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
int
lpfc_online(struct lpfc_hba *phba)
{
@@ -1369,7 +1534,7 @@ lpfc_online(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
@@ -1378,23 +1543,13 @@ lpfc_online(struct lpfc_hba *phba)
vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
lpfc_unblock_mgmt_io(phba);
return 0;
}
void
-lpfc_block_mgmt_io(struct lpfc_hba * phba)
-{
- unsigned long iflag;
-
- spin_lock_irqsave(&phba->hbalock, iflag);
- phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
- spin_unlock_irqrestore(&phba->hbalock, iflag);
-}
-
-void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
unsigned long iflag;
@@ -1409,6 +1564,8 @@ lpfc_offline_prep(struct lpfc_hba * phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_vport **vports;
+ int i;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -1417,10 +1574,34 @@ lpfc_offline_prep(struct lpfc_hba * phba)
lpfc_linkdown(phba);
- /* Issue an unreg_login to all nodes */
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
- if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
- lpfc_unreg_rpi(vport, ndlp);
+ /* Issue an unreg_login to all nodes on all vports */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ struct Scsi_Host *shost;
+
+ if (vports[i]->load_flag & FC_UNLOADING)
+ continue;
+ shost = lpfc_shost_from_vport(vports[i]);
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &vports[i]->fc_nodes,
+ nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ lpfc_disc_state_machine(vports[i], ndlp,
+ NULL, NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vports[i], ndlp,
+ NULL, NLP_EVT_DEVICE_RM);
+ }
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vports[i], ndlp);
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
lpfc_sli_flush_mbox_queue(phba);
}
@@ -1439,9 +1620,9 @@ lpfc_offline(struct lpfc_hba *phba)
lpfc_stop_phba_timers(phba);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
lpfc_stop_vport_timers(vports[i]);
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0460 Bring Adapter offline\n");
/* Bring down the SLI Layer and cleanup. The HBA is offline
@@ -1452,15 +1633,14 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
- lpfc_cleanup(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->work_port_events = 0;
vports[i]->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irq(shost->host_lock);
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
}
/******************************************************************************
@@ -1674,6 +1854,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
fc_host_supported_speeds(shost) = 0;
if (phba->lmt & LMT_10Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+ if (phba->lmt & LMT_8Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
if (phba->lmt & LMT_4Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
if (phba->lmt & LMT_2Gb)
@@ -1707,13 +1889,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
struct Scsi_Host *shost = NULL;
void *ptr;
unsigned long bar0map_len, bar2map_len;
- int error = -ENODEV;
+ int error = -ENODEV, retval;
int i, hbq_count;
uint16_t iotag;
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (pci_enable_device(pdev))
+ if (pci_enable_device_bars(pdev, bars))
goto out;
- if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
+ if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
goto out_disable_device;
phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
@@ -1823,9 +2006,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
- error = lpfc_mem_alloc(phba);
- if (error)
+ retval = lpfc_mem_alloc(phba);
+ if (retval) {
+ error = retval;
goto out_free_hbqslimp;
+ }
/* Initialize and populate the iocb list per host. */
INIT_LIST_HEAD(&phba->lpfc_iocb_list);
@@ -1880,6 +2065,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Initialize list of fabric iocbs */
INIT_LIST_HEAD(&phba->fabric_iocb_list);
+ /* Initialize list to save ELS buffers */
+ INIT_LIST_HEAD(&phba->elsbuf);
+
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport)
goto out_kthread_stop;
@@ -1891,8 +2079,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
pci_set_drvdata(pdev, shost);
if (phba->cfg_use_msi) {
- error = pci_enable_msi(phba->pcidev);
- if (!error)
+ retval = pci_enable_msi(phba->pcidev);
+ if (!retval)
phba->using_msi = 1;
else
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -1900,11 +2088,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
"with IRQ\n");
}
- error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
+ retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
LPFC_DRIVER_NAME, phba);
- if (error) {
+ if (retval) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0451 Enable interrupt handler failed\n");
+ error = retval;
goto out_disable_msi;
}
@@ -1914,11 +2103,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
- if (lpfc_alloc_sysfs_attr(vport))
+ if (lpfc_alloc_sysfs_attr(vport)) {
+ error = -ENOMEM;
goto out_free_irq;
+ }
- if (lpfc_sli_hba_setup(phba))
+ if (lpfc_sli_hba_setup(phba)) {
+ error = -ENODEV;
goto out_remove_device;
+ }
/*
* hba setup may have changed the hba_queue_depth so we need to adjust
@@ -1975,7 +2168,7 @@ out_idr_remove:
out_free_phba:
kfree(phba);
out_release_regions:
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, bars);
out_disable_device:
pci_disable_device(pdev);
out:
@@ -1991,6 +2184,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
@@ -1998,8 +2193,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
kfree(vport->vname);
lpfc_free_sysfs_attr(vport);
+ kthread_stop(phba->worker_thread);
+
fc_remove_host(shost);
scsi_remove_host(shost);
+ lpfc_cleanup(vport);
+
/*
* Bring down the SLI Layer. This step disable all interrupts,
* clears the rings, discards all mailbox commands, and resets
@@ -2014,9 +2213,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
spin_unlock_irq(&phba->hbalock);
lpfc_debugfs_terminate(vport);
- lpfc_cleanup(vport);
-
- kthread_stop(phba->worker_thread);
/* Release the irq reservation */
free_irq(phba->pcidev->irq, phba);
@@ -2048,7 +2244,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
kfree(phba);
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, bars);
pci_disable_device(pdev);
}
@@ -2239,12 +2435,22 @@ lpfc_init(void)
printk(LPFC_MODULE_DESC "\n");
printk(LPFC_COPYRIGHT "\n");
+ if (lpfc_enable_npiv) {
+ lpfc_transport_functions.vport_create = lpfc_vport_create;
+ lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+ }
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
- lpfc_vport_transport_template =
- fc_attach_transport(&lpfc_vport_transport_functions);
- if (!lpfc_transport_template || !lpfc_vport_transport_template)
+ if (lpfc_transport_template == NULL)
return -ENOMEM;
+ if (lpfc_enable_npiv) {
+ lpfc_vport_transport_template =
+ fc_attach_transport(&lpfc_vport_transport_functions);
+ if (lpfc_vport_transport_template == NULL) {
+ fc_release_transport(lpfc_transport_template);
+ return -ENOMEM;
+ }
+ }
error = pci_register_driver(&lpfc_driver);
if (error) {
fc_release_transport(lpfc_transport_template);
@@ -2259,7 +2465,8 @@ lpfc_exit(void)
{
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
+ if (lpfc_enable_npiv)
+ fc_release_transport(lpfc_vport_transport_template);
}
module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 626e4d878725..c5841d7565f7 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -26,6 +26,7 @@
#define LOG_IP 0x20 /* IP traffic history */
#define LOG_FCP 0x40 /* FCP traffic history */
#define LOG_NODE 0x80 /* Node table events */
+#define LOG_TEMP 0x100 /* Temperature sensor events */
#define LOG_MISC 0x400 /* Miscellaneous events */
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a592733664e9..dfc63f6ccd7b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -82,6 +82,24 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
}
/**********************************************/
+/* lpfc_config_async Issue a */
+/* MBX_ASYNCEVT_ENABLE mailbox command */
+/**********************************************/
+void
+lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
+ uint32_t ring)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
+ mb->un.varCfgAsyncEvent.ring = ring;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
+
+/**********************************************/
/* lpfc_heart_beat Issue a HEART_BEAT */
/* mailbox command */
/**********************************************/
@@ -270,8 +288,10 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
/* Get a buffer to hold the HBAs Service Parameters */
- if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
- ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp || !mp->virt) {
kfree(mp);
mb->mbxCommand = MBX_READ_SPARM64;
/* READ_SPARAM: no buffers */
@@ -369,8 +389,10 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold NPorts Service Parameters */
- if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == NULL) ||
- ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ if (mp)
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp || !mp->virt) {
kfree(mp);
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
@@ -874,7 +896,7 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
case MBX_DOWN_LOAD: /* 0x1C */
case MBX_DEL_LD_ENTRY: /* 0x1D */
case MBX_LOAD_AREA: /* 0x81 */
- case MBX_FLASH_WR_ULA: /* 0x98 */
+ case MBX_WRITE_WWN: /* 0x98 */
case MBX_LOAD_EXP_ROM: /* 0x9C */
return LPFC_MBOX_TMO_FLASH_CMD;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 43c3b8a0d76a..6dc5ab8d6716 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -98,6 +98,7 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
fail_free_hbq_pool:
lpfc_sli_hbqbuf_free_all(phba);
+ pci_pool_destroy(phba->lpfc_hbq_pool);
fail_free_nlp_mem_pool:
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 880af0cd463d..4a0e3406e37a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -287,6 +287,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ if (wwn_to_u64(sp->portName.u.wwn) == 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0140 PLOGI Reject: invalid nname\n");
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ return 0;
+ }
+ if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0141 PLOGI Reject: invalid pname\n");
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
+ return 0;
+ }
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
@@ -343,8 +361,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->vport = vport;
- rc = lpfc_sli_issue_mbox
- (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
goto out;
@@ -407,6 +424,61 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp, mbox);
return 1;
}
+
+ /* If the remote NPort logs into us before we can initiate
+ * discovery to it, clean up the NPort from discovery accordingly.
+ */
+ if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ del_timer_sync(&ndlp->nlp_delayfunc);
+ ndlp->nlp_last_elscmd = 0;
+
+ if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+
+ if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
+ (vport->num_disc_nodes)) {
+ /* Check to see if there are more
+ * ADISCs to be sent
+ */
+ lpfc_more_adisc(vport);
+
+ if ((vport->num_disc_nodes == 0) &&
+ (vport->fc_npr_cnt))
+ lpfc_els_disc_plogi(vport);
+
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ else if (vport->num_disc_nodes) {
+ /* Check to see if there are more
+ * PLOGIs to be sent
+ */
+ lpfc_more_plogi(vport);
+
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ }
+ }
+
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
return 1;
@@ -501,12 +573,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- } else {
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
}
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
@@ -594,6 +663,25 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
}
+static uint32_t
+lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ /* This transition is only legal if we previously
+ * rcv'ed a PLOGI. Since we don't want 2 discovery threads
+ * working on the same NPortID, do nothing here so that
+ * this thread simply stops.
+ */
+ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "0253 Illegal State Transition: node x%x "
+ "event x%x, state x%x Data: x%x x%x\n",
+ ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+ ndlp->nlp_flag);
+ }
+ return ndlp->nlp_state;
+}
+
/* Start of Discovery State Machine routines */
static uint32_t
@@ -605,11 +693,8 @@ lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
cmdiocb = (struct lpfc_iocbq *) arg;
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
- ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
- lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
@@ -618,7 +703,6 @@ lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
lpfc_issue_els_logo(vport, ndlp, 0);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
@@ -633,7 +717,6 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(shost->host_lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
@@ -642,7 +725,6 @@ static uint32_t
lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
@@ -650,7 +732,6 @@ static uint32_t
lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
@@ -752,6 +833,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
@@ -778,6 +860,12 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ if (wwn_to_u64(sp->portName.u.wwn) == 0 ||
+ wwn_to_u64(sp->nodeName.u.wwn) == 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0142 PLOGI RSP: Invalid WWN.\n");
+ goto out;
+ }
if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
goto out;
/* PLOGI chkparm OK */
@@ -828,13 +916,15 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
}
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
- if (lpfc_sli_issue_mbox(phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB))
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
!= MBX_NOT_FINISHED) {
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_REG_LOGIN_ISSUE);
return ndlp->nlp_state;
}
+ /* Decrement the node reference count held for the failed
+ * mbox command.
+ */
lpfc_nlp_put(ndlp);
mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -864,13 +954,27 @@ out:
"0261 Cannot Register NameServer login\n");
}
- /* Free this node since the driver cannot login or has the wrong
- sparm */
- lpfc_drop_node(vport, ndlp);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DEFER_RM;
+ spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
static uint32_t
+lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
+{
+ return ndlp->nlp_state;
+}
+
+static uint32_t
lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
@@ -1137,7 +1241,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
lpfc_nlp_put(ndlp);
@@ -1197,8 +1301,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* retry discovery.
*/
if (mb->mbxStatus == MBXERR_RPI_FULL) {
- ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -1378,7 +1482,7 @@ out:
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -1753,7 +1857,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
- lpfc_drop_node(vport, ndlp);
+ ndlp->nlp_flag |= NLP_DEFER_RM;
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
@@ -1942,9 +2046,9 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
lpfc_disc_illegal, /* CMPL_PRLI */
- lpfc_disc_illegal, /* CMPL_LOGO */
+ lpfc_cmpl_logo_plogi_issue, /* CMPL_LOGO */
lpfc_disc_illegal, /* CMPL_ADISC */
- lpfc_disc_illegal, /* CMPL_REG_LOGIN */
+ lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN */
lpfc_device_rm_plogi_issue, /* DEVICE_RM */
lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
@@ -1968,7 +2072,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
- lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
lpfc_disc_illegal, /* CMPL_PRLI */
lpfc_disc_illegal, /* CMPL_LOGO */
lpfc_disc_illegal, /* CMPL_ADISC */
@@ -1982,7 +2086,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
- lpfc_disc_illegal, /* CMPL_PLOGI */
+ lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
lpfc_disc_illegal, /* CMPL_LOGO */
lpfc_disc_illegal, /* CMPL_ADISC */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4e46045dea6d..6483c62730b3 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -130,7 +130,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
new_queue_depth =
@@ -151,7 +151,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
new_queue_depth);
}
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
atomic_set(&phba->num_rsrc_err, 0);
atomic_set(&phba->num_cmd_success, 0);
}
@@ -166,7 +166,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
- for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
if (sdev->ordered_tags)
@@ -179,7 +179,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
sdev->queue_depth+1);
}
}
- lpfc_destroy_vport_work_array(vports);
+ lpfc_destroy_vport_work_array(phba, vports);
atomic_set(&phba->num_rsrc_err, 0);
atomic_set(&phba->num_cmd_success, 0);
}
@@ -380,7 +380,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
(num_bde * sizeof (struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1;
iocb_cmd->ulpLe = 1;
- fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
return 0;
}
@@ -542,6 +542,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
int result;
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
+ unsigned long flags;
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
@@ -608,6 +609,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ /*
+ * If there is a thread waiting for command completion
+ * wake up the thread.
+ */
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ lpfc_cmd->pCmd = NULL;
+ if (lpfc_cmd->waitq)
+ wake_up(lpfc_cmd->waitq);
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
@@ -669,6 +679,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
}
+ /*
+ * If there is a thread waiting for command completion
+ * wake up the thread.
+ */
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ lpfc_cmd->pCmd = NULL;
+ if (lpfc_cmd->waitq)
+ wake_up(lpfc_cmd->waitq);
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
@@ -743,6 +763,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
piocbq->iocb.ulpContext = pnode->nlp_rpi;
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
+ else
+ piocbq->iocb.ulpFCP2Rcvy = 0;
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
piocbq->context1 = lpfc_cmd;
@@ -1018,8 +1040,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_iocbq *abtsiocb;
struct lpfc_scsi_buf *lpfc_cmd;
IOCB_t *cmd, *icmd;
- unsigned int loop_count = 0;
int ret = SUCCESS;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
lpfc_block_error_handler(cmnd);
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -1074,17 +1096,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_poll_fcp_ring (phba);
+ lpfc_cmd->waitq = &waitq;
/* Wait for abort to complete */
- while (lpfc_cmd->pCmd == cmnd)
- {
- if (phba->cfg_poll & DISABLE_FCP_RING_INT)
- lpfc_sli_poll_fcp_ring (phba);
+ wait_event_timeout(waitq,
+ (lpfc_cmd->pCmd != cmnd),
+ (2*vport->cfg_devloss_tmo*HZ));
- schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
- if (++loop_count
- > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
- break;
- }
+ spin_lock_irq(shost->host_lock);
+ lpfc_cmd->waitq = NULL;
+ spin_unlock_irq(shost->host_lock);
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
@@ -1438,7 +1458,7 @@ struct scsi_host_template lpfc_template = {
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
- .sg_tablesize = LPFC_SG_SEG_CNT,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.use_sg_chaining = ENABLE_SG_CHAINING,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
@@ -1459,7 +1479,7 @@ struct scsi_host_template lpfc_vport_template = {
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
- .sg_tablesize = LPFC_SG_SEG_CNT,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.use_sg_chaining = ENABLE_SG_CHAINING,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 31787bb6d53e..daba92374985 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -138,6 +138,7 @@ struct lpfc_scsi_buf {
* Iotag is in here
*/
struct lpfc_iocbq cur_iocbq;
+ wait_queue_head_t *waitq;
};
#define LPFC_SCSI_DMA_EXT_SIZE 264
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ce348c5c706c..fdd01e384e36 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -106,7 +106,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
return iocbq;
}
-void
+static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
@@ -199,6 +199,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_RCV_ELS_REQ_CX:
case CMD_RCV_SEQUENCE64_CX:
case CMD_RCV_ELS_REQ64_CX:
+ case CMD_ASYNC_STATUS:
case CMD_IOCB_RCV_SEQ64_CX:
case CMD_IOCB_RCV_ELS64_CX:
case CMD_IOCB_RCV_CONT64_CX:
@@ -473,8 +474,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
if (pring->txq_cnt &&
lpfc_is_link_up(phba) &&
(pring->ringno != phba->sli.fcp_ring ||
- phba->sli.sli_flag & LPFC_PROCESS_LA) &&
- !(pring->flag & LPFC_STOP_IOCB_MBX)) {
+ phba->sli.sli_flag & LPFC_PROCESS_LA)) {
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
(nextiocb = lpfc_sli_ringtx_get(phba, pring)))
@@ -489,32 +489,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return;
}
-/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
-static void
-lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
-{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
- &phba->slim2p->mbx.us.s2.port[ringno];
- unsigned long iflags;
-
- /* If the ring is active, flag it */
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (phba->sli.ring[ringno].cmdringaddr) {
- if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
- phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
- /*
- * Force update of the local copy of cmdGetInx
- */
- phba->sli.ring[ringno].local_getidx
- = le32_to_cpu(pgp->cmdGetInx);
- lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
- }
- }
- spin_unlock_irqrestore(&phba->hbalock, iflags);
-}
-
-struct lpfc_hbq_entry *
+static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
struct hbq_s *hbqp = &phba->hbqs[hbqno];
@@ -565,6 +540,7 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
list_del(&hbq_buf->dbuf.list);
(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
}
+ phba->hbqs[i].buffer_count = 0;
}
}
@@ -633,8 +609,8 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
return 0;
}
- start = lpfc_hbq_defs[hbqno]->buffer_count;
- end = count + lpfc_hbq_defs[hbqno]->buffer_count;
+ start = phba->hbqs[hbqno].buffer_count;
+ end = count + start;
if (end > lpfc_hbq_defs[hbqno]->entry_count) {
end = lpfc_hbq_defs[hbqno]->entry_count;
}
@@ -646,7 +622,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
return 1;
hbq_buffer->tag = (i | (hbqno << 16));
if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
- lpfc_hbq_defs[hbqno]->buffer_count++;
+ phba->hbqs[hbqno].buffer_count++;
else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
@@ -660,14 +636,14 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
lpfc_hbq_defs[qno]->add_count));
}
-int
+static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->init_count));
}
-struct hbq_dmabuf *
+static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
struct lpfc_dmabuf *d_buf;
@@ -686,7 +662,7 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
"1803 Bad hbq tag. Data: x%x x%x\n",
- tag, lpfc_hbq_defs[tag >> 16]->buffer_count);
+ tag, phba->hbqs[tag >> 16].buffer_count);
return NULL;
}
@@ -712,6 +688,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_LOAD_SM:
case MBX_READ_NV:
case MBX_WRITE_NV:
+ case MBX_WRITE_VPARMS:
case MBX_RUN_BIU_DIAG:
case MBX_INIT_LINK:
case MBX_DOWN_LINK:
@@ -739,7 +716,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_DEL_LD_ENTRY:
case MBX_RUN_PROGRAM:
case MBX_SET_MASK:
- case MBX_SET_SLIM:
+ case MBX_SET_VARIABLE:
case MBX_UNREG_D_ID:
case MBX_KILL_BOARD:
case MBX_CONFIG_FARP:
@@ -751,9 +728,10 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_READ_RPI64:
case MBX_REG_LOGIN64:
case MBX_READ_LA64:
- case MBX_FLASH_WR_ULA:
+ case MBX_WRITE_WWN:
case MBX_SET_DEBUG:
case MBX_LOAD_EXP_ROM:
+ case MBX_ASYNCEVT_ENABLE:
case MBX_REG_VPI:
case MBX_UNREG_VPI:
case MBX_HEARTBEAT:
@@ -953,6 +931,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
return &new_hbq_entry->dbuf;
}
+static struct lpfc_dmabuf *
+lpfc_sli_get_buff(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ uint32_t tag)
+{
+ if (tag & QUE_BUFTAG_BIT)
+ return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
+ else
+ return lpfc_sli_replace_hbqbuff(phba, tag);
+}
+
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
@@ -961,19 +950,112 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
WORD5 * w5p;
uint32_t Rctl, Type;
uint32_t match, i;
+ struct lpfc_iocbq *iocbq;
match = 0;
irsp = &(saveq->iocb);
- if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
- || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
- || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
- || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
+
+ if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
+ return 1;
+ if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
+ if (pring->lpfc_sli_rcv_async_status)
+ pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
+ else
+ lpfc_printf_log(phba,
+ KERN_WARNING,
+ LOG_SLI,
+ "0316 Ring %d handler: unexpected "
+ "ASYNC_STATUS iocb received evt_code "
+ "0x%x\n",
+ pring->ringno,
+ irsp->un.asyncstat.evt_code);
+ return 1;
+ }
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (irsp->ulpBdeCount != 0) {
+ saveq->context2 = lpfc_sli_get_buff(phba, pring,
+ irsp->un.ulpWord[3]);
+ if (!saveq->context2)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0341 Ring %d Cannot find buffer for "
+ "an unsolicited iocb. tag 0x%x\n",
+ pring->ringno,
+ irsp->un.ulpWord[3]);
+ }
+ if (irsp->ulpBdeCount == 2) {
+ saveq->context3 = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[7]);
+ if (!saveq->context3)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0342 Ring %d Cannot find buffer for an"
+ " unsolicited iocb. tag 0x%x\n",
+ pring->ringno,
+ irsp->unsli3.sli3Words[7]);
+ }
+ list_for_each_entry(iocbq, &saveq->list, list) {
+ irsp = &(iocbq->iocb);
+ if (irsp->ulpBdeCount != 0) {
+ iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+ irsp->un.ulpWord[3]);
+ if (!iocbq->context2)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0343 Ring %d Cannot find "
+ "buffer for an unsolicited iocb"
+ ". tag 0x%x\n", pring->ringno,
+ irsp->un.ulpWord[3]);
+ }
+ if (irsp->ulpBdeCount == 2) {
+ iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[7]);
+ if (!iocbq->context3)
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0344 Ring %d Cannot find "
+ "buffer for an unsolicited "
+ "iocb. tag 0x%x\n",
+ pring->ringno,
+ irsp->unsli3.sli3Words[7]);
+ }
+ }
+ }
+ if (irsp->ulpBdeCount != 0 &&
+ (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
+ irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
+ int found = 0;
+
+ /* search continue save q for same XRI */
+ list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
+ if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
+ list_add_tail(&saveq->list, &iocbq->list);
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ list_add_tail(&saveq->clist,
+ &pring->iocb_continue_saveq);
+ if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
+ list_del_init(&iocbq->clist);
+ saveq = iocbq;
+ irsp = &(saveq->iocb);
+ } else
+ return 0;
+ }
+ if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
+ (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
+ (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
} else {
- w5p =
- (WORD5 *) & (saveq->iocb.un.
- ulpWord[5]);
+ w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
Rctl = w5p->hcsw.Rctl;
Type = w5p->hcsw.Type;
@@ -988,15 +1070,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- if (irsp->ulpBdeCount != 0)
- saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
- irsp->un.ulpWord[3]);
- if (irsp->ulpBdeCount == 2)
- saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
- irsp->unsli3.sli3Words[7]);
- }
-
/* unSolicited Responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -1006,12 +1079,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
} else {
/* We must search, based on rctl / type
for the right routine */
- for (i = 0; i < pring->num_mask;
- i++) {
- if ((pring->prt[i].rctl ==
- Rctl)
- && (pring->prt[i].
- type == Type)) {
+ for (i = 0; i < pring->num_mask; i++) {
+ if ((pring->prt[i].rctl == Rctl)
+ && (pring->prt[i].type == Type)) {
if (pring->prt[i].lpfc_sli_rcv_unsol_event)
(pring->prt[i].lpfc_sli_rcv_unsol_event)
(phba, pring, saveq);
@@ -1084,6 +1154,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
+
+ /* Firmware could still be in the process
+ * of DMAing the payload, so don't free the data
+ * buffer until after a heartbeat.
+ */
+ saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
}
}
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -1572,12 +1648,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
- if (list_empty(&(pring->iocb_continueq))) {
- list_add(&rspiocbp->list, &(pring->iocb_continueq));
- } else {
- list_add_tail(&rspiocbp->list,
- &(pring->iocb_continueq));
- }
+ list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
pring->iocb_continueq_cnt++;
if (irsp->ulpLe) {
@@ -1642,17 +1713,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
if (type == LPFC_SOL_IOCB) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
rc = lpfc_sli_process_sol_iocb(phba, pring,
saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
} else if (type == LPFC_UNSOL_IOCB) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
rc = lpfc_sli_process_unsol_iocb(phba, pring,
saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
+ if (!rc)
+ free_saveq = 0;
} else if (type == LPFC_ABORT_IOCB) {
if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
((cmdiocbp =
@@ -1921,8 +1992,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
"0329 Kill HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
- if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL)) == 0)
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb)
return 1;
/* Disable the error attention */
@@ -2113,7 +2184,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
<status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0436 Adapter failed to init, "
- "timeout, status reg x%x\n", status);
+ "timeout, status reg x%x, "
+ "FW Data: A8 x%x AC x%x\n", status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
return -ETIMEDOUT;
}
@@ -2125,7 +2199,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
<status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0437 Adapter failed to init, "
- "chipset, status reg x%x\n", status);
+ "chipset, status reg x%x, "
+ "FW Data: A8 x%x AC x%x\n", status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@@ -2153,7 +2230,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
/* Adapter failed to init, chipset, status reg <status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0438 Adapter failed to init, chipset, "
- "status reg x%x\n", status);
+ "status reg x%x, "
+ "FW Data: A8 x%x AC x%x\n", status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@@ -2485,11 +2565,16 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
lpfc_sli_abort_iocb_ring(phba, pring);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0316 Resetting board due to mailbox timeout\n");
+ "0345 Resetting board due to mailbox timeout\n");
/*
* lpfc_offline calls lpfc_sli_hba_down which will clean up
* on outstanding mailbox commands.
*/
+ /* If resets are disabled then set error state and return. */
+ if (!phba->cfg_enable_hba_reset) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return;
+ }
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
@@ -2507,6 +2592,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
uint32_t status, evtctr;
uint32_t ha_copy;
int i;
+ unsigned long timeout;
unsigned long drvr_flag = 0;
volatile uint32_t word0, ldata;
void __iomem *to_slim;
@@ -2519,7 +2605,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
"1806 Mbox x%x failed. No vport\n",
pmbox->mb.mbxCommand);
dump_stack();
- return MBXERR_ERROR;
+ return MBX_NOT_FINISHED;
}
}
@@ -2571,21 +2657,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
return MBX_NOT_FINISHED;
}
- /* Handle STOP IOCB processing flag. This is only meaningful
- * if we are not polling for mbox completion.
- */
- if (flag & MBX_STOP_IOCB) {
- flag &= ~MBX_STOP_IOCB;
- /* Now flag each ring */
- for (i = 0; i < psli->num_rings; i++) {
- /* If the ring is active, flag it */
- if (psli->ring[i].cmdringaddr) {
- psli->ring[i].flag |=
- LPFC_STOP_IOCB_MBX;
- }
- }
- }
-
/* Another mailbox command is still being processed, queue this
* command to be processed later.
*/
@@ -2620,23 +2691,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
return MBX_BUSY;
}
- /* Handle STOP IOCB processing flag. This is only meaningful
- * if we are not polling for mbox completion.
- */
- if (flag & MBX_STOP_IOCB) {
- flag &= ~MBX_STOP_IOCB;
- if (flag == MBX_NOWAIT) {
- /* Now flag each ring */
- for (i = 0; i < psli->num_rings; i++) {
- /* If the ring is active, flag it */
- if (psli->ring[i].cmdringaddr) {
- psli->ring[i].flag |=
- LPFC_STOP_IOCB_MBX;
- }
- }
- }
- }
-
psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
/* If we are not polling, we MUST be in SLI2 mode */
@@ -2714,18 +2768,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
}
wmb();
- /* interrupt board to doit right away */
- writel(CA_MBATT, phba->CAregaddr);
- readl(phba->CAregaddr); /* flush */
switch (flag) {
case MBX_NOWAIT:
- /* Don't wait for it to finish, just return */
+ /* Set up reference to mailbox command */
psli->mbox_active = pmbox;
+ /* Interrupt board to do it */
+ writel(CA_MBATT, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+ /* Don't wait for it to finish, just return */
break;
case MBX_POLL:
+ /* Set up null reference to mailbox command */
psli->mbox_active = NULL;
+ /* Interrupt board to do it */
+ writel(CA_MBATT, phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First read mbox status word */
word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
@@ -2737,15 +2797,15 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* Read the HBA Host Attention Register */
ha_copy = readl(phba->HAregaddr);
-
- i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
- i *= 1000; /* Convert to ms */
-
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ mb->mbxCommand) *
+ 1000) + jiffies;
+ i = 0;
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
(!(ha_copy & HA_MBATT) &&
(phba->link_state > LPFC_WARM_START))) {
- if (i-- <= 0) {
+ if (time_after(jiffies, timeout)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock,
drvr_flag);
@@ -2758,12 +2818,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
&& (evtctr != psli->slistat.mbox_event))
break;
- spin_unlock_irqrestore(&phba->hbalock,
- drvr_flag);
-
- msleep(1);
-
- spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ if (i++ > 10) {
+ spin_unlock_irqrestore(&phba->hbalock,
+ drvr_flag);
+ msleep(1);
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ }
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First copy command data */
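The polling branch above no longer counts down a millisecond counter; it converts the per-command mailbox timeout into an absolute jiffies deadline with msecs_to_jiffies() and checks it with time_after(), busy-polling the first few iterations before backing off to msleep(1). A sketch of that deadline pattern, with illustrative names:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll until done() reports completion or tmo_sec seconds elapse. */
static int demo_poll_until_done(bool (*done)(void *ctx), void *ctx,
				unsigned int tmo_sec)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_sec * 1000);
	unsigned int spins = 0;

	while (!done(ctx)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		if (spins++ > 10)	/* spin briefly, then yield 1 ms at a time */
			msleep(1);
	}
	return 0;
}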
@@ -2848,7 +2908,7 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* Lockless version of lpfc_sli_issue_iocb.
*/
-int
+static int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
{
@@ -2879,9 +2939,9 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* Check to see if we are blocking IOCB processing because of a
- * outstanding mbox command.
+ * outstanding event.
*/
- if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
+ if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
goto iocb_busy;
if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
@@ -2993,6 +3053,61 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+static void
+lpfc_sli_async_event_handler(struct lpfc_hba * phba,
+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
+{
+ IOCB_t *icmd;
+ uint16_t evt_code;
+ uint16_t temp;
+ struct temp_event temp_event_data;
+ struct Scsi_Host *shost;
+
+ icmd = &iocbq->iocb;
+ evt_code = icmd->un.asyncstat.evt_code;
+ temp = icmd->ulpContext;
+
+ if ((evt_code != ASYNC_TEMP_WARN) &&
+ (evt_code != ASYNC_TEMP_SAFE)) {
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_SLI,
+ "0346 Ring %d handler: unexpected ASYNC_STATUS"
+ " evt_code 0x%x\n",
+ pring->ringno,
+ icmd->un.asyncstat.evt_code);
+ return;
+ }
+ temp_event_data.data = (uint32_t)temp;
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ if (evt_code == ASYNC_TEMP_WARN) {
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_TEMP,
+ "0347 Adapter is very hot, please take "
+ "corrective action. temperature : %d Celsius\n",
+ temp);
+ }
+ if (evt_code == ASYNC_TEMP_SAFE) {
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ lpfc_printf_log(phba,
+ KERN_ERR,
+ LOG_TEMP,
+ "0340 Adapter temperature is OK now. "
+ "temperature : %d Celsius\n",
+ temp);
+ }
+
+ /* Send temperature change event to applications */
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data), (char *) &temp_event_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+}
+
+
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
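The new lpfc_sli_async_event_handler() above decodes the ASYNC_STATUS event code (temperature warning/safe), logs it, and forwards it to management applications with fc_host_post_vendor_event(); a following hunk wires it into the ELS ring through the lpfc_sli_rcv_async_status pointer added to struct lpfc_sli_ring (see the lpfc_sli.h hunk further down). A short sketch of that per-ring dispatch, using illustrative demo_* types:

#include <linux/types.h>

struct demo_iocb;

struct demo_ring {
	int ringno;
	/* optional per-ring hook for asynchronous status entries */
	void (*rcv_async_status)(struct demo_ring *ring, struct demo_iocb *iocb);
};

static void demo_handle_entry(struct demo_ring *ring, struct demo_iocb *iocb,
			      bool is_async_status)
{
	/* Rings that did not register a handler simply ignore async entries. */
	if (is_async_status && ring->rcv_async_status)
		ring->rcv_async_status(ring, iocb);
}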
@@ -3059,6 +3174,8 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->fast_iotag = 0;
pring->iotag_ctr = 0;
pring->iotag_max = 4096;
+ pring->lpfc_sli_rcv_async_status =
+ lpfc_sli_async_event_handler;
pring->num_mask = 4;
pring->prt[0].profile = 0; /* Mask 0 */
pring->prt[0].rctl = FC_ELS_REQ;
@@ -3123,6 +3240,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
+ INIT_LIST_HEAD(&pring->iocb_continue_saveq);
INIT_LIST_HEAD(&pring->postbufq);
}
spin_unlock_irq(&phba->hbalock);
@@ -3193,6 +3311,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
LIST_HEAD(completions);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
+ struct lpfc_dmabuf *buf_ptr;
LPFC_MBOXQ_t *pmb;
struct lpfc_iocbq *iocb;
IOCB_t *cmd = NULL;
@@ -3232,6 +3351,19 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
}
}
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_splice_init(&phba->elsbuf, &completions);
+ phba->elsbuf_cnt = 0;
+ phba->elsbuf_prev_cnt = 0;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, buf_ptr,
+ struct lpfc_dmabuf, list);
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ }
+
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
spin_lock_irqsave(&phba->hbalock, flags);
@@ -3294,6 +3426,47 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+uint32_t
+lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
+{
+ spin_lock_irq(&phba->hbalock);
+ phba->buffer_tag_count++;
+ /*
+	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
+	 * from a tag assigned by HBQ.
+ */
+ phba->buffer_tag_count |= QUE_BUFTAG_BIT;
+ spin_unlock_irq(&phba->hbalock);
+ return phba->buffer_tag_count;
+}
+
+struct lpfc_dmabuf *
+lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ uint32_t tag)
+{
+ struct lpfc_dmabuf *mp, *next_mp;
+ struct list_head *slp = &pring->postbufq;
+
+	/* Search postbufq, from the beginning, looking for a match on tag */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ if (mp->buffer_tag == tag) {
+ list_del_init(&mp->list);
+ pring->postbufq_cnt--;
+ spin_unlock_irq(&phba->hbalock);
+ return mp;
+ }
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0410 Cannot find virtual addr for buffer tag on "
+ "ring %d Data x%lx x%p x%p x%x\n",
+ pring->ringno, (unsigned long) tag,
+ slp->next, slp->prev, pring->postbufq_cnt);
+
+ return NULL;
+}
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
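lpfc_sli_get_buffer_tag() above hands out monotonically increasing driver tags with QUE_BUFTAG_BIT forced on, so they can never collide with tags assigned by the HBQ mechanism, and lpfc_sli_ring_taggedbuf_get() recovers the posted buffer by scanning postbufq for that tag. A condensed sketch of the same allocate-and-look-up pairing, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_QUE_BUFTAG_BIT	(1U << 31)	/* stand-in for QUE_BUFTAG_BIT */

struct demo_dmabuf {
	struct list_head list;
	u32 buffer_tag;
};

static u32 demo_tag_count;
static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_postbufq);

static u32 demo_get_buffer_tag(void)
{
	u32 tag;

	spin_lock_irq(&demo_lock);
	demo_tag_count = (demo_tag_count + 1) | DEMO_QUE_BUFTAG_BIT;
	tag = demo_tag_count;
	spin_unlock_irq(&demo_lock);
	return tag;
}

static struct demo_dmabuf *demo_taggedbuf_get(u32 tag)
{
	struct demo_dmabuf *mp, *next;

	spin_lock_irq(&demo_lock);
	list_for_each_entry_safe(mp, next, &demo_postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			spin_unlock_irq(&demo_lock);
			return mp;
		}
	}
	spin_unlock_irq(&demo_lock);
	return NULL;	/* no posted buffer carries this tag */
}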
@@ -3361,6 +3534,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
pring->txcmplq_cnt--;
spin_unlock_irq(&phba->hbalock);
+	/* Firmware could still be in the process of DMAing
+	 * the payload, so don't free the data buffer until
+	 * after a heartbeat.
+	 */
+ abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
+
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
@@ -3699,7 +3878,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
unsigned long flag;
/* The caller must leave context1 empty. */
- if (pmboxq->context1 != 0)
+ if (pmboxq->context1)
return MBX_NOT_FINISHED;
/* setup wake call as IOCB callback */
@@ -3771,7 +3950,6 @@ lpfc_intr_handler(int irq, void *dev_id)
uint32_t ha_copy;
uint32_t work_ha_copy;
unsigned long status;
- int i;
uint32_t control;
MAILBOX_t *mbox, *pmbox;
@@ -3888,7 +4066,6 @@ lpfc_intr_handler(int irq, void *dev_id)
}
if (work_ha_copy & HA_ERATT) {
- phba->link_state = LPFC_HBA_ERROR;
/*
* There was a link/board error. Read the
* status register to retrieve the error event
@@ -3920,7 +4097,7 @@ lpfc_intr_handler(int irq, void *dev_id)
* Stray Mailbox Interrupt, mbxCommand <cmd>
* mbxStatus <status>
*/
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
LOG_SLI,
"(%d):0304 Stray Mailbox "
"Interrupt mbxCommand x%x "
@@ -3928,51 +4105,60 @@ lpfc_intr_handler(int irq, void *dev_id)
(vport ? vport->vpi : 0),
pmbox->mbxCommand,
pmbox->mbxStatus);
- }
- phba->last_completion_time = jiffies;
- del_timer_sync(&phba->sli.mbox_tmo);
-
- phba->sli.mbox_active = NULL;
- if (pmb->mbox_cmpl) {
- lpfc_sli_pcimem_bcopy(mbox, pmbox,
- MAILBOX_CMD_SIZE);
- }
- if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
- pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+ /* clear mailbox attention bit */
+ work_ha_copy &= ~HA_MBATT;
+ } else {
+ phba->last_completion_time = jiffies;
+ del_timer(&phba->sli.mbox_tmo);
- lpfc_debugfs_disc_trc(vport,
- LPFC_DISC_TRC_MBOX_VPORT,
- "MBOX dflt rpi: : status:x%x rpi:x%x",
- (uint32_t)pmbox->mbxStatus,
- pmbox->un.varWords[0], 0);
-
- if ( !pmbox->mbxStatus) {
- mp = (struct lpfc_dmabuf *)
- (pmb->context1);
- ndlp = (struct lpfc_nodelist *)
- pmb->context2;
-
- /* Reg_LOGIN of dflt RPI was successful.
- * new lets get rid of the RPI using the
- * same mbox buffer.
- */
- lpfc_unreg_login(phba, vport->vpi,
- pmbox->un.varWords[0], pmb);
- pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
- pmb->context1 = mp;
- pmb->context2 = ndlp;
- pmb->vport = vport;
- spin_lock(&phba->hbalock);
- phba->sli.sli_flag &=
- ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock(&phba->hbalock);
- goto send_current_mbox;
+ phba->sli.mbox_active = NULL;
+ if (pmb->mbox_cmpl) {
+ lpfc_sli_pcimem_bcopy(mbox, pmbox,
+ MAILBOX_CMD_SIZE);
+ }
+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX dflt rpi: : "
+ "status:x%x rpi:x%x",
+ (uint32_t)pmbox->mbxStatus,
+ pmbox->un.varWords[0], 0);
+
+ if (!pmbox->mbxStatus) {
+ mp = (struct lpfc_dmabuf *)
+ (pmb->context1);
+ ndlp = (struct lpfc_nodelist *)
+ pmb->context2;
+
+ /* Reg_LOGIN of dflt RPI was
+					 * successful. Now let's get
+ * rid of the RPI using the
+ * same mbox buffer.
+ */
+ lpfc_unreg_login(phba,
+ vport->vpi,
+ pmbox->un.varWords[0],
+ pmb);
+ pmb->mbox_cmpl =
+ lpfc_mbx_cmpl_dflt_rpi;
+ pmb->context1 = mp;
+ pmb->context2 = ndlp;
+ pmb->vport = vport;
+ spin_lock(&phba->hbalock);
+ phba->sli.sli_flag &=
+ ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock(&phba->hbalock);
+ goto send_current_mbox;
+ }
}
+ spin_lock(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &=
+ ~WORKER_MBOX_TMO;
+ spin_unlock(&phba->pport->work_port_lock);
+ lpfc_mbox_cmpl_put(phba, pmb);
}
- spin_lock(&phba->pport->work_port_lock);
- phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
- spin_unlock(&phba->pport->work_port_lock);
- lpfc_mbox_cmpl_put(phba, pmb);
}
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) {
@@ -3990,10 +4176,6 @@ send_current_mbox:
lpfc_mbox_cmpl_put(phba, pmb);
goto send_next_mbox;
}
- } else {
- /* Turn on IOCB processing */
- for (i = 0; i < phba->sli.num_rings; i++)
- lpfc_sli_turn_on_ring(phba, i);
}
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 51b2b6b949be..7249fd252cbb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -33,6 +33,7 @@ typedef enum _lpfc_ctx_cmd {
struct lpfc_iocbq {
/* lpfc_iocbqs are used in double linked lists */
struct list_head list;
+ struct list_head clist;
uint16_t iotag; /* pre-assigned IO tag */
uint16_t rsvd1;
@@ -44,6 +45,7 @@ struct lpfc_iocbq {
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
+#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
uint8_t abort_count;
uint8_t rsvd2;
@@ -92,8 +94,6 @@ typedef struct lpfcMboxq {
#define MBX_POLL 1 /* poll mailbox till command done, then
return */
#define MBX_NOWAIT 2 /* issue command then return immediately */
-#define MBX_STOP_IOCB 4 /* Stop iocb processing till mbox cmds
- complete */
#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per
ring */
@@ -129,9 +129,7 @@ struct lpfc_sli_ring {
uint16_t flag; /* ring flags */
#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */
#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */
-#define LPFC_STOP_IOCB_MBX 0x010 /* Stop processing IOCB cmds mbox */
#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
-#define LPFC_STOP_IOCB_MASK 0x030 /* Stop processing IOCB cmds mask */
uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
@@ -163,9 +161,12 @@ struct lpfc_sli_ring {
struct list_head iocb_continueq;
uint16_t iocb_continueq_cnt; /* current length of queue */
uint16_t iocb_continueq_max; /* max length */
+ struct list_head iocb_continue_saveq;
struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
uint32_t num_mask; /* number of mask entries in prt array */
+ void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
+ struct lpfc_sli_ring *, struct lpfc_iocbq *);
struct lpfc_sli_ring_stat stats; /* SLI statistical info */
@@ -199,9 +200,6 @@ struct lpfc_hbq_init {
uint32_t add_count; /* number to allocate when starved */
} ;
-#define LPFC_MAX_HBQ 16
-
-
/* Structure used to hold SLI statistical counters and info */
struct lpfc_sli_stat {
uint64_t mbox_stat_err; /* Mbox cmds completed status error */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0081f49286bc..4b633d39a82a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,10 +18,10 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.2"
+#define LPFC_DRIVER_VERSION "8.2.4"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2007 Emulex. All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2008 Emulex. All rights reserved."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dcb415e717c3..9fad7663c117 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -125,15 +125,26 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
pmb->vport = vport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
- "1818 VPort failed init, mbxCmd x%x "
- "READ_SPARM mbxStatus x%x, rc = x%x\n",
- mb->mbxCommand, mb->mbxStatus, rc);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- if (rc != MBX_TIMEOUT)
- mempool_free(pmb, phba->mbox_mem_pool);
- return -EIO;
+ if (signal_pending(current)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "1830 Signal aborted mbxCmd x%x\n",
+ mb->mbxCommand);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EINTR;
+ } else {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "1818 VPort failed init, mbxCmd x%x "
+ "READ_SPARM mbxStatus x%x, rc = x%x\n",
+ mb->mbxCommand, mb->mbxStatus, rc);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
}
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
@@ -204,6 +215,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
int instance;
int vpi;
int rc = VPORT_ERROR;
+ int status;
if ((phba->sli_rev < 3) ||
!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -248,13 +260,19 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->vpi = vpi;
lpfc_debugfs_initialize(vport);
- if (lpfc_vport_sparm(phba, vport)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
- "1813 Create VPORT failed. "
- "Cannot get sparam\n");
+ if ((status = lpfc_vport_sparm(phba, vport))) {
+ if (status == -EINTR) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1831 Create VPORT Interrupted.\n");
+ rc = VPORT_ERROR;
+ } else {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1813 Create VPORT failed. "
+ "Cannot get sparam\n");
+ rc = VPORT_NORESOURCES;
+ }
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
- rc = VPORT_NORESOURCES;
goto error_out;
}
@@ -427,7 +445,6 @@ int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_nodelist *next_ndlp;
struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
@@ -482,8 +499,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- phba->link_state >= LPFC_LINK_UP) {
-
+ phba->link_state >= LPFC_LINK_UP) {
+ if (vport->cfg_enable_da_id) {
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
+ while (vport->ct_flags && timeout)
+ timeout = schedule_timeout(timeout);
+ else
+ lpfc_printf_log(vport->phba, KERN_WARNING,
+ LOG_VPORT,
+ "1829 CT command failed to "
+ "delete objects on fabric. \n");
+ }
/* First look for the Fabric ndlp */
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
@@ -503,23 +530,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
skip_logo:
+ lpfc_cleanup(vport);
lpfc_sli_host_down(vport);
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RM);
- }
-
lpfc_stop_vport_timers(vport);
lpfc_unreg_all_rpis(vport);
- lpfc_unreg_default_rpis(vport);
- /*
- * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
- * scsi_host_put() to release the vport.
- */
- lpfc_mbx_unreg_vpi(vport);
+
+ if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ lpfc_unreg_default_rpis(vport);
+ /*
+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
+ * does the scsi_host_put() to release the vport.
+ */
+ lpfc_mbx_unreg_vpi(vport);
+ }
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
@@ -532,16 +556,13 @@ skip_logo:
return VPORT_OK;
}
-EXPORT_SYMBOL(lpfc_vport_create);
-EXPORT_SYMBOL(lpfc_vport_delete);
-
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
struct lpfc_vport *port_iterator;
struct lpfc_vport **vports;
int index = 0;
- vports = kzalloc(LPFC_MAX_VPORTS * sizeof(struct lpfc_vport *),
+ vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *),
GFP_KERNEL);
if (vports == NULL)
return NULL;
@@ -560,12 +581,12 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
}
void
-lpfc_destroy_vport_work_array(struct lpfc_vport **vports)
+lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
int i;
if (vports == NULL)
return;
- for (i=0; vports[i] != NULL && i < LPFC_MAX_VPORTS; i++)
+ for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++)
scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports);
}
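The work-array pair above now sizes the snapshot by the adapter's max_vpi instead of the fixed LPFC_MAX_VPORTS, and the destroy routine takes the phba so it can walk the same bound; unused slots stay zeroed, so the walk stops at the first NULL or at max_vpi. A sketch of that allocate/release pairing with illustrative names:

#include <linux/slab.h>

struct demo_port {
	int refcount;
};

static void demo_port_put(struct demo_port *port)
{
	port->refcount--;	/* stand-in for scsi_host_put() on the vport's shost */
}

/* Snapshot array with one slot per possible vpi; untouched slots read as NULL. */
static struct demo_port **demo_create_work_array(unsigned int max_vpi)
{
	return kzalloc((max_vpi + 1) * sizeof(struct demo_port *), GFP_KERNEL);
}

static void demo_destroy_work_array(unsigned int max_vpi, struct demo_port **ports)
{
	unsigned int i;

	if (!ports)
		return;
	for (i = 0; ports[i] != NULL && i <= max_vpi; i++)
		demo_port_put(ports[i]);
	kfree(ports);
}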
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 91da17751a37..96c445333b69 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -89,7 +89,7 @@ int lpfc_vport_delete(struct fc_vport *);
int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
-void lpfc_destroy_vport_work_array(struct lpfc_vport **);
+void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
/*
* queuecommand VPORT-specific return codes. Specified in the host byte code.
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 66c652035730..765c24d2bc38 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4889,7 +4889,7 @@ __megaraid_shutdown(adapter_t *adapter)
mdelay(1000);
}
-static void
+static void __devexit
megaraid_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c8923108183a..24e32e446e76 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -300,7 +300,7 @@ static struct pci_device_id pci_id_table_g[] = {
MODULE_DEVICE_TABLE(pci, pci_id_table_g);
-static struct pci_driver megaraid_pci_driver_g = {
+static struct pci_driver megaraid_pci_driver = {
.name = "megaraid",
.id_table = pci_id_table_g,
.probe = megaraid_probe_one,
@@ -394,7 +394,7 @@ megaraid_init(void)
// register as a PCI hot-plug driver module
- rval = pci_register_driver(&megaraid_pci_driver_g);
+ rval = pci_register_driver(&megaraid_pci_driver);
if (rval < 0) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: could not register hotplug support.\n"));
@@ -415,7 +415,7 @@ megaraid_exit(void)
con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
// unregister as PCI hotplug driver
- pci_unregister_driver(&megaraid_pci_driver_g);
+ pci_unregister_driver(&megaraid_pci_driver);
return;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index e3c5c5282203..d7ec921865c4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -2,7 +2,7 @@
*
* Linux MegaRAID driver for SAS based RAID controllers
*
- * Copyright (c) 2003-2005 LSI Logic Corporation.
+ * Copyright (c) 2003-2005 LSI Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.c
- * Version : v00.00.03.10-rc5
+ * Version : v00.00.03.16-rc1
*
* Authors:
* (email-id : megaraidlinux@lsi.com)
@@ -31,6 +31,7 @@
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
@@ -46,10 +47,18 @@
#include <scsi/scsi_host.h>
#include "megaraid_sas.h"
+/*
+ * poll_mode_io: if 1, schedule command completion from the queuecommand path
+ */
+static unsigned int poll_mode_io;
+module_param_named(poll_mode_io, poll_mode_io, int, 0);
+MODULE_PARM_DESC(poll_mode_io,
+ "Complete cmds from IO path, (default=0)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux@lsi.com");
-MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
+MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
/*
* PCI ID table for all supported controllers
@@ -76,6 +85,10 @@ static DEFINE_MUTEX(megasas_async_queue_mutex);
static u32 megasas_dbg_lvl;
+static void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status);
+
/**
* megasas_get_cmd - Get a command from the free pool
* @instance: Adapter soft state
@@ -855,6 +868,12 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
atomic_inc(&instance->fw_outstanding);
instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set);
+ /*
+ * Check if we have pend cmds to be completed
+ */
+ if (poll_mode_io && atomic_read(&instance->fw_outstanding))
+ tasklet_schedule(&instance->isr_tasklet);
+
return 0;
@@ -886,6 +905,64 @@ static int megasas_slave_configure(struct scsi_device *sdev)
}
/**
+ * megasas_complete_cmd_dpc - Completes outstanding commands
+ * @instance_addr: Address of adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+{
+ u32 producer;
+ u32 consumer;
+ u32 context;
+ struct megasas_cmd *cmd;
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+ unsigned long flags;
+
+	/* If we have already declared the adapter dead, do not complete cmds */
+ if (instance->hw_crit_error)
+ return;
+
+ spin_lock_irqsave(&instance->completion_lock, flags);
+
+ producer = *instance->producer;
+ consumer = *instance->consumer;
+
+ while (consumer != producer) {
+ context = instance->reply_queue[consumer];
+
+ cmd = instance->cmd_list[context];
+
+ megasas_complete_cmd(instance, cmd, DID_OK);
+
+ consumer++;
+ if (consumer == (instance->max_fw_cmds + 1)) {
+ consumer = 0;
+ }
+ }
+
+ *instance->consumer = producer;
+
+ spin_unlock_irqrestore(&instance->completion_lock, flags);
+
+ /*
+ * Check if we can restore can_queue
+ */
+ if (instance->flag & MEGASAS_FW_BUSY
+ && time_after(jiffies, instance->last_time + 5 * HZ)
+ && atomic_read(&instance->fw_outstanding) < 17) {
+
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ instance->flag &= ~MEGASAS_FW_BUSY;
+ instance->host->can_queue =
+ instance->max_fw_cmds - MEGASAS_INT_CMDS;
+
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+}
+
+/**
* megasas_wait_for_outstanding - Wait for all outstanding cmds
* @instance: Adapter soft state
*
@@ -908,6 +985,11 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
"commands to complete\n",i,outstanding);
+ /*
+ * Call cmd completion routine. Cmd to be
+ * be completed directly without depending on isr.
+ */
+ megasas_complete_cmd_dpc((unsigned long)instance);
}
msleep(1000);
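The relocated megasas_complete_cmd_dpc() lets the reset path above reap completions directly: it walks the reply ring from the consumer index up to the firmware's producer index, completes every context it finds, and then publishes the new consumer index, all under the new completion_lock so the interrupt tasklet and this polled call do not race. A stripped-down sketch of that producer/consumer drain, with illustrative names:

#include <linux/types.h>

struct demo_reply_ring {
	u32 *producer;		/* advanced by firmware */
	u32 *consumer;		/* advanced by the driver */
	u32 *reply_queue;	/* completion contexts, one per slot */
	u32 depth;		/* number of slots in the ring */
};

static void demo_drain_replies(struct demo_reply_ring *ring,
			       void (*complete)(u32 context))
{
	u32 producer = *ring->producer;
	u32 consumer = *ring->consumer;

	while (consumer != producer) {
		complete(ring->reply_queue[consumer]);
		if (++consumer == ring->depth)
			consumer = 0;		/* wrap around */
	}
	/* Tell the firmware how far we got so the slots can be reused. */
	*ring->consumer = producer;
}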
@@ -1100,7 +1182,7 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
static struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
- .name = "LSI Logic SAS based MegaRAID driver",
+ .name = "LSI SAS based MegaRAID driver",
.proc_name = "megaraid_sas",
.slave_configure = megasas_slave_configure,
.queuecommand = megasas_queue_command,
@@ -1749,57 +1831,119 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
}
/**
- * megasas_complete_cmd_dpc - Returns FW's controller structure
- * @instance_addr: Address of adapter soft state
+ * megasas_issue_init_mfi - Initializes the FW
+ * @instance: Adapter soft state
*
- * Tasklet to complete cmds
+ * Issues the INIT MFI cmd
*/
-static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+static int
+megasas_issue_init_mfi(struct megasas_instance *instance)
{
- u32 producer;
- u32 consumer;
u32 context;
+
struct megasas_cmd *cmd;
- struct megasas_instance *instance = (struct megasas_instance *)instance_addr;
- unsigned long flags;
- /* If we have already declared adapter dead, donot complete cmds */
- if (instance->hw_crit_error)
- return;
+ struct megasas_init_frame *init_frame;
+ struct megasas_init_queue_info *initq_info;
+ dma_addr_t init_frame_h;
+ dma_addr_t initq_info_h;
- producer = *instance->producer;
- consumer = *instance->consumer;
+ /*
+ * Prepare a init frame. Note the init frame points to queue info
+ * structure. Each frame has SGL allocated after first 64 bytes. For
+ * this frame - since we don't need any SGL - we use SGL's space as
+ * queue info structure
+ *
+ * We will not get a NULL command below. We just created the pool.
+ */
+ cmd = megasas_get_cmd(instance);
- while (consumer != producer) {
- context = instance->reply_queue[consumer];
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ initq_info = (struct megasas_init_queue_info *)
+ ((unsigned long)init_frame + 64);
- cmd = instance->cmd_list[context];
+ init_frame_h = cmd->frame_phys_addr;
+ initq_info_h = init_frame_h + 64;
- megasas_complete_cmd(instance, cmd, DID_OK);
+ context = init_frame->context;
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+ init_frame->context = context;
- consumer++;
- if (consumer == (instance->max_fw_cmds + 1)) {
- consumer = 0;
- }
- }
+ initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
+ initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
- *instance->consumer = producer;
+ initq_info->producer_index_phys_addr_lo = instance->producer_h;
+ initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->queue_info_new_phys_addr_lo = initq_info_h;
+
+ init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
/*
- * Check if we can restore can_queue
+ * disable the intr before firing the init frame to FW
*/
- if (instance->flag & MEGASAS_FW_BUSY
- && time_after(jiffies, instance->last_time + 5 * HZ)
- && atomic_read(&instance->fw_outstanding) < 17) {
+ instance->instancet->disable_intr(instance->reg_set);
- spin_lock_irqsave(instance->host->host_lock, flags);
- instance->flag &= ~MEGASAS_FW_BUSY;
- instance->host->can_queue =
- instance->max_fw_cmds - MEGASAS_INT_CMDS;
+ /*
+ * Issue the init frame in polled mode
+ */
- spin_unlock_irqrestore(instance->host->host_lock, flags);
+ if (megasas_issue_polled(instance, cmd)) {
+ printk(KERN_ERR "megasas: Failed to init firmware\n");
+ megasas_return_cmd(instance, cmd);
+ goto fail_fw_init;
}
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+
+fail_fw_init:
+ return -EINVAL;
+}
+
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance: Adapter soft state
+ * @timer: timer object to be initialized
+ * @fn: timer function
+ * @interval: time interval between timer function call
+ */
+static inline void
+megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval)
+{
+ init_timer(timer);
+ timer->expires = jiffies + interval;
+ timer->data = (unsigned long)instance;
+ timer->function = fn;
+ add_timer(timer);
+}
+
+/**
+ * megasas_io_completion_timer - Timer fn
+ * @instance_addr: Address of adapter soft state
+ *
+ * Schedules tasklet for cmd completion
+ * if poll_mode_io is set
+ */
+static void
+megasas_io_completion_timer(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+
+ if (atomic_read(&instance->fw_outstanding))
+ tasklet_schedule(&instance->isr_tasklet);
+
+ /* Restart timer */
+ if (poll_mode_io)
+ mod_timer(&instance->io_completion_timer,
+ jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
}
/**
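megasas_start_timer() and megasas_io_completion_timer() above implement the optional poll_mode_io path: a timer fires every MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10), schedules the completion tasklet whenever commands are outstanding, and re-arms itself. A compact sketch of that self-rearming poll timer, using the same era's timer API and illustrative names:

#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <asm/atomic.h>

struct demo_adapter {
	struct timer_list poll_timer;
	struct tasklet_struct isr_tasklet;
	atomic_t outstanding;
};

static void demo_poll_timer_fn(unsigned long data)
{
	struct demo_adapter *adap = (struct demo_adapter *)data;

	if (atomic_read(&adap->outstanding))
		tasklet_schedule(&adap->isr_tasklet);	/* reap completions */

	mod_timer(&adap->poll_timer, jiffies + HZ / 10);	/* re-arm */
}

static void demo_start_poll_timer(struct demo_adapter *adap)
{
	init_timer(&adap->poll_timer);
	adap->poll_timer.data = (unsigned long)adap;
	adap->poll_timer.function = demo_poll_timer_fn;
	adap->poll_timer.expires = jiffies + HZ / 10;
	add_timer(&adap->poll_timer);
}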
@@ -1814,22 +1958,15 @@ static int megasas_init_mfi(struct megasas_instance *instance)
u32 reply_q_sz;
u32 max_sectors_1;
u32 max_sectors_2;
+ u32 tmp_sectors;
struct megasas_register_set __iomem *reg_set;
-
- struct megasas_cmd *cmd;
struct megasas_ctrl_info *ctrl_info;
-
- struct megasas_init_frame *init_frame;
- struct megasas_init_queue_info *initq_info;
- dma_addr_t init_frame_h;
- dma_addr_t initq_info_h;
-
/*
* Map the message registers
*/
instance->base_addr = pci_resource_start(instance->pdev, 0);
- if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) {
+ if (pci_request_regions(instance->pdev, "megasas: LSI")) {
printk(KERN_DEBUG "megasas: IO memory region busy!\n");
return -EBUSY;
}
@@ -1900,52 +2037,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
goto fail_reply_queue;
}
- /*
- * Prepare a init frame. Note the init frame points to queue info
- * structure. Each frame has SGL allocated after first 64 bytes. For
- * this frame - since we don't need any SGL - we use SGL's space as
- * queue info structure
- *
- * We will not get a NULL command below. We just created the pool.
- */
- cmd = megasas_get_cmd(instance);
-
- init_frame = (struct megasas_init_frame *)cmd->frame;
- initq_info = (struct megasas_init_queue_info *)
- ((unsigned long)init_frame + 64);
-
- init_frame_h = cmd->frame_phys_addr;
- initq_info_h = init_frame_h + 64;
-
- memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
- memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
-
- initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
- initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
-
- initq_info->producer_index_phys_addr_lo = instance->producer_h;
- initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
-
- init_frame->cmd = MFI_CMD_INIT;
- init_frame->cmd_status = 0xFF;
- init_frame->queue_info_new_phys_addr_lo = initq_info_h;
-
- init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
-
- /*
- * disable the intr before firing the init frame to FW
- */
- instance->instancet->disable_intr(instance->reg_set);
-
- /*
- * Issue the init frame in polled mode
- */
- if (megasas_issue_polled(instance, cmd)) {
- printk(KERN_DEBUG "megasas: Failed to init firmware\n");
+ if (megasas_issue_init_mfi(instance))
goto fail_fw_init;
- }
-
- megasas_return_cmd(instance, cmd);
ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
@@ -1958,17 +2051,20 @@ static int megasas_init_mfi(struct megasas_instance *instance)
* Note that older firmwares ( < FW ver 30) didn't report information
* to calculate max_sectors_1. So the number ended up as zero always.
*/
+ tmp_sectors = 0;
if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
ctrl_info->max_strips_per_io;
max_sectors_2 = ctrl_info->max_request_size;
- instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2)
- ? max_sectors_1 : max_sectors_2;
- } else
- instance->max_sectors_per_req = instance->max_num_sge *
- PAGE_SIZE / 512;
+ tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+ }
+
+ instance->max_sectors_per_req = instance->max_num_sge *
+ PAGE_SIZE / 512;
+ if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+ instance->max_sectors_per_req = tmp_sectors;
kfree(ctrl_info);
@@ -1976,12 +2072,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
* Setup tasklet for cmd completion
*/
- tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
- (unsigned long)instance);
+ tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+ (unsigned long)instance);
+
+ /* Initialize the cmd completion timer */
+ if (poll_mode_io)
+ megasas_start_timer(instance, &instance->io_completion_timer,
+ megasas_io_completion_timer,
+ MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
fail_fw_init:
- megasas_return_cmd(instance, cmd);
pci_free_consistent(instance->pdev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
@@ -2263,6 +2364,28 @@ static int megasas_io_attach(struct megasas_instance *instance)
return 0;
}
+static int
+megasas_set_dma_mask(struct pci_dev *pdev)
+{
+ /*
+	 * All our controllers are capable of performing 64-bit DMA
+ */
+ if (IS_DMA64) {
+ if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
+
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
+ goto fail_set_dma_mask;
+ }
+ return 0;
+
+fail_set_dma_mask:
+ return 1;
+}
+
/**
* megasas_probe_one - PCI hotplug entry point
* @pdev: PCI device structure
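megasas_set_dma_mask() above pulls the mask negotiation out of the probe path so that resume can reuse it: when dma_addr_t is 64-bit (IS_DMA64) it first asks for a 64-bit mask and falls back to 32-bit on failure, otherwise it goes straight to 32-bit. A sketch of the same fallback, with an illustrative name:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int demo_set_dma_mask(struct pci_dev *pdev)
{
	/* Prefer 64-bit addressing when dma_addr_t can hold it. */
	if (sizeof(dma_addr_t) == 8 &&
	    pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
		return 0;

	/* Otherwise (or if the 64-bit request failed) fall back to 32-bit. */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
		return 0;

	return 1;	/* no usable DMA mask; caller aborts the probe/resume */
}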
@@ -2296,19 +2419,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- /*
- * All our contollers are capable of performing 64-bit DMA
- */
- if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
-
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
- goto fail_set_dma_mask;
- }
- } else {
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
- goto fail_set_dma_mask;
- }
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
host = scsi_host_alloc(&megasas_template,
sizeof(struct megasas_instance));
@@ -2357,8 +2469,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
init_waitqueue_head(&instance->abort_cmd_wait_q);
spin_lock_init(&instance->cmd_pool_lock);
+ spin_lock_init(&instance->completion_lock);
- sema_init(&instance->aen_mutex, 1);
+ mutex_init(&instance->aen_mutex);
sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
/*
@@ -2490,8 +2603,10 @@ static void megasas_flush_cache(struct megasas_instance *instance)
/**
* megasas_shutdown_controller - Instructs FW to shutdown the controller
* @instance: Adapter soft state
+ * @opcode: Shutdown/Hibernate
*/
-static void megasas_shutdown_controller(struct megasas_instance *instance)
+static void megasas_shutdown_controller(struct megasas_instance *instance,
+ u32 opcode)
{
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
@@ -2514,7 +2629,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->data_xfer_len = 0;
- dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN;
+ dcmd->opcode = opcode;
megasas_issue_blocked_cmd(instance, cmd);
@@ -2524,6 +2639,139 @@ static void megasas_shutdown_controller(struct megasas_instance *instance)
}
/**
+ * megasas_suspend - driver suspend entry point
+ * @pdev: PCI device structure
+ * @state: PCI power state to suspend routine
+ */
+static int __devinit
+megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+
+ if (poll_mode_io)
+ del_timer_sync(&instance->io_completion_timer);
+
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
+ tasklet_kill(&instance->isr_tasklet);
+
+ pci_set_drvdata(instance->pdev, instance);
+ instance->instancet->disable_intr(instance->reg_set);
+ free_irq(instance->pdev->irq, instance);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+/**
+ * megasas_resume- driver resume entry point
+ * @pdev: PCI device structure
+ */
+static int __devinit
+megasas_resume(struct pci_dev *pdev)
+{
+ int rval;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ /*
+	 * PCI prepping: enable the device, set bus mastering and the DMA mask
+ */
+ rval = pci_enable_device(pdev);
+
+ if (rval) {
+ printk(KERN_ERR "megasas: Enable device failed\n");
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
+
+ /*
+ * Initialize MFI Firmware
+ */
+
+ *instance->producer = 0;
+ *instance->consumer = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance))
+ goto fail_ready_state;
+
+ if (megasas_issue_init_mfi(instance))
+ goto fail_init_mfi;
+
+ tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+ (unsigned long)instance);
+
+ /*
+ * Register IRQ
+ */
+ if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED,
+ "megasas", instance)) {
+ printk(KERN_ERR "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+
+ instance->instancet->enable_intr(instance->reg_set);
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
+ /* Initialize the cmd completion timer */
+ if (poll_mode_io)
+ megasas_start_timer(instance, &instance->io_completion_timer,
+ megasas_io_completion_timer,
+ MEGASAS_COMPLETION_TIMER_INTERVAL);
+ return 0;
+
+fail_irq:
+fail_init_mfi:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+fail_set_dma_mask:
+fail_ready_state:
+
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+
+/**
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
*/
@@ -2536,9 +2784,12 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance = pci_get_drvdata(pdev);
host = instance->host;
+ if (poll_mode_io)
+ del_timer_sync(&instance->io_completion_timer);
+
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
- megasas_shutdown_controller(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
tasklet_kill(&instance->isr_tasklet);
/*
@@ -2660,6 +2911,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
void *sense = NULL;
dma_addr_t sense_handle;
u32 *sense_ptr;
+ unsigned long *sense_buff;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
@@ -2764,14 +3016,16 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
*/
if (ioc->sense_len) {
/*
- * sense_ptr points to the location that has the user
+ * sense_buff points to the location that has the user
* sense buffer address
*/
- sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
- ioc->sense_off);
+ sense_buff = (unsigned long *) ((unsigned long)ioc->frame.raw +
+ ioc->sense_off);
- if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
- sense, ioc->sense_len)) {
+ if (copy_to_user((void __user *)(unsigned long)(*sense_buff),
+ sense, ioc->sense_len)) {
+ printk(KERN_ERR "megasas: Failed to copy out to user "
+ "sense data\n");
error = -EFAULT;
goto out;
}
@@ -2874,10 +3128,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
if (!instance)
return -ENODEV;
- down(&instance->aen_mutex);
+ mutex_lock(&instance->aen_mutex);
error = megasas_register_aen(instance, aen.seq_num,
aen.class_locale_word);
- up(&instance->aen_mutex);
+ mutex_unlock(&instance->aen_mutex);
return error;
}
@@ -2977,6 +3231,8 @@ static struct pci_driver megasas_pci_driver = {
.id_table = megasas_pci_table,
.probe = megasas_probe_one,
.remove = __devexit_p(megasas_detach_one),
+ .suspend = megasas_suspend,
+ .resume = megasas_resume,
.shutdown = megasas_shutdown,
};
@@ -3004,7 +3260,7 @@ static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
static ssize_t
megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
{
- return sprintf(buf,"%u",megasas_dbg_lvl);
+ return sprintf(buf, "%u\n", megasas_dbg_lvl);
}
static ssize_t
@@ -3019,7 +3275,65 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
}
static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl,
- megasas_sysfs_set_dbg_lvl);
+ megasas_sysfs_set_dbg_lvl);
+
+static ssize_t
+megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", poll_mode_io);
+}
+
+static ssize_t
+megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
+ const char *buf, size_t count)
+{
+ int retval = count;
+ int tmp = poll_mode_io;
+ int i;
+ struct megasas_instance *instance;
+
+ if (sscanf(buf, "%u", &poll_mode_io) < 1) {
+ printk(KERN_ERR "megasas: could not set poll_mode_io\n");
+ retval = -EINVAL;
+ }
+
+ /*
+ * Check if poll_mode_io is already set or is same as previous value
+ */
+ if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
+ goto out;
+
+ if (poll_mode_io) {
+ /*
+ * Start timers for all adapters
+ */
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ instance = megasas_mgmt_info.instance[i];
+ if (instance) {
+ megasas_start_timer(instance,
+ &instance->io_completion_timer,
+ megasas_io_completion_timer,
+ MEGASAS_COMPLETION_TIMER_INTERVAL);
+ }
+ }
+ } else {
+ /*
+ * Delete timers for all adapters
+ */
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ instance = megasas_mgmt_info.instance[i];
+ if (instance)
+ del_timer_sync(&instance->io_completion_timer);
+ }
+ }
+
+out:
+ return retval;
+}
+
+static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO,
+ megasas_sysfs_show_poll_mode_io,
+ megasas_sysfs_set_poll_mode_io);
/**
* megasas_init - Driver load entry point
@@ -3070,8 +3384,16 @@ static int __init megasas_init(void)
&driver_attr_dbg_lvl);
if (rval)
goto err_dcf_dbg_lvl;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_poll_mode_io);
+ if (rval)
+ goto err_dcf_poll_mode_io;
return rval;
+
+err_dcf_poll_mode_io:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
@@ -3090,6 +3412,8 @@ err_pcidrv:
static void __exit megasas_exit(void)
{
driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_poll_mode_io);
+ driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 4dffc918a414..6466bdf548c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2,7 +2,7 @@
*
* Linux MegaRAID driver for SAS based RAID controllers
*
- * Copyright (c) 2003-2005 LSI Logic Corporation.
+ * Copyright (c) 2003-2005 LSI Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,9 +18,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.03.10-rc5"
-#define MEGASAS_RELDATE "May 17, 2007"
-#define MEGASAS_EXT_VERSION "Thu May 17 10:09:32 PDT 2007"
+#define MEGASAS_VERSION "00.00.03.16-rc1"
+#define MEGASAS_RELDATE "Nov. 07, 2007"
+#define MEGASAS_EXT_VERSION "Thu. Nov. 07 10:09:32 PDT 2007"
/*
* Device IDs
@@ -117,6 +117,7 @@
#define MR_FLUSH_DISK_CACHE 0x02
#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
#define MR_ENABLE_DRIVE_SPINDOWN 0x01
#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
@@ -570,7 +571,8 @@ struct megasas_ctrl_info {
#define IS_DMA64 (sizeof(dma_addr_t) == 8)
#define MFI_OB_INTR_STATUS_MASK 0x00000002
-#define MFI_POLL_TIMEOUT_SECS 10
+#define MFI_POLL_TIMEOUT_SECS 60
+#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
@@ -1083,13 +1085,15 @@ struct megasas_instance {
struct megasas_cmd **cmd_list;
struct list_head cmd_pool;
spinlock_t cmd_pool_lock;
+ /* used to synch producer, consumer ptrs in dpc */
+ spinlock_t completion_lock;
struct dma_pool *frame_dma_pool;
struct dma_pool *sense_dma_pool;
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct semaphore aen_mutex;
+ struct mutex aen_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -1108,6 +1112,8 @@ struct megasas_instance {
u8 flag;
unsigned long last_time;
+
+ struct timer_list io_completion_timer;
};
#define MEGASAS_IS_LOGICAL(scp) \
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 016c462bc771..c02771aa6c9b 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4963,7 +4963,8 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
** Copy back sense data to caller's buffer.
*/
memcpy(cmd->sense_buffer, cp->sense_buf,
- min(sizeof(cmd->sense_buffer), sizeof(cp->sense_buf)));
+ min_t(size_t, SCSI_SENSE_BUFFERSIZE,
+ sizeof(cp->sense_buf)));
if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
u_char * p = (u_char*) & cmd->sense_buffer;
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index fa481b515ead..53857c6b6d4d 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -6,7 +6,8 @@ menuconfig SCSI_LOWLEVEL_PCMCIA
bool "PCMCIA SCSI adapter support"
depends on SCSI!=n && PCMCIA!=n
-if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA
+# drivers have problems when built in, so require modules
+if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m
config PCMCIA_AHA152X
tristate "Adaptec AHA152X PCMCIA support"
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index a45d89b14147..5082ca3c6876 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -135,6 +135,11 @@ static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
#define NSP_DEBUG_BUF_LEN 150
+static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc)
+{
+ scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc);
+}
+
static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...)
{
va_list args;
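The nsp_inc_resid() helper added above funnels every residual adjustment through the scsi_get_resid()/scsi_set_resid() accessors instead of poking SCpnt->resid directly, which is what the rest of this hunk series converts the PIO read/write paths to. A tiny usage sketch (illustrative function names):

#include <scsi/scsi_cmnd.h>

/* Bytes actually moved to or from the device shrink the residual... */
static void demo_account_transfer(struct scsi_cmnd *cmd, int bytes_moved)
{
	scsi_set_resid(cmd, scsi_get_resid(cmd) - bytes_moved);
}

/* ...and bytes put back after an unexpected phase change grow it again. */
static void demo_rewind_transfer(struct scsi_cmnd *cmd, int bytes_put_back)
{
	scsi_set_resid(cmd, scsi_get_resid(cmd) + bytes_put_back);
}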
@@ -192,8 +197,10 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
#endif
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
- nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "SCpnt=0x%p target=%d lun=%d buff=0x%p bufflen=%d use_sg=%d",
- SCpnt, target, SCpnt->device->lun, SCpnt->request_buffer, SCpnt->request_bufflen, SCpnt->use_sg);
+ nsp_dbg(NSP_DEBUG_QUEUECOMMAND,
+ "SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d",
+ SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt),
+ scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);
SCpnt->scsi_done = done;
@@ -225,7 +232,7 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
SCpnt->SCp.have_data_in = IO_UNKNOWN;
SCpnt->SCp.sent_command = 0;
SCpnt->SCp.phase = PH_UNDETERMINED;
- SCpnt->resid = SCpnt->request_bufflen;
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
/* setup scratch area
SCp.ptr : buffer pointer
@@ -233,14 +240,14 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
SCp.buffer : next buffer
SCp.buffers_residual : left buffers in list
SCp.phase : current state of the command */
- if (SCpnt->use_sg) {
- SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
+ if (scsi_bufflen(SCpnt)) {
+ SCpnt->SCp.buffer = scsi_sglist(SCpnt);
SCpnt->SCp.ptr = BUFFER_ADDR;
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
+ SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
} else {
- SCpnt->SCp.ptr = (char *) SCpnt->request_buffer;
- SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+ SCpnt->SCp.ptr = NULL;
+ SCpnt->SCp.this_residual = 0;
SCpnt->SCp.buffer = NULL;
SCpnt->SCp.buffers_residual = 0;
}
@@ -721,7 +728,9 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d",
- SCpnt, SCpnt->resid, ocount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual);
+ SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr,
+ SCpnt->SCp.this_residual, SCpnt->SCp.buffer,
+ SCpnt->SCp.buffers_residual);
time_out = 1000;
@@ -771,7 +780,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
return;
}
- SCpnt->resid -= res;
+ nsp_inc_resid(SCpnt, -res);
SCpnt->SCp.ptr += res;
SCpnt->SCp.this_residual -= res;
ocount += res;
@@ -795,10 +804,12 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
if (time_out == 0) {
nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
- SCpnt->resid, SCpnt->SCp.this_residual, SCpnt->SCp.buffers_residual);
+ scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
+ SCpnt->SCp.buffers_residual);
}
nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount);
- nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId, SCpnt->resid);
+ nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId,
+ scsi_get_resid(SCpnt));
}
/*
@@ -816,7 +827,9 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x",
- data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual, SCpnt->resid);
+ data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual,
+ SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual,
+ scsi_get_resid(SCpnt));
time_out = 1000;
@@ -830,7 +843,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res);
/* Put back pointer */
- SCpnt->resid += res;
+ nsp_inc_resid(SCpnt, res);
SCpnt->SCp.ptr -= res;
SCpnt->SCp.this_residual += res;
ocount -= res;
@@ -866,7 +879,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
break;
}
- SCpnt->resid -= res;
+ nsp_inc_resid(SCpnt, -res);
SCpnt->SCp.ptr += res;
SCpnt->SCp.this_residual -= res;
ocount += res;
@@ -886,10 +899,12 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
data->FifoCount = ocount;
if (time_out == 0) {
- nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", SCpnt->resid);
+ nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x",
+ scsi_get_resid(SCpnt));
}
nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount);
- nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId, SCpnt->resid);
+ nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId,
+ scsi_get_resid(SCpnt));
}
#undef RFIFO_CRIT
#undef WFIFO_CRIT
@@ -911,9 +926,8 @@ static int nsp_nexus(struct scsi_cmnd *SCpnt)
nsp_index_write(base, SYNCREG, sync->SyncRegister);
nsp_index_write(base, ACKWIDTH, sync->AckWidth);
- if (SCpnt->use_sg == 0 ||
- SCpnt->resid % 4 != 0 ||
- SCpnt->resid <= PAGE_SIZE ) {
+ if (scsi_get_resid(SCpnt) % 4 != 0 ||
+ scsi_get_resid(SCpnt) <= PAGE_SIZE ) {
data->TransferMode = MODE_IO8;
} else if (nsp_burst_mode == BURST_MEM32) {
data->TransferMode = MODE_MEM32;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 67ee51a3d7e1..f655ae320b48 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -750,18 +750,16 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
cmd->SCp.phase++;
case 4: /* Phase 4 - Setup scatter/gather buffers */
- if (cmd->use_sg) {
- /* if many buffers are available, start filling the first */
- cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
} else {
- /* else fill the only available buffer */
cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = cmd->request_bufflen;
- cmd->SCp.ptr = cmd->request_buffer;
+ cmd->SCp.this_residual = 0;
+ cmd->SCp.ptr = NULL;
}
- cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
case 5: /* Phase 5 - Data transfer stage */
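As in the nsp_cs change above, ppa now derives its scatter/gather bookkeeping from the accessor functions (scsi_bufflen(), scsi_sglist(), scsi_sg_count()) rather than the removed use_sg/request_buffer fields. A sketch of the accessor-based scratch-area setup (illustrative name; the SCp fields are the midlayer's per-command scratch space):

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static void demo_setup_data_phase(struct scsi_cmnd *cmd)
{
	if (scsi_bufflen(cmd)) {
		/* start with the first scatter/gather segment */
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
	} else {
		/* no data phase for this command */
		cmd->SCp.buffer = NULL;
		cmd->SCp.this_residual = 0;
		cmd->SCp.ptr = NULL;
	}
	cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
}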
diff --git a/drivers/scsi/psi240i.c b/drivers/scsi/psi240i.c
deleted file mode 100644
index 899e89d6fe67..000000000000
--- a/drivers/scsi/psi240i.c
+++ /dev/null
@@ -1,689 +0,0 @@
-/*+M*************************************************************************
- * Perceptive Solutions, Inc. PSI-240I device driver proc support for Linux.
- *
- * Copyright (c) 1997 Perceptive Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * File Name: psi240i.c
- *
- * Description: SCSI driver for the PSI240I EIDE interface card.
- *
- *-M*************************************************************************/
-
-#include <linux/module.h>
-
-#include <linux/blkdev.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/stat.h>
-
-#include <asm/dma.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-#include "psi240i.h"
-#include "psi_chip.h"
-
-//#define DEBUG 1
-
-#ifdef DEBUG
-#define DEB(x) x
-#else
-#define DEB(x)
-#endif
-
-#define MAXBOARDS 6 /* Increase this and the sizes of the arrays below, if you need more. */
-
-#define PORT_DATA 0
-#define PORT_ERROR 1
-#define PORT_SECTOR_COUNT 2
-#define PORT_LBA_0 3
-#define PORT_LBA_8 4
-#define PORT_LBA_16 5
-#define PORT_LBA_24 6
-#define PORT_STAT_CMD 7
-#define PORT_SEL_FAIL 8
-#define PORT_IRQ_STATUS 9
-#define PORT_ADDRESS 10
-#define PORT_FAIL 11
-#define PORT_ALT_STAT 12
-
-typedef struct
- {
- UCHAR device; // device code
- UCHAR byte6; // device select register image
- UCHAR spigot; // spigot number
- UCHAR expectingIRQ; // flag for expecting an interrupt
- USHORT sectors; // number of sectors per track
- USHORT heads; // number of heads
- USHORT cylinders; // number of cylinders for this device
- USHORT spareword; // placeholder
- ULONG blocks; // number of blocks on device
- } OUR_DEVICE, *POUR_DEVICE;
-
-typedef struct
- {
- USHORT ports[13];
- OUR_DEVICE device[8];
- struct scsi_cmnd *pSCmnd;
- IDE_STRUCT ide;
- ULONG startSector;
- USHORT sectorCount;
- struct scsi_cmnd *SCpnt;
- VOID *buffer;
- USHORT expectingIRQ;
- } ADAPTER240I, *PADAPTER240I;
-
-#define HOSTDATA(host) ((PADAPTER240I)&host->hostdata)
-
-static struct Scsi_Host *PsiHost[6] = {NULL,}; /* One for each IRQ level (10-15) */
-static IDENTIFY_DATA identifyData;
-static SETUP ChipSetup;
-
-static USHORT portAddr[6] = {CHIP_ADRS_0, CHIP_ADRS_1, CHIP_ADRS_2, CHIP_ADRS_3, CHIP_ADRS_4, CHIP_ADRS_5};
-
-/****************************************************************
- * Name: WriteData :LOCAL
- *
- * Description: Write data to device.
- *
- * Parameters: padapter - Pointer to adapter data structure.
- *
- * Returns: TRUE if drive does not assert DRQ in time.
- *
- ****************************************************************/
-static int WriteData (PADAPTER240I padapter)
- {
- ULONG timer;
- USHORT *pports = padapter->ports;
-
- timer = jiffies + TIMEOUT_DRQ; // calculate the timeout value
- do {
- if ( inb_p (pports[PORT_STAT_CMD]) & IDE_STATUS_DRQ )
- {
- outsw (pports[PORT_DATA], padapter->buffer, (USHORT)padapter->ide.ide.ide[2] * 256);
- return 0;
- }
- } while ( time_after(timer, jiffies) ); // test for timeout
-
- padapter->ide.ide.ides.cmd = 0; // null out the command byte
- return 1;
- }
-/****************************************************************
- * Name: IdeCmd :LOCAL
- *
- * Description: Process a queued command from the SCSI manager.
- *
- * Parameters: padapter - Pointer to adapter data structure.
- *
- * Returns: Zero if no error or status register contents on error.
- *
- ****************************************************************/
-static UCHAR IdeCmd (PADAPTER240I padapter)
- {
- ULONG timer;
- USHORT *pports = padapter->ports;
- UCHAR status;
-
- outb_p (padapter->ide.ide.ides.spigot, pports[PORT_SEL_FAIL]); // select the spigot
- outb_p (padapter->ide.ide.ide[6], pports[PORT_LBA_24]); // select the drive
- timer = jiffies + TIMEOUT_READY; // calculate the timeout value
- do {
- status = inb_p (padapter->ports[PORT_STAT_CMD]);
- if ( status & IDE_STATUS_DRDY )
- {
- outb_p (padapter->ide.ide.ide[2], pports[PORT_SECTOR_COUNT]);
- outb_p (padapter->ide.ide.ide[3], pports[PORT_LBA_0]);
- outb_p (padapter->ide.ide.ide[4], pports[PORT_LBA_8]);
- outb_p (padapter->ide.ide.ide[5], pports[PORT_LBA_16]);
- padapter->expectingIRQ = 1;
- outb_p (padapter->ide.ide.ide[7], pports[PORT_STAT_CMD]);
-
- if ( padapter->ide.ide.ides.cmd == IDE_CMD_WRITE_MULTIPLE )
- return (WriteData (padapter));
-
- return 0;
- }
- } while ( time_after(timer, jiffies) ); // test for timeout
-
- padapter->ide.ide.ides.cmd = 0; // null out the command byte
- return status;
- }
-/****************************************************************
- * Name: SetupTransfer :LOCAL
- *
- * Description: Setup a data transfer command.
- *
- * Parameters: padapter - Pointer to adapter data structure.
- * drive - Drive/head register upper nibble only.
- *
- * Returns: TRUE if no data to transfer.
- *
- ****************************************************************/
-static int SetupTransfer (PADAPTER240I padapter, UCHAR drive)
- {
- if ( padapter->sectorCount )
- {
- *(ULONG *)padapter->ide.ide.ides.lba = padapter->startSector;
- padapter->ide.ide.ide[6] |= drive;
- padapter->ide.ide.ides.sectors = ( padapter->sectorCount > SECTORSXFER ) ? SECTORSXFER : padapter->sectorCount;
- padapter->sectorCount -= padapter->ide.ide.ides.sectors; // bump the start and count for next xfer
- padapter->startSector += padapter->ide.ide.ides.sectors;
- return 0;
- }
- else
- {
- padapter->ide.ide.ides.cmd = 0; // null out the command byte
- padapter->SCpnt = NULL;
- return 1;
- }
- }
-/****************************************************************
- * Name: DecodeError :LOCAL
- *
- * Description: Decode and process device errors.
- *
- * Parameters: pshost - Pointer to host data block.
- * status - Status register code.
- *
- * Returns: The driver status code.
- *
- ****************************************************************/
-static ULONG DecodeError (struct Scsi_Host *pshost, UCHAR status)
- {
- PADAPTER240I padapter = HOSTDATA(pshost);
- UCHAR error;
-
- padapter->expectingIRQ = 0;
- padapter->SCpnt = NULL;
- if ( status & IDE_STATUS_WRITE_FAULT )
- {
- return DID_PARITY << 16;
- }
- if ( status & IDE_STATUS_BUSY )
- return DID_BUS_BUSY << 16;
-
- error = inb_p (padapter->ports[PORT_ERROR]);
- DEB(printk ("\npsi240i error register: %x", error));
- switch ( error )
- {
- case IDE_ERROR_AMNF:
- case IDE_ERROR_TKONF:
- case IDE_ERROR_ABRT:
- case IDE_ERROR_IDFN:
- case IDE_ERROR_UNC:
- case IDE_ERROR_BBK:
- default:
- return DID_ERROR << 16;
- }
- return DID_ERROR << 16;
- }
-/****************************************************************
- * Name: Irq_Handler :LOCAL
- *
- * Description: Interrupt handler.
- *
- * Parameters: irq - Hardware IRQ number.
- * dev_id -
- *
- * Returns: TRUE if drive is not ready in time.
- *
- ****************************************************************/
-static void Irq_Handler (int irq, void *dev_id)
- {
- struct Scsi_Host *shost; // Pointer to host data block
- PADAPTER240I padapter; // Pointer to adapter control structure
- USHORT *pports; // I/O port array
- struct scsi_cmnd *SCpnt;
- UCHAR status;
- int z;
-
- DEB(printk ("\npsi240i received interrupt\n"));
-
- shost = PsiHost[irq - 10];
- if ( !shost )
- panic ("Splunge!");
-
- padapter = HOSTDATA(shost);
- pports = padapter->ports;
- SCpnt = padapter->SCpnt;
-
- if ( !padapter->expectingIRQ )
- {
- DEB(printk ("\npsi240i Unsolicited interrupt\n"));
- return;
- }
- padapter->expectingIRQ = 0;
-
- status = inb_p (padapter->ports[PORT_STAT_CMD]); // read the device status
- if ( status & (IDE_STATUS_ERROR | IDE_STATUS_WRITE_FAULT) )
- goto irqerror;
-
- DEB(printk ("\npsi240i processing interrupt"));
- switch ( padapter->ide.ide.ides.cmd ) // decide how to handle the interrupt
- {
- case IDE_CMD_READ_MULTIPLE:
- if ( status & IDE_STATUS_DRQ )
- {
- insw (pports[PORT_DATA], padapter->buffer, (USHORT)padapter->ide.ide.ides.sectors * 256);
- padapter->buffer += padapter->ide.ide.ides.sectors * 512;
- if ( SetupTransfer (padapter, padapter->ide.ide.ide[6] & 0xF0) )
- {
- SCpnt->result = DID_OK << 16;
- padapter->SCpnt = NULL;
- SCpnt->scsi_done (SCpnt);
- return;
- }
- if ( !(status = IdeCmd (padapter)) )
- return;
- }
- break;
-
- case IDE_CMD_WRITE_MULTIPLE:
- padapter->buffer += padapter->ide.ide.ides.sectors * 512;
- if ( SetupTransfer (padapter, padapter->ide.ide.ide[6] & 0xF0) )
- {
- SCpnt->result = DID_OK << 16;
- padapter->SCpnt = NULL;
- SCpnt->scsi_done (SCpnt);
- return;
- }
- if ( !(status = IdeCmd (padapter)) )
- return;
- break;
-
- case IDE_COMMAND_IDENTIFY:
- {
- PINQUIRYDATA pinquiryData = SCpnt->request_buffer;
-
- if ( status & IDE_STATUS_DRQ )
- {
- insw (pports[PORT_DATA], &identifyData, sizeof (identifyData) >> 1);
-
- memset (pinquiryData, 0, SCpnt->request_bufflen); // Zero INQUIRY data structure.
- pinquiryData->DeviceType = 0;
- pinquiryData->Versions = 2;
- pinquiryData->AdditionalLength = 35 - 4;
-
- // Fill in vendor identification fields.
- for ( z = 0; z < 8; z += 2 )
- {
- pinquiryData->VendorId[z] = ((UCHAR *)identifyData.ModelNumber)[z + 1];
- pinquiryData->VendorId[z + 1] = ((UCHAR *)identifyData.ModelNumber)[z];
- }
-
- // Initialize unused portion of product id.
- for ( z = 0; z < 4; z++ )
- pinquiryData->ProductId[12 + z] = ' ';
-
- // Move firmware revision from IDENTIFY data to
- // product revision in INQUIRY data.
- for ( z = 0; z < 4; z += 2 )
- {
- pinquiryData->ProductRevisionLevel[z] = ((UCHAR *)identifyData.FirmwareRevision)[z + 1];
- pinquiryData->ProductRevisionLevel[z + 1] = ((UCHAR *)identifyData.FirmwareRevision)[z];
- }
-
- SCpnt->result = DID_OK << 16;
- padapter->SCpnt = NULL;
- SCpnt->scsi_done (SCpnt);
- return;
- }
- break;
- }
-
- default:
- SCpnt->result = DID_OK << 16;
- padapter->SCpnt = NULL;
- SCpnt->scsi_done (SCpnt);
- return;
- }
-
-irqerror:;
- DEB(printk ("\npsi240i error Device Status: %X\n", status));
- SCpnt->result = DecodeError (shost, status);
- SCpnt->scsi_done (SCpnt);
- }
-
-static irqreturn_t do_Irq_Handler (int irq, void *dev_id)
-{
- unsigned long flags;
- struct Scsi_Host *dev = dev_id;
-
- spin_lock_irqsave(dev->host_lock, flags);
- Irq_Handler(irq, dev_id);
- spin_unlock_irqrestore(dev->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-/****************************************************************
- * Name: Psi240i_QueueCommand
- *
- * Description: Process a queued command from the SCSI manager.
- *
- * Parameters: SCpnt - Pointer to SCSI command structure.
- * done - Pointer to done function to call.
- *
- * Returns: Status code.
- *
- ****************************************************************/
-static int Psi240i_QueueCommand(struct scsi_cmnd *SCpnt,
- void (*done)(struct scsi_cmnd *))
- {
- UCHAR *cdb = (UCHAR *)SCpnt->cmnd;
- // Pointer to SCSI CDB
- PADAPTER240I padapter = HOSTDATA (SCpnt->device->host);
- // Pointer to adapter control structure
- POUR_DEVICE pdev = &padapter->device [SCpnt->device->id];
- // Pointer to device information
- UCHAR rc;
- // command return code
-
- SCpnt->scsi_done = done;
- padapter->ide.ide.ides.spigot = pdev->spigot;
- padapter->buffer = SCpnt->request_buffer;
- if (done)
- {
- if ( !pdev->device )
- {
- SCpnt->result = DID_BAD_TARGET << 16;
- done (SCpnt);
- return 0;
- }
- }
- else
- {
- printk("psi240i_queuecommand: %02X: done can't be NULL\n", *cdb);
- return 0;
- }
-
- switch ( *cdb )
- {
- case SCSIOP_INQUIRY: // inquiry CDB
- {
- padapter->ide.ide.ide[6] = pdev->byte6;
- padapter->ide.ide.ides.cmd = IDE_COMMAND_IDENTIFY;
- break;
- }
-
- case SCSIOP_TEST_UNIT_READY: // test unit ready CDB
- SCpnt->result = DID_OK << 16;
- done (SCpnt);
- return 0;
-
- case SCSIOP_READ_CAPACITY: // read capacity CDB
- {
- PREAD_CAPACITY_DATA pdata = (PREAD_CAPACITY_DATA)SCpnt->request_buffer;
-
- pdata->blksiz = 0x20000;
- XANY2SCSI ((UCHAR *)&pdata->blks, pdev->blocks);
- SCpnt->result = DID_OK << 16;
- done (SCpnt);
- return 0;
- }
-
- case SCSIOP_VERIFY: // verify CDB
- *(ULONG *)padapter->ide.ide.ides.lba = XSCSI2LONG (&cdb[2]);
- padapter->ide.ide.ide[6] |= pdev->byte6;
- padapter->ide.ide.ide[2] = (UCHAR)((USHORT)cdb[8] | ((USHORT)cdb[7] << 8));
- padapter->ide.ide.ides.cmd = IDE_COMMAND_VERIFY;
- break;
-
- case SCSIOP_READ: // read10 CDB
- padapter->startSector = XSCSI2LONG (&cdb[2]);
- padapter->sectorCount = (USHORT)cdb[8] | ((USHORT)cdb[7] << 8);
- SetupTransfer (padapter, pdev->byte6);
- padapter->ide.ide.ides.cmd = IDE_CMD_READ_MULTIPLE;
- break;
-
- case SCSIOP_READ6: // read6 CDB
- padapter->startSector = SCSI2LONG (&cdb[1]);
- padapter->sectorCount = cdb[4];
- SetupTransfer (padapter, pdev->byte6);
- padapter->ide.ide.ides.cmd = IDE_CMD_READ_MULTIPLE;
- break;
-
- case SCSIOP_WRITE: // write10 CDB
- padapter->startSector = XSCSI2LONG (&cdb[2]);
- padapter->sectorCount = (USHORT)cdb[8] | ((USHORT)cdb[7] << 8);
- SetupTransfer (padapter, pdev->byte6);
- padapter->ide.ide.ides.cmd = IDE_CMD_WRITE_MULTIPLE;
- break;
- case SCSIOP_WRITE6: // write6 CDB
- padapter->startSector = SCSI2LONG (&cdb[1]);
- padapter->sectorCount = cdb[4];
- SetupTransfer (padapter, pdev->byte6);
- padapter->ide.ide.ides.cmd = IDE_CMD_WRITE_MULTIPLE;
- break;
-
- default:
- DEB (printk ("psi240i_queuecommand: Unsupported command %02X\n", *cdb));
- SCpnt->result = DID_ERROR << 16;
- done (SCpnt);
- return 0;
- }
-
- padapter->SCpnt = SCpnt; // Save this command data
-
- rc = IdeCmd (padapter);
- if ( rc )
- {
- padapter->expectingIRQ = 0;
- DEB (printk ("psi240i_queuecommand: %02X, %02X: Device failed to respond for command\n", *cdb, padapter->ide.ide.ides.cmd));
- SCpnt->result = DID_ERROR << 16;
- done (SCpnt);
- return 0;
- }
- DEB (printk("psi240i_queuecommand: %02X, %02X now waiting for interrupt ", *cdb, padapter->ide.ide.ides.cmd));
- return 0;
- }
-
-/***************************************************************************
- * Name: ReadChipMemory
- *
- * Description: Read information from controller memory.
- *
- * Parameters: psetup - Pointer to memory image of setup information.
- * base - base address of memory.
- * length - length of data space in bytes.
- * port - I/O address of data port.
- *
- * Returns: Nothing.
- *
- **************************************************************************/
-static void ReadChipMemory (void *pdata, USHORT base, USHORT length, USHORT port)
- {
- USHORT z, zz;
- UCHAR *pd = (UCHAR *)pdata;
- outb_p (SEL_NONE, port + REG_SEL_FAIL); // setup data port
- zz = 0;
- while ( zz < length )
- {
- outw_p (base, port + REG_ADDRESS); // setup address
-
- for ( z = 0; z < 8; z++ )
- {
- if ( (zz + z) < length )
- *pd++ = inb_p (port + z); // read data byte
- }
- zz += 8;
- base += 8;
- }
- }
-/****************************************************************
- * Name: Psi240i_Detect
- *
- * Description: Detect and initialize our boards.
- *
- * Parameters: tpnt - Pointer to SCSI host template structure.
- *
- * Returns: Number of adapters found.
- *
- ****************************************************************/
-static int Psi240i_Detect (struct scsi_host_template *tpnt)
- {
- int board;
- int count = 0;
- int unit;
- int z;
- USHORT port, port_range = 16;
- CHIP_CONFIG_N chipConfig;
- CHIP_DEVICE_N chipDevice[8];
- struct Scsi_Host *pshost;
-
- for ( board = 0; board < MAXBOARDS; board++ ) // scan for I/O ports
- {
- pshost = NULL;
- port = portAddr[board]; // get base address to test
- if ( !request_region (port, port_range, "psi240i") )
- continue;
- if ( inb_p (port + REG_FAIL) != CHIP_ID ) // do the first test for likelihood that it is us
- goto host_init_failure;
- outb_p (SEL_NONE, port + REG_SEL_FAIL); // setup EEPROM/RAM access
- outw (0, port + REG_ADDRESS); // setup EEPROM address zero
- if ( inb_p (port) != 0x55 ) // test 1st byte
- goto host_init_failure; // nope
- if ( inb_p (port + 1) != 0xAA ) // test 2nd byte
- goto host_init_failure; // nope
-
- // at this point our board is found and can be accessed. Now we need to initialize
- // our information and register with the kernel.
-
-
- ReadChipMemory (&chipConfig, CHIP_CONFIG, sizeof (chipConfig), port);
- ReadChipMemory (&chipDevice, CHIP_DEVICE, sizeof (chipDevice), port);
- ReadChipMemory (&ChipSetup, CHIP_EEPROM_DATA, sizeof (ChipSetup), port);
-
- if ( !chipConfig.numDrives ) // if no devices on this board
- goto host_init_failure;
-
- pshost = scsi_register (tpnt, sizeof(ADAPTER240I));
- if(pshost == NULL)
- goto host_init_failure;
-
- PsiHost[chipConfig.irq - 10] = pshost;
- pshost->unique_id = port;
- pshost->io_port = port;
- pshost->n_io_port = 16; /* Number of bytes of I/O space used */
- pshost->irq = chipConfig.irq;
-
- for ( z = 0; z < 11; z++ ) // build register address array
- HOSTDATA(pshost)->ports[z] = port + z;
- HOSTDATA(pshost)->ports[11] = port + REG_FAIL;
- HOSTDATA(pshost)->ports[12] = port + REG_ALT_STAT;
- DEB (printk ("\nPorts ="));
- DEB (for (z=0;z<13;z++) printk(" %#04X",HOSTDATA(pshost)->ports[z]););
-
- for ( z = 0; z < chipConfig.numDrives; ++z )
- {
- unit = chipDevice[z].channel & 0x0F;
- HOSTDATA(pshost)->device[unit].device = ChipSetup.setupDevice[unit].device;
- HOSTDATA(pshost)->device[unit].byte6 = (UCHAR)(((unit & 1) << 4) | 0xE0);
- HOSTDATA(pshost)->device[unit].spigot = (UCHAR)(1 << (unit >> 1));
- HOSTDATA(pshost)->device[unit].sectors = ChipSetup.setupDevice[unit].sectors;
- HOSTDATA(pshost)->device[unit].heads = ChipSetup.setupDevice[unit].heads;
- HOSTDATA(pshost)->device[unit].cylinders = ChipSetup.setupDevice[unit].cylinders;
- HOSTDATA(pshost)->device[unit].blocks = ChipSetup.setupDevice[unit].blocks;
- DEB (printk ("\nHOSTDATA->device = %X", HOSTDATA(pshost)->device[unit].device));
- DEB (printk ("\n byte6 = %X", HOSTDATA(pshost)->device[unit].byte6));
- DEB (printk ("\n spigot = %X", HOSTDATA(pshost)->device[unit].spigot));
- DEB (printk ("\n sectors = %X", HOSTDATA(pshost)->device[unit].sectors));
- DEB (printk ("\n heads = %X", HOSTDATA(pshost)->device[unit].heads));
- DEB (printk ("\n cylinders = %X", HOSTDATA(pshost)->device[unit].cylinders));
- DEB (printk ("\n blocks = %lX", HOSTDATA(pshost)->device[unit].blocks));
- }
-
- if ( request_irq (chipConfig.irq, do_Irq_Handler, 0, "psi240i", pshost) == 0 )
- {
- printk("\nPSI-240I EIDE CONTROLLER: at I/O = %x IRQ = %d\n", port, chipConfig.irq);
- printk("(C) 1997 Perceptive Solutions, Inc. All rights reserved\n\n");
- count++;
- continue;
- }
-
- printk ("Unable to allocate IRQ for PSI-240I controller.\n");
-
-host_init_failure:
-
- release_region (port, port_range);
- if (pshost)
- scsi_unregister (pshost);
-
- }
- return count;
- }
-
-static int Psi240i_Release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-/****************************************************************
- * Name: Psi240i_BiosParam
- *
- * Description: Process the biosparam request from the SCSI manager to
- * return C/H/S data.
- *
- * Parameters: disk - Pointer to SCSI disk structure.
- * dev - Major/minor number from kernel.
- * geom - Pointer to integer array to place geometry data.
- *
- * Returns: zero.
- *
- ****************************************************************/
-static int Psi240i_BiosParam (struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int geom[])
- {
- POUR_DEVICE pdev;
-
- pdev = &(HOSTDATA(sdev->host)->device[sdev_id(sdev)]);
-
- geom[0] = pdev->heads;
- geom[1] = pdev->sectors;
- geom[2] = pdev->cylinders;
- return 0;
- }
-
-MODULE_LICENSE("GPL");
-
-static struct scsi_host_template driver_template = {
- .proc_name = "psi240i",
- .name = "PSI-240I EIDE Disk Controller",
- .detect = Psi240i_Detect,
- .release = Psi240i_Release,
- .queuecommand = Psi240i_QueueCommand,
- .bios_param = Psi240i_BiosParam,
- .can_queue = 1,
- .this_id = -1,
- .sg_tablesize = SG_NONE,
- .cmd_per_lun = 1,
- .use_clustering = DISABLE_CLUSTERING,
-};
-#include "scsi_module.c"
diff --git a/drivers/scsi/psi240i.h b/drivers/scsi/psi240i.h
deleted file mode 100644
index 21ebb9214004..000000000000
--- a/drivers/scsi/psi240i.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/*+M*************************************************************************
- * Perceptive Solutions, Inc. PSI-240I device driver proc support for Linux.
- *
- * Copyright (c) 1997 Perceptive Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * File Name: psi240i.h
- *
- * Description: Header file for the SCSI driver for the PSI240I
- * EIDE interface card.
- *
- *-M*************************************************************************/
-#ifndef _PSI240I_H
-#define _PSI240I_H
-
-#include <linux/types.h>
-
-#ifndef PSI_EIDE_SCSIOP
-#define PSI_EIDE_SCSIOP 1
-
-/************************************************/
-/* Some defines that we like */
-/************************************************/
-#define CHAR char
-#define UCHAR unsigned char
-#define SHORT short
-#define USHORT unsigned short
-#define BOOL unsigned short
-#define LONG long
-#define ULONG unsigned long
-#define VOID void
-
-/************************************************/
-/* Timeout constants */
-/************************************************/
-#define TIMEOUT_READY 10 // 100 mSec
-#define TIMEOUT_DRQ 40 // 400 mSec
-
-/************************************************/
-/* Misc. macros */
-/************************************************/
-#define ANY2SCSI(up, p) \
-((UCHAR *)up)[0] = (((ULONG)(p)) >> 8); \
-((UCHAR *)up)[1] = ((ULONG)(p));
-
-#define SCSI2LONG(up) \
-( (((long)*(((UCHAR *)up))) << 16) \
-+ (((long)(((UCHAR *)up)[1])) << 8) \
-+ ((long)(((UCHAR *)up)[2])) )
-
-#define XANY2SCSI(up, p) \
-((UCHAR *)up)[0] = ((long)(p)) >> 24; \
-((UCHAR *)up)[1] = ((long)(p)) >> 16; \
-((UCHAR *)up)[2] = ((long)(p)) >> 8; \
-((UCHAR *)up)[3] = ((long)(p));
-
-#define XSCSI2LONG(up) \
-( (((long)(((UCHAR *)up)[0])) << 24) \
-+ (((long)(((UCHAR *)up)[1])) << 16) \
-+ (((long)(((UCHAR *)up)[2])) << 8) \
-+ ((long)(((UCHAR *)up)[3])) )
-
-/************************************************/
-/* SCSI CDB operation codes */
-/************************************************/
-#define SCSIOP_TEST_UNIT_READY 0x00
-#define SCSIOP_REZERO_UNIT 0x01
-#define SCSIOP_REWIND 0x01
-#define SCSIOP_REQUEST_BLOCK_ADDR 0x02
-#define SCSIOP_REQUEST_SENSE 0x03
-#define SCSIOP_FORMAT_UNIT 0x04
-#define SCSIOP_READ_BLOCK_LIMITS 0x05
-#define SCSIOP_REASSIGN_BLOCKS 0x07
-#define SCSIOP_READ6 0x08
-#define SCSIOP_RECEIVE 0x08
-#define SCSIOP_WRITE6 0x0A
-#define SCSIOP_PRINT 0x0A
-#define SCSIOP_SEND 0x0A
-#define SCSIOP_SEEK6 0x0B
-#define SCSIOP_TRACK_SELECT 0x0B
-#define SCSIOP_SLEW_PRINT 0x0B
-#define SCSIOP_SEEK_BLOCK 0x0C
-#define SCSIOP_PARTITION 0x0D
-#define SCSIOP_READ_REVERSE 0x0F
-#define SCSIOP_WRITE_FILEMARKS 0x10
-#define SCSIOP_FLUSH_BUFFER 0x10
-#define SCSIOP_SPACE 0x11
-#define SCSIOP_INQUIRY 0x12
-#define SCSIOP_VERIFY6 0x13
-#define SCSIOP_RECOVER_BUF_DATA 0x14
-#define SCSIOP_MODE_SELECT 0x15
-#define SCSIOP_RESERVE_UNIT 0x16
-#define SCSIOP_RELEASE_UNIT 0x17
-#define SCSIOP_COPY 0x18
-#define SCSIOP_ERASE 0x19
-#define SCSIOP_MODE_SENSE 0x1A
-#define SCSIOP_START_STOP_UNIT 0x1B
-#define SCSIOP_STOP_PRINT 0x1B
-#define SCSIOP_LOAD_UNLOAD 0x1B
-#define SCSIOP_RECEIVE_DIAGNOSTIC 0x1C
-#define SCSIOP_SEND_DIAGNOSTIC 0x1D
-#define SCSIOP_MEDIUM_REMOVAL 0x1E
-#define SCSIOP_READ_CAPACITY 0x25
-#define SCSIOP_READ 0x28
-#define SCSIOP_WRITE 0x2A
-#define SCSIOP_SEEK 0x2B
-#define SCSIOP_LOCATE 0x2B
-#define SCSIOP_WRITE_VERIFY 0x2E
-#define SCSIOP_VERIFY 0x2F
-#define SCSIOP_SEARCH_DATA_HIGH 0x30
-#define SCSIOP_SEARCH_DATA_EQUAL 0x31
-#define SCSIOP_SEARCH_DATA_LOW 0x32
-#define SCSIOP_SET_LIMITS 0x33
-#define SCSIOP_READ_POSITION 0x34
-#define SCSIOP_SYNCHRONIZE_CACHE 0x35
-#define SCSIOP_COMPARE 0x39
-#define SCSIOP_COPY_COMPARE 0x3A
-#define SCSIOP_WRITE_DATA_BUFF 0x3B
-#define SCSIOP_READ_DATA_BUFF 0x3C
-#define SCSIOP_CHANGE_DEFINITION 0x40
-#define SCSIOP_READ_SUB_CHANNEL 0x42
-#define SCSIOP_READ_TOC 0x43
-#define SCSIOP_READ_HEADER 0x44
-#define SCSIOP_PLAY_AUDIO 0x45
-#define SCSIOP_PLAY_AUDIO_MSF 0x47
-#define SCSIOP_PLAY_TRACK_INDEX 0x48
-#define SCSIOP_PLAY_TRACK_RELATIVE 0x49
-#define SCSIOP_PAUSE_RESUME 0x4B
-#define SCSIOP_LOG_SELECT 0x4C
-#define SCSIOP_LOG_SENSE 0x4D
-#define SCSIOP_MODE_SELECT10 0x55
-#define SCSIOP_MODE_SENSE10 0x5A
-#define SCSIOP_LOAD_UNLOAD_SLOT 0xA6
-#define SCSIOP_MECHANISM_STATUS 0xBD
-#define SCSIOP_READ_CD 0xBE
-
-// IDE command definitions
-#define IDE_COMMAND_ATAPI_RESET 0x08
-#define IDE_COMMAND_READ 0x20
-#define IDE_COMMAND_WRITE 0x30
-#define IDE_COMMAND_RECALIBRATE 0x10
-#define IDE_COMMAND_SEEK 0x70
-#define IDE_COMMAND_SET_PARAMETERS 0x91
-#define IDE_COMMAND_VERIFY 0x40
-#define IDE_COMMAND_ATAPI_PACKET 0xA0
-#define IDE_COMMAND_ATAPI_IDENTIFY 0xA1
-#define IDE_CMD_READ_MULTIPLE 0xC4
-#define IDE_CMD_WRITE_MULTIPLE 0xC5
-#define IDE_CMD_SET_MULTIPLE 0xC6
-#define IDE_COMMAND_WRITE_DMA 0xCA
-#define IDE_COMMAND_READ_DMA 0xC8
-#define IDE_COMMAND_IDENTIFY 0xEC
-
-// IDE status definitions
-#define IDE_STATUS_ERROR 0x01
-#define IDE_STATUS_INDEX 0x02
-#define IDE_STATUS_CORRECTED_ERROR 0x04
-#define IDE_STATUS_DRQ 0x08
-#define IDE_STATUS_DSC 0x10
-#define IDE_STATUS_WRITE_FAULT 0x20
-#define IDE_STATUS_DRDY 0x40
-#define IDE_STATUS_BUSY 0x80
-
-// IDE error definitions
-#define IDE_ERROR_AMNF 0x01
-#define IDE_ERROR_TKONF 0x02
-#define IDE_ERROR_ABRT 0x04
-#define IDE_ERROR_MCR 0x08
-#define IDE_ERROR_IDFN 0x10
-#define IDE_ERROR_MC 0x20
-#define IDE_ERROR_UNC 0x40
-#define IDE_ERROR_BBK 0x80
-
-// IDE interface structure
-typedef struct _IDE_STRUCT
- {
- union
- {
- UCHAR ide[9];
- struct
- {
- USHORT data;
- UCHAR sectors;
- UCHAR lba[4];
- UCHAR cmd;
- UCHAR spigot;
- } ides;
- } ide;
- } IDE_STRUCT;
-
-// SCSI read capacity structure
-typedef struct _READ_CAPACITY_DATA
- {
- ULONG blks; /* total blocks (converted to little endian) */
- ULONG blksiz; /* size of each (converted to little endian) */
- } READ_CAPACITY_DATA, *PREAD_CAPACITY_DATA;
-
-// SCSI inquiry data
-#ifndef HOSTS_C
-
-typedef struct _INQUIRYDATA
- {
- UCHAR DeviceType :5;
- UCHAR DeviceTypeQualifier :3;
- UCHAR DeviceTypeModifier :7;
- UCHAR RemovableMedia :1;
- UCHAR Versions;
- UCHAR ResponseDataFormat;
- UCHAR AdditionalLength;
- UCHAR Reserved[2];
- UCHAR SoftReset :1;
- UCHAR CommandQueue :1;
- UCHAR Reserved2 :1;
- UCHAR LinkedCommands :1;
- UCHAR Synchronous :1;
- UCHAR Wide16Bit :1;
- UCHAR Wide32Bit :1;
- UCHAR RelativeAddressing :1;
- UCHAR VendorId[8];
- UCHAR ProductId[16];
- UCHAR ProductRevisionLevel[4];
- UCHAR VendorSpecific[20];
- UCHAR Reserved3[40];
- } INQUIRYDATA, *PINQUIRYDATA;
-#endif
-
-// IDE IDENTIFY data
-typedef struct _IDENTIFY_DATA
- {
- USHORT GeneralConfiguration; // 00
- USHORT NumberOfCylinders; // 02
- USHORT Reserved1; // 04
- USHORT NumberOfHeads; // 06
- USHORT UnformattedBytesPerTrack; // 08
- USHORT UnformattedBytesPerSector; // 0A
- USHORT SectorsPerTrack; // 0C
- USHORT VendorUnique1[3]; // 0E
- USHORT SerialNumber[10]; // 14
- USHORT BufferType; // 28
- USHORT BufferSectorSize; // 2A
- USHORT NumberOfEccBytes; // 2C
- USHORT FirmwareRevision[4]; // 2E
- USHORT ModelNumber[20]; // 36
- UCHAR MaximumBlockTransfer; // 5E
- UCHAR VendorUnique2; // 5F
- USHORT DoubleWordIo; // 60
- USHORT Capabilities; // 62
- USHORT Reserved2; // 64
- UCHAR VendorUnique3; // 66
- UCHAR PioCycleTimingMode; // 67
- UCHAR VendorUnique4; // 68
- UCHAR DmaCycleTimingMode; // 69
- USHORT TranslationFieldsValid:1; // 6A
- USHORT Reserved3:15;
- USHORT NumberOfCurrentCylinders; // 6C
- USHORT NumberOfCurrentHeads; // 6E
- USHORT CurrentSectorsPerTrack; // 70
- ULONG CurrentSectorCapacity; // 72
- USHORT Reserved4[197]; // 76
- } IDENTIFY_DATA, *PIDENTIFY_DATA;
-
-// Identify data without the Reserved4.
-typedef struct _IDENTIFY_DATA2 {
- USHORT GeneralConfiguration; // 00
- USHORT NumberOfCylinders; // 02
- USHORT Reserved1; // 04
- USHORT NumberOfHeads; // 06
- USHORT UnformattedBytesPerTrack; // 08
- USHORT UnformattedBytesPerSector; // 0A
- USHORT SectorsPerTrack; // 0C
- USHORT VendorUnique1[3]; // 0E
- USHORT SerialNumber[10]; // 14
- USHORT BufferType; // 28
- USHORT BufferSectorSize; // 2A
- USHORT NumberOfEccBytes; // 2C
- USHORT FirmwareRevision[4]; // 2E
- USHORT ModelNumber[20]; // 36
- UCHAR MaximumBlockTransfer; // 5E
- UCHAR VendorUnique2; // 5F
- USHORT DoubleWordIo; // 60
- USHORT Capabilities; // 62
- USHORT Reserved2; // 64
- UCHAR VendorUnique3; // 66
- UCHAR PioCycleTimingMode; // 67
- UCHAR VendorUnique4; // 68
- UCHAR DmaCycleTimingMode; // 69
- USHORT TranslationFieldsValid:1; // 6A
- USHORT Reserved3:15;
- USHORT NumberOfCurrentCylinders; // 6C
- USHORT NumberOfCurrentHeads; // 6E
- USHORT CurrentSectorsPerTrack; // 70
- ULONG CurrentSectorCapacity; // 72
- } IDENTIFY_DATA2, *PIDENTIFY_DATA2;
-
-#endif // PSI_EIDE_SCSIOP
-
-// function prototypes
-int Psi240i_Command(struct scsi_cmnd *SCpnt);
-int Psi240i_Abort(struct scsi_cmnd *SCpnt);
-int Psi240i_Reset(struct scsi_cmnd *SCpnt, unsigned int flags);
-#endif
diff --git a/drivers/scsi/psi_chip.h b/drivers/scsi/psi_chip.h
deleted file mode 100644
index 224cf8f64c97..000000000000
--- a/drivers/scsi/psi_chip.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*+M*************************************************************************
- * Perceptive Solutions, Inc. PSI-240I device driver proc support for Linux.
- *
- * Copyright (c) 1997 Perceptive Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * File Name: psi_chip.h
- *
- * Description: This file contains the interface defines and
- * error codes.
- *
- *-M*************************************************************************/
-#ifndef PSI_CHIP
-#define PSI_CHIP
-
-/************************************************/
-/* Misc constants */
-/************************************************/
-#define CHIP_MAXDRIVES 8
-
-/************************************************/
-/* Chip I/O addresses */
-/************************************************/
-#define CHIP_ADRS_0 0x0130
-#define CHIP_ADRS_1 0x0150
-#define CHIP_ADRS_2 0x0190
-#define CHIP_ADRS_3 0x0210
-#define CHIP_ADRS_4 0x0230
-#define CHIP_ADRS_5 0x0250
-
-/************************************************/
-/* EEPROM locations */
-/************************************************/
-#define CHIP_EEPROM_BIOS 0x0000 // BIOS base address
-#define CHIP_EEPROM_DATA 0x2000 // SETUP data base address
-#define CHIP_EEPROM_FACTORY 0x2400 // FACTORY data base address
-#define CHIP_EEPROM_SETUP 0x3000 // SETUP PROGRAM base address
-
-#define CHIP_EEPROM_SIZE 32768U // size of the entire EEPROM
-#define CHIP_EEPROM_BIOS_SIZE 8192 // size of the BIOS in bytes
-#define CHIP_EEPROM_DATA_SIZE 4096 // size of factory, setup, log data block in bytes
-#define CHIP_EEPROM_SETUP_SIZE 20480U // size of the setup program in bytes
-
-/************************************************/
-/* Chip Interrupts */
-/************************************************/
-#define CHIP_IRQ_10 0x72
-#define CHIP_IRQ_11 0x73
-#define CHIP_IRQ_12 0x74
-
-/************************************************/
-/* Chip Setup addresses */
-/************************************************/
-#define CHIP_SETUP_BASE 0x0000C000L
-
-/************************************************/
-/* Chip Register address offsets */
-/************************************************/
-#define REG_DATA 0x00
-#define REG_ERROR 0x01
-#define REG_SECTOR_COUNT 0x02
-#define REG_LBA_0 0x03
-#define REG_LBA_8 0x04
-#define REG_LBA_16 0x05
-#define REG_LBA_24 0x06
-#define REG_STAT_CMD 0x07
-#define REG_SEL_FAIL 0x08
-#define REG_IRQ_STATUS 0x09
-#define REG_ADDRESS 0x0A
-#define REG_FAIL 0x0C
-#define REG_ALT_STAT 0x0E
-#define REG_DRIVE_ADRS 0x0F
-
-/************************************************/
-/* Chip RAM locations */
-/************************************************/
-#define CHIP_DEVICE 0x8000
-#define CHIP_DEVICE_0 0x8000
-#define CHIP_DEVICE_1 0x8008
-#define CHIP_DEVICE_2 0x8010
-#define CHIP_DEVICE_3 0x8018
-#define CHIP_DEVICE_4 0x8020
-#define CHIP_DEVICE_5 0x8028
-#define CHIP_DEVICE_6 0x8030
-#define CHIP_DEVICE_7 0x8038
-typedef struct
- {
- UCHAR channel; // channel of this device (0-8).
- UCHAR spt; // Sectors Per Track.
- ULONG spc; // Sectors Per Cylinder.
- } CHIP_DEVICE_N;
-
-#define CHIP_CONFIG 0x8100 // address of boards configuration.
-typedef struct
- {
- UCHAR irq; // interrupt request channel number
- UCHAR numDrives; // Number of accessible drives
- UCHAR fastFormat; // Boolean for fast format enable
- } CHIP_CONFIG_N;
-
-#define CHIP_MAP 0x8108 // eight byte device type map.
-
-
-#define CHIP_RAID 0x8120 // array of RAID signature structures and LBA
-#define CHIP_RAID_1 0x8120
-#define CHIP_RAID_2 0x8130
-#define CHIP_RAID_3 0x8140
-#define CHIP_RAID_4 0x8150
-
-/************************************************/
-/* Chip Register Masks */
-/************************************************/
-#define CHIP_ID 0x7B
-#define SEL_RAM 0x8000
-#define MASK_FAIL 0x80
-
-/************************************************/
-/* Chip cable select bits */
-/************************************************/
-#define SECTORSXFER 8
-
-/************************************************/
-/* Chip cable select bits */
-/************************************************/
-#define SEL_NONE 0x00
-#define SEL_1 0x01
-#define SEL_2 0x02
-#define SEL_3 0x04
-#define SEL_4 0x08
-
-/************************************************/
-/* Programmable Interrupt Controller*/
-/************************************************/
-#define PIC1 0x20 // first 8259 base port address
-#define PIC2 0xA0 // second 8259 base port address
-#define INT_OCW1 1 // Operation Control Word 1: IRQ mask
-#define EOI 0x20 // non-specific end-of-interrupt
-
-/************************************************/
-/* Device/Geometry controls */
-/************************************************/
-#define GEOMETRY_NONE 0x0 // No device
-#define GEOMETRY_AUTO 0x1 // Geometry set automatically
-#define GEOMETRY_USER 0x2 // User supplied geometry
-
-#define DEVICE_NONE 0x0 // No device present
-#define DEVICE_INACTIVE 0x1 // device present but not registered active
-#define DEVICE_ATAPI 0x2 // ATAPI device (CD_ROM, Tape, Etc...)
-#define DEVICE_DASD_NONLBA 0x3 // Non LBA incompatible device
-#define DEVICE_DASD_LBA 0x4 // LBA compatible device
-
-/************************************************/
-/* Setup Structure Definitions */
-/************************************************/
-typedef struct // device setup parameters
- {
- UCHAR geometryControl; // geometry control flags
- UCHAR device; // device code
- USHORT sectors; // number of sectors per track
- USHORT heads; // number of heads
- USHORT cylinders; // number of cylinders for this device
- ULONG blocks; // number of blocks on device
- USHORT spare1;
- USHORT spare2;
- } SETUP_DEVICE, *PSETUP_DEVICE;
-
-typedef struct // master setup structure
- {
- USHORT startupDelay;
- USHORT promptBIOS;
- USHORT fastFormat;
- USHORT spare2;
- USHORT spare3;
- USHORT spare4;
- USHORT spare5;
- USHORT spare6;
- SETUP_DEVICE setupDevice[8];
- } SETUP, *PSETUP;
-
-#endif
-
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 288640756099..c94906abfee3 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -528,7 +528,7 @@ __setup("qla1280=", qla1280_setup);
#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
#define CMD_CDBP(Cmnd) Cmnd->cmnd
#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
-#define CMD_SNSLEN(Cmnd) sizeof(Cmnd->sense_buffer)
+#define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
#define CMD_RESULT(Cmnd) Cmnd->result
#define CMD_HANDLE(Cmnd) Cmnd->host_scribble
#define CMD_REQUEST(Cmnd) Cmnd->request->cmd
@@ -3715,7 +3715,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
} else
sense_sz = 0;
memset(cmd->sense_buffer + sense_sz, 0,
- sizeof(cmd->sense_buffer) - sense_sz);
+ SCSI_SENSE_BUFFERSIZE - sense_sz);
dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, "
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 71ddb5db4944..c51fd1f86639 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,4 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
- qla_dbg.o qla_sup.o qla_attr.o qla_mid.o
+ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fb388b8c07cf..adf97320574b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -9,7 +9,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
-int qla24xx_vport_disable(struct fc_vport *, bool);
+static int qla24xx_vport_disable(struct fc_vport *, bool);
/* SYSFS attributes --------------------------------------------------------- */
@@ -958,7 +958,7 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
- set_bit(LOOP_RESET_NEEDED, &ha->dpc_flags);
+ qla2x00_loop_reset(ha);
return 0;
}
@@ -967,35 +967,51 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
int rval;
- uint16_t mb_stat[1];
- link_stat_t stat_buf;
+ struct link_statistics *stats;
+ dma_addr_t stats_dma;
struct fc_host_statistics *pfc_host_stat;
- rval = QLA_FUNCTION_FAILED;
pfc_host_stat = &ha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
+ stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
+ if (stats == NULL) {
+ DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
+ __func__, ha->host_no));
+ goto done;
+ }
+ memset(stats, 0, DMA_POOL_SIZE);
+
+ rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
- rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
- sizeof(stat_buf) / 4, mb_stat);
+ rval = qla24xx_get_isp_stats(ha, stats, stats_dma);
} else if (atomic_read(&ha->loop_state) == LOOP_READY &&
!test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) &&
!test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) &&
!ha->dpc_active) {
/* Must be in a 'READY' state for statistics retrieval. */
- rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf,
- mb_stat);
+ rval = qla2x00_get_link_status(ha, ha->loop_id, stats,
+ stats_dma);
}
if (rval != QLA_SUCCESS)
- goto done;
+ goto done_free;
+
+ pfc_host_stat->link_failure_count = stats->link_fail_cnt;
+ pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
+ pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
+ pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
+ pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
+ pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
+ if (IS_FWI2_CAPABLE(ha)) {
+ pfc_host_stat->tx_frames = stats->tx_frames;
+ pfc_host_stat->rx_frames = stats->rx_frames;
+ pfc_host_stat->dumped_frames = stats->dumped_frames;
+ pfc_host_stat->nos_count = stats->nos_rcvd;
+ }
- pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt;
- pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt;
- pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt;
- pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt;
- pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt;
- pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt;
+done_free:
+ dma_pool_free(ha->s_dma_pool, stats, stats_dma);
done:
return pfc_host_stat;
}
@@ -1113,7 +1129,7 @@ vport_create_failed_2:
return FC_VPORT_FAILED;
}
-int
+static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
@@ -1124,7 +1140,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
down(&ha->vport_sem);
ha->cur_vport_count--;
- clear_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
+ clear_bit(vha->vp_idx, ha->vp_idx_map);
up(&ha->vport_sem);
kfree(vha->node_name);
@@ -1146,7 +1162,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
return 0;
}
-int
+static int
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
{
scsi_qla_host_t *vha = fc_vport->dd_data;
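
The qla_attr.c statistics path above moves from an on-stack link_stat_t to a buffer taken from the host's DMA pool, since the firmware now DMAs the counters straight into memory at the address passed with the request. A sketch of that allocate/use/free pattern; struct example_stats and fetch_stats_mbx() are placeholders standing in for the driver's link_statistics structure and mailbox command, not qla2xxx symbols:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/dmapool.h>

struct example_stats {
	u32 link_fail_cnt;
	u32 loss_sync_cnt;
	/* ... remaining counters ... */
};

/* Hypothetical stand-in for the firmware mailbox command that fills the
 * buffer at 'stats_dma'. */
static int fetch_stats_mbx(dma_addr_t stats_dma)
{
	(void)stats_dma;
	return 0;
}

static int example_get_stats(struct dma_pool *pool, struct example_stats *out)
{
	struct example_stats *stats;
	dma_addr_t stats_dma;
	int rval;

	/* Coherent buffer the hardware can DMA into. */
	stats = dma_pool_alloc(pool, GFP_KERNEL, &stats_dma);
	if (!stats)
		return -ENOMEM;
	memset(stats, 0, sizeof(*stats));

	rval = fetch_stats_mbx(stats_dma);
	if (rval == 0)
		*out = *stats;		/* copy the counters out for the caller */

	dma_pool_free(pool, stats, stats_dma);
	return rval;
}
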
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index eaa04dabcdf6..d88e98c476b0 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1051,6 +1051,7 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
struct qla25xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt;
+ struct qla2xxx_fce_chain *fcec;
risc_address = ext_mem_cnt = 0;
flags = 0;
@@ -1321,10 +1322,31 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
if (rval != QLA_SUCCESS)
goto qla25xx_fw_dump_failed_0;
+ /* Fibre Channel Trace Buffer. */
nxt = qla2xxx_copy_queues(ha, nxt);
if (ha->eft)
memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
+ /* Fibre Channel Event Buffer. */
+ if (!ha->fce)
+ goto qla25xx_fw_dump_failed_0;
+
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+
+ fcec = nxt + ntohl(ha->fw_dump->eft_size);
+ fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
+ fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
+ fce_calc_size(ha->fce_bufs));
+ fcec->size = htonl(fce_calc_size(ha->fce_bufs));
+ fcec->addr_l = htonl(LSD(ha->fce_dma));
+ fcec->addr_h = htonl(MSD(ha->fce_dma));
+
+ iter_reg = fcec->eregs;
+ for (cnt = 0; cnt < 8; cnt++)
+ *iter_reg++ = htonl(ha->fce_mb[cnt]);
+
+ memcpy(iter_reg, ha->fce, ntohl(fcec->size));
+
qla25xx_fw_dump_failed_0:
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
@@ -1428,21 +1450,6 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
printk(" sp flags=0x%x\n", sp->flags);
}
-void
-qla2x00_dump_pkt(void *pkt)
-{
- uint32_t i;
- uint8_t *data = (uint8_t *) pkt;
-
- for (i = 0; i < 64; i++) {
- if (!(i % 4))
- printk("\n%02x: ", i);
-
- printk("%02x ", data[i]);
- }
- printk("\n");
-}
-
#if defined(QL_DEBUG_ROUTINES)
/*
* qla2x00_formatted_dump_buffer
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index a50ecf0b7c84..524598afc81c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -256,6 +256,25 @@ struct qla25xx_fw_dump {
#define EFT_BYTES_PER_BUFFER 0x4000
#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
+#define FCE_NUM_BUFFERS 64
+#define FCE_BYTES_PER_BUFFER 0x400
+#define FCE_SIZE ((FCE_BYTES_PER_BUFFER) * (FCE_NUM_BUFFERS))
+#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
+
+struct qla2xxx_fce_chain {
+ uint32_t type;
+ uint32_t chain_size;
+
+ uint32_t size;
+ uint32_t addr_l;
+ uint32_t addr_h;
+ uint32_t eregs[8];
+};
+
+#define DUMP_CHAIN_VARIANT 0x80000000
+#define DUMP_CHAIN_FCE 0x7FFFFAF0
+#define DUMP_CHAIN_LAST 0x80000000
+
struct qla2xxx_fw_dump {
uint8_t signature[4];
uint32_t version;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 04e8cbca4c0d..6f129da37589 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -623,9 +623,6 @@ typedef struct {
#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
-#define TC_ENABLE 4
-#define TC_DISABLE 5
-
/* Firmware return data sizes */
#define FCAL_MAP_SIZE 128
@@ -862,14 +859,20 @@ typedef struct {
#define GLSO_SEND_RPS BIT_0
#define GLSO_USE_DID BIT_3
-typedef struct {
- uint32_t link_fail_cnt;
- uint32_t loss_sync_cnt;
- uint32_t loss_sig_cnt;
- uint32_t prim_seq_err_cnt;
- uint32_t inval_xmit_word_cnt;
- uint32_t inval_crc_cnt;
-} link_stat_t;
+struct link_statistics {
+ uint32_t link_fail_cnt;
+ uint32_t loss_sync_cnt;
+ uint32_t loss_sig_cnt;
+ uint32_t prim_seq_err_cnt;
+ uint32_t inval_xmit_word_cnt;
+ uint32_t inval_crc_cnt;
+ uint32_t unused1[0x1b];
+ uint32_t tx_frames;
+ uint32_t rx_frames;
+ uint32_t dumped_frames;
+ uint32_t unused2[2];
+ uint32_t nos_rcvd;
+};
/*
* NVRAM Command values.
@@ -2116,14 +2119,6 @@ struct qla_msix_entry {
#define WATCH_INTERVAL 1 /* number of seconds */
-/* NPIV */
-#define MAX_MULTI_ID_LOOP 126
-#define MAX_MULTI_ID_FABRIC 64
-#define MAX_NUM_VPORT_LOOP (MAX_MULTI_ID_LOOP - 1)
-#define MAX_NUM_VPORT_FABRIC (MAX_MULTI_ID_FABRIC - 1)
-#define MAX_NUM_VHBA_LOOP (MAX_MULTI_ID_LOOP - 1)
-#define MAX_NUM_VHBA_FABRIC (MAX_MULTI_ID_FABRIC - 1)
-
/*
* Linux Host Adapter structure
*/
@@ -2161,6 +2156,7 @@ typedef struct scsi_qla_host {
uint32_t gpsc_supported :1;
uint32_t vsan_enabled :1;
uint32_t npiv_supported :1;
+ uint32_t fce_enabled :1;
} flags;
atomic_t loop_state;
@@ -2273,8 +2269,7 @@ typedef struct scsi_qla_host {
int bars;
device_reg_t __iomem *iobase; /* Base I/O address */
- unsigned long pio_address;
- unsigned long pio_length;
+ resource_size_t pio_address;
#define MIN_IOBASE_LEN 0x100
/* ISP ring lock, rings, and indexes */
@@ -2416,9 +2411,9 @@ typedef struct scsi_qla_host {
#define MBX_INTR_WAIT 2
#define MBX_UPDATE_FLASH_ACTIVE 3
- struct semaphore mbx_cmd_sem; /* Serialialize mbx access */
struct semaphore vport_sem; /* Virtual port synchronization */
- struct semaphore mbx_intr_sem; /* Used for completion notification */
+ struct completion mbx_cmd_comp; /* Serialize mbx access */
+ struct completion mbx_intr_comp; /* Used for completion notification */
uint32_t mbx_flags;
#define MBX_IN_PROGRESS BIT_0
@@ -2455,6 +2450,15 @@ typedef struct scsi_qla_host {
dma_addr_t eft_dma;
void *eft;
+ struct dentry *dfs_dir;
+ struct dentry *dfs_fce;
+ dma_addr_t fce_dma;
+ void *fce;
+ uint32_t fce_bufs;
+ uint16_t fce_mb[8];
+ uint64_t fce_wr, fce_rd;
+ struct mutex fce_mutex;
+
uint8_t host_str[16];
uint32_t pci_attr;
uint16_t chip_revision;
@@ -2507,7 +2511,7 @@ typedef struct scsi_qla_host {
struct list_head vp_list; /* list of VP */
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
- uint8_t vp_idx_map[16];
+ unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / sizeof(unsigned long)];
uint16_t num_vhosts; /* number of vports created */
uint16_t num_vsans; /* number of vsan created */
uint16_t vp_idx; /* vport ID */
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
new file mode 100644
index 000000000000..1479c60441c8
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -0,0 +1,175 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2005 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *qla2x00_dfs_root;
+static atomic_t qla2x00_dfs_root_count;
+
+static int
+qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
+{
+ scsi_qla_host_t *ha = s->private;
+ uint32_t cnt;
+ uint32_t *fce;
+ uint64_t fce_start;
+
+ mutex_lock(&ha->fce_mutex);
+
+ seq_printf(s, "FCE Trace Buffer\n");
+ seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr);
+ seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
+ seq_printf(s, "FCE Enable Registers\n");
+ seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
+ ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
+ ha->fce_mb[5], ha->fce_mb[6]);
+
+ fce = (uint32_t *) ha->fce;
+ fce_start = (unsigned long long) ha->fce_dma;
+ for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
+ if (cnt % 8 == 0)
+ seq_printf(s, "\n%llx: ",
+ (unsigned long long)((cnt * 4) + fce_start));
+ else
+ seq_printf(s, " ");
+ seq_printf(s, "%08x", *fce++);
+ }
+
+ seq_printf(s, "\nEnd\n");
+
+ mutex_unlock(&ha->fce_mutex);
+
+ return 0;
+}
+
+static int
+qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *ha = inode->i_private;
+ int rval;
+
+ if (!ha->flags.fce_enabled)
+ goto out;
+
+ mutex_lock(&ha->fce_mutex);
+
+ /* Pause tracing to flush FCE buffers. */
+ rval = qla2x00_disable_fce_trace(ha, &ha->fce_wr, &ha->fce_rd);
+ if (rval)
+ qla_printk(KERN_WARNING, ha,
+ "DebugFS: Unable to disable FCE (%d).\n", rval);
+
+ ha->flags.fce_enabled = 0;
+
+ mutex_unlock(&ha->fce_mutex);
+out:
+ return single_open(file, qla2x00_dfs_fce_show, ha);
+}
+
+static int
+qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *ha = inode->i_private;
+ int rval;
+
+ if (ha->flags.fce_enabled)
+ goto out;
+
+ mutex_lock(&ha->fce_mutex);
+
+ /* Re-enable FCE tracing. */
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(ha, ha->fce_dma, ha->fce_bufs,
+ ha->fce_mb, &ha->fce_bufs);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+
+ mutex_unlock(&ha->fce_mutex);
+out:
+ return single_release(inode, file);
+}
+
+static const struct file_operations dfs_fce_ops = {
+ .open = qla2x00_dfs_fce_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = qla2x00_dfs_fce_release,
+};
+
+int
+qla2x00_dfs_setup(scsi_qla_host_t *ha)
+{
+ if (!IS_QLA25XX(ha))
+ goto out;
+ if (!ha->fce)
+ goto out;
+
+ if (qla2x00_dfs_root)
+ goto create_dir;
+
+ atomic_set(&qla2x00_dfs_root_count, 0);
+ qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
+ if (!qla2x00_dfs_root) {
+ qla_printk(KERN_NOTICE, ha,
+ "DebugFS: Unable to create root directory.\n");
+ goto out;
+ }
+
+create_dir:
+ if (ha->dfs_dir)
+ goto create_nodes;
+
+ mutex_init(&ha->fce_mutex);
+ ha->dfs_dir = debugfs_create_dir(ha->host_str, qla2x00_dfs_root);
+ if (!ha->dfs_dir) {
+ qla_printk(KERN_NOTICE, ha,
+ "DebugFS: Unable to create ha directory.\n");
+ goto out;
+ }
+
+ atomic_inc(&qla2x00_dfs_root_count);
+
+create_nodes:
+ ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, ha,
+ &dfs_fce_ops);
+ if (!ha->dfs_fce) {
+ qla_printk(KERN_NOTICE, ha,
+ "DebugFS: Unable to create fce node.\n");
+ goto out;
+ }
+out:
+ return 0;
+}
+
+int
+qla2x00_dfs_remove(scsi_qla_host_t *ha)
+{
+ if (ha->dfs_fce) {
+ debugfs_remove(ha->dfs_fce);
+ ha->dfs_fce = NULL;
+ }
+
+ if (ha->dfs_dir) {
+ debugfs_remove(ha->dfs_dir);
+ ha->dfs_dir = NULL;
+ atomic_dec(&qla2x00_dfs_root_count);
+ }
+
+ if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
+ qla2x00_dfs_root) {
+ debugfs_remove(qla2x00_dfs_root);
+ qla2x00_dfs_root = NULL;
+ }
+
+ return 0;
+}
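
The new qla_dfs.c follows the standard debugfs + seq_file recipe: a show() callback bound with single_open(), file_operations delegating to seq_read/seq_lseek/single_release, and debugfs_create_dir()/debugfs_create_file() to publish the node. A stripped-down, generic sketch of the same pattern; all example_* names are hypothetical and none of this is qla2xxx code:

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;
static struct dentry *example_file;

static int example_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "hello from debugfs\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* i_private carries the pointer passed to debugfs_create_file(). */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir)
		return -ENOMEM;
	example_file = debugfs_create_file("status", S_IRUSR, example_dir,
	    NULL, &example_fops);
	if (!example_file) {
		debugfs_remove(example_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove(example_file);	/* remove the node, then its directory */
	debugfs_remove(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
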
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 25364b1aaf12..9337e138ed63 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -952,9 +952,31 @@ struct device_reg_24xx {
uint32_t iobase_sdata;
};
+/* Trace Control *************************************************************/
+
+#define TC_AEN_DISABLE 0
+
+#define TC_EFT_ENABLE 4
+#define TC_EFT_DISABLE 5
+
+#define TC_FCE_ENABLE 8
+#define TC_FCE_OPTIONS 0
+#define TC_FCE_DEFAULT_RX_SIZE 2112
+#define TC_FCE_DEFAULT_TX_SIZE 2112
+#define TC_FCE_DISABLE 9
+#define TC_FCE_DISABLE_TRACE BIT_0
+
/* MID Support ***************************************************************/
-#define MAX_MID_VPS 125
+#define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */
+#define MAX_MULTI_ID_FABRIC 256 /* ... */
+
+#define for_each_mapped_vp_idx(_ha, _idx) \
+ for (_idx = find_next_bit((_ha)->vp_idx_map, \
+ (_ha)->max_npiv_vports + 1, 1); \
+ _idx <= (_ha)->max_npiv_vports; \
+ _idx = find_next_bit((_ha)->vp_idx_map, \
+ (_ha)->max_npiv_vports + 1, _idx + 1)) \
struct mid_conf_entry_24xx {
uint16_t reserved_1;
@@ -982,7 +1004,7 @@ struct mid_init_cb_24xx {
uint16_t count;
uint16_t options;
- struct mid_conf_entry_24xx entries[MAX_MID_VPS];
+ struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
};
@@ -1002,10 +1024,6 @@ struct mid_db_entry_24xx {
uint8_t reserved_1;
};
-struct mid_db_24xx {
- struct mid_db_entry_24xx entries[MAX_MID_VPS];
-};
-
/*
* Virtual Fabric ID type definition.
*/
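
The for_each_mapped_vp_idx() macro added above iterates a bitmap of in-use virtual-port indexes with find_next_bit(); it pairs with the qla_def.h change that turns vp_idx_map from a byte array into an unsigned long bitmap sized by MAX_MULTI_ID_FABRIC. A standalone sketch of that bookkeeping, with illustrative names and sizes only:

#include <linux/bitops.h>
#include <linux/bitmap.h>

#define EXAMPLE_MAX_VPORTS 256	/* one bit per possible vport index */

static DECLARE_BITMAP(example_vp_idx_map, EXAMPLE_MAX_VPORTS);

/* Mark one index as mapped, then visit every mapped index the same way
 * for_each_mapped_vp_idx() does: find_next_bit() starting at index 1,
 * since index 0 belongs to the physical port. */
static void example_scan_vports(void)
{
	unsigned int idx;

	set_bit(3, example_vp_idx_map);

	for (idx = find_next_bit(example_vp_idx_map, EXAMPLE_MAX_VPORTS, 1);
	     idx < EXAMPLE_MAX_VPORTS;
	     idx = find_next_bit(example_vp_idx_map, EXAMPLE_MAX_VPORTS, idx + 1)) {
		/* ... act on mapped vport 'idx' ... */
	}
}
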
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 09cb2a908059..ba35fc26ce6b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,33 +65,25 @@ extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
extern int num_hosts;
+extern int qla2x00_loop_reset(scsi_qla_host_t *);
+
/*
* Global Functions in qla_mid.c source file.
*/
-extern struct scsi_host_template qla2x00_driver_template;
extern struct scsi_host_template qla24xx_driver_template;
extern struct scsi_transport_template *qla2xxx_transport_vport_template;
-extern uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
extern void qla2x00_timer(scsi_qla_host_t *);
extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
-extern void qla2x00_stop_timer(scsi_qla_host_t *);
-extern uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *);
extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
extern int qla24xx_disable_vp (scsi_qla_host_t *);
extern int qla24xx_enable_vp (scsi_qla_host_t *);
-extern void qla2x00_mem_free(scsi_qla_host_t *);
extern int qla24xx_control_vp(scsi_qla_host_t *, int );
extern int qla24xx_modify_vp_config(scsi_qla_host_t *);
extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t);
extern void qla2x00_vp_stop_timer(scsi_qla_host_t *);
extern int qla24xx_configure_vhba (scsi_qla_host_t *);
-extern int qla24xx_get_vp_entry(scsi_qla_host_t *, uint16_t, int);
-extern int qla24xx_get_vp_database(scsi_qla_host_t *, uint16_t);
-extern int qla2x00_do_dpc_vp(scsi_qla_host_t *);
extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
struct vp_rpt_id_entry_24xx *);
-extern scsi_qla_host_t * qla24xx_find_vhost_by_name(scsi_qla_host_t *,
- uint8_t *);
extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
@@ -103,8 +95,6 @@ extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
-extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
-
extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
@@ -113,7 +103,6 @@ extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *);
extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
extern void qla2x00_vp_abort_isp(scsi_qla_host_t *);
-extern int qla24xx_vport_delete(struct fc_vport *);
/*
* Global Function Prototypes in qla_iocb.c source file.
@@ -222,21 +211,16 @@ extern int
qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
extern int
-qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *,
- uint16_t *);
+qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
+ dma_addr_t);
extern int
-qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *);
+qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
+ dma_addr_t);
extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
extern int qla24xx_abort_target(fc_port_t *);
-extern int qla2x00_system_error(scsi_qla_host_t *);
-
-extern int
-qla2x00_get_serdes_params(scsi_qla_host_t *, uint16_t *, uint16_t *,
- uint16_t *);
-
extern int
qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
@@ -244,13 +228,19 @@ extern int
qla2x00_stop_firmware(scsi_qla_host_t *);
extern int
-qla2x00_trace_control(scsi_qla_host_t *, uint16_t, dma_addr_t, uint16_t);
+qla2x00_enable_eft_trace(scsi_qla_host_t *, dma_addr_t, uint16_t);
+extern int
+qla2x00_disable_eft_trace(scsi_qla_host_t *);
extern int
-qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t);
+qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
+ uint32_t *);
extern int
-qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t *, uint16_t *);
+qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
+
+extern int
+qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t);
extern int
qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
@@ -270,11 +260,7 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_sup.c source file.
*/
-extern void qla2x00_lock_nvram_access(scsi_qla_host_t *);
-extern void qla2x00_unlock_nvram_access(scsi_qla_host_t *);
extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
-extern uint16_t qla2x00_get_nvram_word(scsi_qla_host_t *, uint32_t);
-extern void qla2x00_write_nvram_word(scsi_qla_host_t *, uint32_t, uint16_t);
extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
@@ -321,7 +307,6 @@ extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
-extern void qla2x00_dump_pkt(void *);
/*
* Global Function Prototypes in qla_gs.c source file.
@@ -356,4 +341,10 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+
+/*
+ * Global Function Prototypes in qla_dfs.c source file.
+ */
+extern int qla2x00_dfs_setup(scsi_qla_host_t *);
+extern int qla2x00_dfs_remove(scsi_qla_host_t *);
#endif /* _QLA_GBL_H */
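
The two prototypes appended to qla_gbl.h hook the new qla_dfs.c file into the driver: qla2x00_dfs_setup() is called from probe and qla2x00_dfs_remove() from remove (see the qla_os.c hunks later in this diff). qla_dfs.c itself is not part of this excerpt, so the sketch below only illustrates the usual debugfs setup/teardown shape such a pair takes; the directory name, file name and every example_* identifier are placeholders, not the driver's real code.

/* Minimal debugfs setup/teardown sketch -- illustrative only, not qla_dfs.c. */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;	/* hypothetical module-wide directory */
static struct dentry *example_file;

static int example_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "trace buffer contents would be dumped here\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __init example_dfs_setup(void)
{
	example_dir = debugfs_create_dir("qla2xxx_example", NULL);
	if (!example_dir)
		return -ENOMEM;
	example_file = debugfs_create_file("fce", S_IRUSR, example_dir,
	    NULL, &example_fops);
	return example_file ? 0 : -ENOMEM;
}

static void __exit example_dfs_remove(void)
{
	debugfs_remove(example_file);
	debugfs_remove(example_dir);
}

module_init(example_dfs_setup);
module_exit(example_dfs_remove);
MODULE_LICENSE("GPL");
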
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 191dafd89be0..d0633ca894be 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -732,9 +732,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
{
int rval;
uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
- eft_size;
- dma_addr_t eft_dma;
- void *eft;
+ eft_size, fce_size;
+ dma_addr_t tc_dma;
+ void *tc;
if (ha->fw_dump) {
qla_printk(KERN_WARNING, ha,
@@ -743,7 +743,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
}
ha->fw_dumped = 0;
- fixed_size = mem_size = eft_size = 0;
+ fixed_size = mem_size = eft_size = fce_size = 0;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
fixed_size = sizeof(struct qla2100_fw_dump);
} else if (IS_QLA23XX(ha)) {
@@ -758,21 +758,21 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
sizeof(uint32_t);
/* Allocate memory for Extended Trace Buffer. */
- eft = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &eft_dma,
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
GFP_KERNEL);
- if (!eft) {
+ if (!tc) {
qla_printk(KERN_WARNING, ha, "Unable to allocate "
"(%d KB) for EFT.\n", EFT_SIZE / 1024);
goto cont_alloc;
}
- rval = qla2x00_trace_control(ha, TC_ENABLE, eft_dma,
- EFT_NUM_BUFFERS);
+ memset(tc, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
if (rval) {
qla_printk(KERN_WARNING, ha, "Unable to initialize "
"EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, eft,
- eft_dma);
+ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
+ tc_dma);
goto cont_alloc;
}
@@ -780,9 +780,40 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
EFT_SIZE / 1024);
eft_size = EFT_SIZE;
- memset(eft, 0, eft_size);
- ha->eft_dma = eft_dma;
- ha->eft = eft;
+ ha->eft_dma = tc_dma;
+ ha->eft = tc;
+
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha))
+ goto cont_alloc;
+
+ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ qla_printk(KERN_WARNING, ha, "Unable to allocate "
+ "(%d KB) for FCE.\n", FCE_SIZE / 1024);
+ goto cont_alloc;
+ }
+
+ memset(tc, 0, FCE_SIZE);
+ rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS,
+ ha->fce_mb, &ha->fce_bufs);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha, "Unable to initialize "
+ "FCE (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
+ tc_dma);
+ ha->flags.fce_enabled = 0;
+ goto cont_alloc;
+ }
+
+ qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
+ FCE_SIZE / 1024);
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + EFT_SIZE;
+ ha->flags.fce_enabled = 1;
+ ha->fce_dma = tc_dma;
+ ha->fce = tc;
}
cont_alloc:
req_q_size = ha->request_q_length * sizeof(request_t);
@@ -790,7 +821,7 @@ cont_alloc:
dump_size = offsetof(struct qla2xxx_fw_dump, isp);
dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
- eft_size;
+ eft_size + fce_size;
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
@@ -922,9 +953,9 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
ha->flags.npiv_supported = 1;
if ((!ha->max_npiv_vports) ||
((ha->max_npiv_vports + 1) %
- MAX_MULTI_ID_FABRIC))
+ MIN_MULTI_ID_FABRIC))
ha->max_npiv_vports =
- MAX_NUM_VPORT_FABRIC;
+ MIN_MULTI_ID_FABRIC - 1;
}
if (ql2xallocfwdump)
@@ -1162,7 +1193,10 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
- mid_init_cb->count = ha->max_npiv_vports;
+ if (ha->flags.npiv_supported)
+ mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
+
+ mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
rval = qla2x00_init_firmware(ha, ha->init_cb_size);
if (rval) {
@@ -2566,14 +2600,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
/* Bypass virtual ports of the same host. */
if (pha->num_vhosts) {
- vp_index = find_next_bit(
- (unsigned long *)pha->vp_idx_map,
- MAX_MULTI_ID_FABRIC + 1, 1);
-
- for (;vp_index <= MAX_MULTI_ID_FABRIC;
- vp_index = find_next_bit(
- (unsigned long *)pha->vp_idx_map,
- MAX_MULTI_ID_FABRIC + 1, vp_index + 1)) {
+ for_each_mapped_vp_idx(pha, vp_index) {
empty_vp_index = 1;
found_vp = 0;
list_for_each_entry(vha, &pha->vp_list,
@@ -2592,7 +2619,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
new_fcport->d_id.b24 == vha->d_id.b24)
break;
}
- if (vp_index <= MAX_MULTI_ID_FABRIC)
+
+ if (vp_index <= pha->max_npiv_vports)
continue;
}
@@ -3245,7 +3273,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
if (ha->eft) {
- rval = qla2x00_trace_control(ha, TC_ENABLE,
+ rval = qla2x00_enable_eft_trace(ha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
qla_printk(KERN_WARNING, ha,
@@ -3253,6 +3281,21 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
"(%d).\n", rval);
}
}
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0,
+ fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(ha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+ &ha->fce_bufs);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize FCE "
+ "(%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
} else { /* failed the ISP abort */
ha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
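
The qla2x00_alloc_fw_dump() hunk above applies one pattern twice: allocate a DMA-coherent trace buffer, ask the firmware to enable the trace that uses it, and free the buffer again if the firmware refuses. A stripped-down version of that pattern follows; the generic device pointer, TRACE_BUF_SIZE constant and the enable_trace() callback are stand-ins for the driver's EFT_SIZE/FCE_SIZE and qla2x00_enable_eft_trace()/qla2x00_enable_fce_trace() calls.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

#define TRACE_BUF_SIZE	(64 * 1024)	/* stand-in for EFT_SIZE / FCE_SIZE */

/*
 * Allocate a coherent buffer, hand it to the firmware, and undo the
 * allocation if the firmware rejects it.  On failure the caller simply
 * continues without the trace, exactly like the cont_alloc path above.
 */
static void *setup_trace_buffer(struct device *dev, dma_addr_t *dma,
    int (*enable_trace)(struct device *, dma_addr_t))
{
	void *tc;

	tc = dma_alloc_coherent(dev, TRACE_BUF_SIZE, dma, GFP_KERNEL);
	if (!tc)
		return NULL;

	memset(tc, 0, TRACE_BUF_SIZE);
	if (enable_trace(dev, *dma)) {
		dma_free_coherent(dev, TRACE_BUF_SIZE, tc, *dma);
		return NULL;
	}
	return tc;			/* caller stashes tc and *dma in the HA */
}
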
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1104bd2eed40..642a0c3f09c6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -104,7 +104,7 @@ qla2100_intr_handler(int irq, void *dev_id)
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- up(&ha->mbx_intr_sem);
+ complete(&ha->mbx_intr_comp);
}
return (IRQ_HANDLED);
@@ -216,7 +216,7 @@ qla2300_intr_handler(int irq, void *dev_id)
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- up(&ha->mbx_intr_sem);
+ complete(&ha->mbx_intr_comp);
}
return (IRQ_HANDLED);
@@ -347,10 +347,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_SYSTEM_ERR: /* System Error */
- mb[1] = RD_MAILBOX_REG(ha, reg, 1);
- mb[2] = RD_MAILBOX_REG(ha, reg, 2);
- mb[3] = RD_MAILBOX_REG(ha, reg, 3);
-
qla_printk(KERN_INFO, ha,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
mb[1], mb[2], mb[3]);
@@ -579,12 +575,15 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
/* Check if the Vport has issued a SCR */
if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
break;
+ /* Only handle SCNs for our Vport index. */
+ if (ha->flags.npiv_supported && ha->vp_idx != mb[3])
+ break;
DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
ha->host_no));
DEBUG(printk(KERN_INFO
- "scsi(%ld): RSCN database changed -- %04x %04x.\n",
- ha->host_no, mb[1], mb[2]));
+ "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
+ ha->host_no, mb[1], mb[2], mb[3]));
rscn_entry = (mb[1] << 16) | mb[2];
host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
@@ -823,6 +822,35 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}
+static inline void
+qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
+{
+ struct scsi_cmnd *cp = sp->cmd;
+
+ if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+ sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ CMD_ACTUAL_SNSLEN(cp) = sense_len;
+ sp->request_sense_length = sense_len;
+ sp->request_sense_ptr = cp->sense_buffer;
+ if (sp->request_sense_length > 32)
+ sense_len = 32;
+
+ memcpy(cp->sense_buffer, sense_data, sense_len);
+
+ sp->request_sense_ptr += sense_len;
+ sp->request_sense_length -= sense_len;
+ if (sp->request_sense_length != 0)
+ sp->ha->status_srb = sp;
+
+ DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
+ "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel,
+ cp->device->id, cp->device->lun, cp, cp->serial_number));
+ if (sense_len)
+ DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
+ CMD_ACTUAL_SNSLEN(cp)));
+}
+
/**
* qla2x00_status_entry() - Process a Status IOCB entry.
* @ha: SCSI driver HA context
@@ -977,36 +1005,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
if (lscsi_status != SS_CHECK_CONDITION)
break;
- /* Copy Sense Data into sense buffer. */
- memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));
-
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (!(scsi_status & SS_SENSE_LEN_VALID))
break;
- if (sense_len >= sizeof(cp->sense_buffer))
- sense_len = sizeof(cp->sense_buffer);
-
- CMD_ACTUAL_SNSLEN(cp) = sense_len;
- sp->request_sense_length = sense_len;
- sp->request_sense_ptr = cp->sense_buffer;
-
- if (sp->request_sense_length > 32)
- sense_len = 32;
-
- memcpy(cp->sense_buffer, sense_data, sense_len);
-
- sp->request_sense_ptr += sense_len;
- sp->request_sense_length -= sense_len;
- if (sp->request_sense_length != 0)
- ha->status_srb = sp;
-
- DEBUG5(printk("%s(): Check condition Sense data, "
- "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", __func__,
- ha->host_no, cp->device->channel, cp->device->id,
- cp->device->lun, cp, cp->serial_number));
- if (sense_len)
- DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
- CMD_ACTUAL_SNSLEN(cp)));
+ qla2x00_handle_sense(sp, sense_data, sense_len);
break;
case CS_DATA_UNDERRUN:
@@ -1061,34 +1064,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
if (lscsi_status != SS_CHECK_CONDITION)
break;
- /* Copy Sense Data into sense buffer */
- memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));
-
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (!(scsi_status & SS_SENSE_LEN_VALID))
break;
- if (sense_len >= sizeof(cp->sense_buffer))
- sense_len = sizeof(cp->sense_buffer);
-
- CMD_ACTUAL_SNSLEN(cp) = sense_len;
- sp->request_sense_length = sense_len;
- sp->request_sense_ptr = cp->sense_buffer;
-
- if (sp->request_sense_length > 32)
- sense_len = 32;
-
- memcpy(cp->sense_buffer, sense_data, sense_len);
-
- sp->request_sense_ptr += sense_len;
- sp->request_sense_length -= sense_len;
- if (sp->request_sense_length != 0)
- ha->status_srb = sp;
-
- DEBUG5(printk("%s(): Check condition Sense data, "
- "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
- __func__, ha->host_no, cp->device->channel,
- cp->device->id, cp->device->lun, cp,
- cp->serial_number));
+ qla2x00_handle_sense(sp, sense_data, sense_len);
/*
* In case of a Underrun condition, set both the lscsi
@@ -1108,10 +1088,6 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
cp->result = DID_ERROR << 16 | lscsi_status;
}
-
- if (sense_len)
- DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
- CMD_ACTUAL_SNSLEN(cp)));
} else {
/*
* If RISC reports underrun and target does not report
@@ -1621,7 +1597,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- up(&ha->mbx_intr_sem);
+ complete(&ha->mbx_intr_comp);
}
return IRQ_HANDLED;
@@ -1758,7 +1734,7 @@ qla24xx_msix_default(int irq, void *dev_id)
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- up(&ha->mbx_intr_sem);
+ complete(&ha->mbx_intr_comp);
}
return IRQ_HANDLED;
@@ -1853,6 +1829,18 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
goto skip_msix;
}
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ (ha->pdev->subsystem_device == 0x7040 ||
+ ha->pdev->subsystem_device == 0x7041 ||
+ ha->pdev->subsystem_device == 0x1705)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
+ ha->pdev->subsystem_vendor,
+ ha->pdev->subsystem_device));
+
+ goto skip_msi;
+ }
+
ret = qla24xx_enable_msix(ha);
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha,
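
Every interrupt handler in this file now signals mailbox completion with complete() on ha->mbx_intr_comp instead of up() on the old semaphore; qla_mbx.c (the next file in this diff) does the matching wait. The pairing is simple enough to show in isolation; the ISR body, structure and timeout below are illustrative, not the driver's exact code.

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_hw {			/* stand-in for scsi_qla_host_t */
	struct completion mbx_intr_comp;
};

static void example_init(struct example_hw *hw)
{
	init_completion(&hw->mbx_intr_comp);	/* done once at probe time */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_hw *hw = dev_id;

	/* ... read and acknowledge the mailbox interrupt ... */
	complete(&hw->mbx_intr_comp);	/* wake the sleeping mailbox caller */
	return IRQ_HANDLED;
}

static int example_wait_for_mbx(struct example_hw *hw, unsigned long tov)
{
	/* Replaces the old down(&mbx_intr_sem) plus helper-timer pair. */
	if (!wait_for_completion_timeout(&hw->mbx_intr_comp, tov * HZ))
		return -ETIMEDOUT;	/* no interrupt arrived in time */
	return 0;
}
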
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index ccd662a6f5dc..0c10c0b0fb73 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -8,19 +8,6 @@
#include <linux/delay.h>
-static void
-qla2x00_mbx_sem_timeout(unsigned long data)
-{
- struct semaphore *sem_ptr = (struct semaphore *)data;
-
- DEBUG11(printk("qla2x00_sem_timeout: entered.\n"));
-
- if (sem_ptr != NULL) {
- up(sem_ptr);
- }
-
- DEBUG11(printk("qla2x00_mbx_sem_timeout: exiting.\n"));
-}
/*
* qla2x00_mailbox_command
@@ -47,7 +34,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
int rval;
unsigned long flags = 0;
device_reg_t __iomem *reg;
- struct timer_list tmp_intr_timer;
uint8_t abort_active;
uint8_t io_lock_on;
uint16_t command;
@@ -72,7 +58,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
* non ISP abort time.
*/
if (!abort_active) {
- if (qla2x00_down_timeout(&ha->mbx_cmd_sem, mcp->tov * HZ)) {
+ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp,
+ mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
"Exiting.\n", __func__, ha->host_no));
@@ -135,22 +122,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
/* Wait for mbx cmd completion until timeout */
if (!abort_active && io_lock_on) {
- /* sleep on completion semaphore */
- DEBUG11(printk("%s(%ld): INTERRUPT MODE. Initializing timer.\n",
- __func__, ha->host_no));
-
- init_timer(&tmp_intr_timer);
- tmp_intr_timer.data = (unsigned long)&ha->mbx_intr_sem;
- tmp_intr_timer.expires = jiffies + mcp->tov * HZ;
- tmp_intr_timer.function =
- (void (*)(unsigned long))qla2x00_mbx_sem_timeout;
-
- DEBUG11(printk("%s(%ld): Adding timer.\n", __func__,
- ha->host_no));
- add_timer(&tmp_intr_timer);
-
- DEBUG11(printk("%s(%ld): going to unlock & sleep. "
- "time=0x%lx.\n", __func__, ha->host_no, jiffies));
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
@@ -160,17 +131,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- /* Wait for either the timer to expire
- * or the mbox completion interrupt
- */
- down(&ha->mbx_intr_sem);
+ wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
- DEBUG11(printk("%s(%ld): waking up. time=0x%lx\n", __func__,
- ha->host_no, jiffies));
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- /* delete the timer */
- del_timer(&tmp_intr_timer);
} else {
DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
ha->host_no, command));
@@ -299,7 +263,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
/* Allow next mbx cmd to come in. */
if (!abort_active)
- up(&ha->mbx_cmd_sem);
+ complete(&ha->mbx_cmd_comp);
if (rval) {
DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
@@ -905,7 +869,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
mcp->mb[9] = ha->vp_idx;
- mcp->out_mb = MBX_0;
+ mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = 0;
@@ -1016,7 +980,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
ha->host_no));
- if (ha->flags.npiv_supported)
+ if (ha->fw_attributes & BIT_2)
mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
else
mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
@@ -2042,29 +2006,20 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
*/
int
qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
- link_stat_t *ret_buf, uint16_t *status)
+ struct link_statistics *stats, dma_addr_t stats_dma)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- link_stat_t *stat_buf;
- dma_addr_t stat_buf_dma;
+ uint32_t *siter, *diter, dwords;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
- if (stat_buf == NULL) {
- DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
- __func__, ha->host_no));
- return BIT_0;
- }
- memset(stat_buf, 0, sizeof(link_stat_t));
-
mcp->mb[0] = MBC_GET_LINK_STATUS;
- mcp->mb[2] = MSW(stat_buf_dma);
- mcp->mb[3] = LSW(stat_buf_dma);
- mcp->mb[6] = MSW(MSD(stat_buf_dma));
- mcp->mb[7] = LSW(MSD(stat_buf_dma));
+ mcp->mb[2] = MSW(stats_dma);
+ mcp->mb[3] = LSW(stats_dma);
+ mcp->mb[6] = MSW(MSD(stats_dma));
+ mcp->mb[7] = LSW(MSD(stats_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_0;
if (IS_FWI2_CAPABLE(ha)) {
@@ -2089,78 +2044,43 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
__func__, ha->host_no, mcp->mb[0]));
- status[0] = mcp->mb[0];
- rval = BIT_1;
+ rval = QLA_FUNCTION_FAILED;
} else {
- /* copy over data -- firmware data is LE. */
- ret_buf->link_fail_cnt =
- le32_to_cpu(stat_buf->link_fail_cnt);
- ret_buf->loss_sync_cnt =
- le32_to_cpu(stat_buf->loss_sync_cnt);
- ret_buf->loss_sig_cnt =
- le32_to_cpu(stat_buf->loss_sig_cnt);
- ret_buf->prim_seq_err_cnt =
- le32_to_cpu(stat_buf->prim_seq_err_cnt);
- ret_buf->inval_xmit_word_cnt =
- le32_to_cpu(stat_buf->inval_xmit_word_cnt);
- ret_buf->inval_crc_cnt =
- le32_to_cpu(stat_buf->inval_crc_cnt);
-
- DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d "
- "loss_sync=%d loss_sig=%d seq_err=%d "
- "inval_xmt_word=%d inval_crc=%d.\n", __func__,
- ha->host_no, stat_buf->link_fail_cnt,
- stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt,
- stat_buf->prim_seq_err_cnt,
- stat_buf->inval_xmit_word_cnt,
- stat_buf->inval_crc_cnt));
+ /* Copy over data -- firmware data is LE. */
+ dwords = offsetof(struct link_statistics, unused1) / 4;
+ siter = diter = &stats->link_fail_cnt;
+ while (dwords--)
+ *diter++ = le32_to_cpu(*siter++);
}
} else {
/* Failed. */
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
ha->host_no, rval));
- rval = BIT_1;
}
- dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma);
-
return rval;
}
int
-qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
- uint16_t *status)
+qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
+ dma_addr_t stats_dma)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *sbuf, *siter;
- dma_addr_t sbuf_dma;
+ uint32_t *siter, *diter, dwords;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- if (dwords > (DMA_POOL_SIZE / 4)) {
- DEBUG2_3_11(printk("%s(%ld): Unabled to retrieve %d DWORDs "
- "(max %d).\n", __func__, ha->host_no, dwords,
- DMA_POOL_SIZE / 4));
- return BIT_0;
- }
- sbuf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &sbuf_dma);
- if (sbuf == NULL) {
- DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
- __func__, ha->host_no));
- return BIT_0;
- }
- memset(sbuf, 0, DMA_POOL_SIZE);
-
mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
- mcp->mb[2] = MSW(sbuf_dma);
- mcp->mb[3] = LSW(sbuf_dma);
- mcp->mb[6] = MSW(MSD(sbuf_dma));
- mcp->mb[7] = LSW(MSD(sbuf_dma));
- mcp->mb[8] = dwords;
+ mcp->mb[2] = MSW(stats_dma);
+ mcp->mb[3] = LSW(stats_dma);
+ mcp->mb[6] = MSW(MSD(stats_dma));
+ mcp->mb[7] = LSW(MSD(stats_dma));
+ mcp->mb[8] = sizeof(struct link_statistics) / 4;
+ mcp->mb[9] = ha->vp_idx;
mcp->mb[10] = 0;
- mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = IOCTL_CMD;
@@ -2170,23 +2090,20 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
__func__, ha->host_no, mcp->mb[0]));
- status[0] = mcp->mb[0];
- rval = BIT_1;
+ rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
- siter = sbuf;
+ dwords = sizeof(struct link_statistics) / 4;
+ siter = diter = &stats->link_fail_cnt;
while (dwords--)
- *dwbuf++ = le32_to_cpu(*siter++);
+ *diter++ = le32_to_cpu(*siter++);
}
} else {
/* Failed. */
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
ha->host_no, rval));
- rval = BIT_1;
}
- dma_pool_free(ha->s_dma_pool, sbuf, sbuf_dma);
-
return rval;
}
@@ -2331,6 +2248,8 @@ atarget_done:
return rval;
}
+#if 0
+
int
qla2x00_system_error(scsi_qla_host_t *ha)
{
@@ -2360,47 +2279,7 @@ qla2x00_system_error(scsi_qla_host_t *ha)
return rval;
}
-/**
- * qla2x00_get_serdes_params() -
- * @ha: HA context
- *
- * Returns
- */
-int
-qla2x00_get_serdes_params(scsi_qla_host_t *ha, uint16_t *sw_em_1g,
- uint16_t *sw_em_2g, uint16_t *sw_em_4g)
-{
- int rval;
- mbx_cmd_t mc;
- mbx_cmd_t *mcp = &mc;
-
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
-
- mcp->mb[0] = MBC_SERDES_PARAMS;
- mcp->mb[1] = 0;
- mcp->out_mb = MBX_1|MBX_0;
- mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_0;
- mcp->tov = 30;
- mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
-
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- ha->host_no, rval, mcp->mb[0]));
- } else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
-
- if (sw_em_1g)
- *sw_em_1g = mcp->mb[2];
- if (sw_em_2g)
- *sw_em_2g = mcp->mb[3];
- if (sw_em_4g)
- *sw_em_4g = mcp->mb[4];
- }
-
- return rval;
-}
+#endif /* 0 */
/**
* qla2x00_set_serdes_params() -
@@ -2471,7 +2350,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
}
int
-qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
+qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
uint16_t buffers)
{
int rval;
@@ -2484,22 +2363,18 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
- mcp->mb[1] = ctrl;
- mcp->out_mb = MBX_1|MBX_0;
+ mcp->mb[1] = TC_EFT_ENABLE;
+ mcp->mb[2] = LSW(eft_dma);
+ mcp->mb[3] = MSW(eft_dma);
+ mcp->mb[4] = LSW(MSD(eft_dma));
+ mcp->mb[5] = MSW(MSD(eft_dma));
+ mcp->mb[6] = buffers;
+ mcp->mb[7] = TC_AEN_DISABLE;
+ mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_1|MBX_0;
- if (ctrl == TC_ENABLE) {
- mcp->mb[2] = LSW(eft_dma);
- mcp->mb[3] = MSW(eft_dma);
- mcp->mb[4] = LSW(MSD(eft_dma));
- mcp->mb[5] = MSW(MSD(eft_dma));
- mcp->mb[6] = buffers;
- mcp->mb[7] = 0;
- mcp->out_mb |= MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2;
- }
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
-
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
__func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
@@ -2511,8 +2386,7 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
}
int
-qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
- uint16_t off, uint16_t count)
+qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
{
int rval;
mbx_cmd_t mc;
@@ -2523,24 +2397,16 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- mcp->mb[0] = MBC_READ_SFP;
- mcp->mb[1] = addr;
- mcp->mb[2] = MSW(sfp_dma);
- mcp->mb[3] = LSW(sfp_dma);
- mcp->mb[6] = MSW(MSD(sfp_dma));
- mcp->mb[7] = LSW(MSD(sfp_dma));
- mcp->mb[8] = count;
- mcp->mb[9] = off;
- mcp->mb[10] = 0;
- mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_0;
+ mcp->mb[0] = MBC_TRACE_CONTROL;
+ mcp->mb[1] = TC_EFT_DISABLE;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
-
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- ha->host_no, rval, mcp->mb[0]));
+ DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
+ __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
}
@@ -2549,176 +2415,168 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
}
int
-qla2x00_get_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
- uint16_t *port_speed, uint16_t *mb)
+qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
+ uint16_t buffers, uint16_t *mb, uint32_t *dwords)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_IIDMA_CAPABLE(ha))
+ if (!IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- mcp->mb[0] = MBC_PORT_PARAMS;
- mcp->mb[1] = loop_id;
- mcp->mb[2] = mcp->mb[3] = mcp->mb[4] = mcp->mb[5] = 0;
- mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->mb[0] = MBC_TRACE_CONTROL;
+ mcp->mb[1] = TC_FCE_ENABLE;
+ mcp->mb[2] = LSW(fce_dma);
+ mcp->mb[3] = MSW(fce_dma);
+ mcp->mb[4] = LSW(MSD(fce_dma));
+ mcp->mb[5] = MSW(MSD(fce_dma));
+ mcp->mb[6] = buffers;
+ mcp->mb[7] = TC_AEN_DISABLE;
+ mcp->mb[8] = 0;
+ mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
+ mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
+ mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
+ MBX_1|MBX_0;
+ mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
-
- /* Return mailbox statuses. */
- if (mb != NULL) {
- mb[0] = mcp->mb[0];
- mb[1] = mcp->mb[1];
- mb[3] = mcp->mb[3];
- mb[4] = mcp->mb[4];
- mb[5] = mcp->mb[5];
- }
-
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
+ __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
- if (port_speed)
- *port_speed = mcp->mb[3];
+
+ if (mb)
+ memcpy(mb, mcp->mb, 8 * sizeof(*mb));
+ if (dwords)
+ *dwords = mcp->mb[6];
}
return rval;
}
int
-qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
- uint16_t port_speed, uint16_t *mb)
+qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_IIDMA_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- mcp->mb[0] = MBC_PORT_PARAMS;
- mcp->mb[1] = loop_id;
- mcp->mb[2] = BIT_0;
- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
- mcp->mb[4] = mcp->mb[5] = 0;
- mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->mb[0] = MBC_TRACE_CONTROL;
+ mcp->mb[1] = TC_FCE_DISABLE;
+ mcp->mb[2] = TC_FCE_DISABLE_TRACE;
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
+ MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
-
- /* Return mailbox statuses. */
- if (mb != NULL) {
- mb[0] = mcp->mb[0];
- mb[1] = mcp->mb[1];
- mb[3] = mcp->mb[3];
- mb[4] = mcp->mb[4];
- mb[5] = mcp->mb[5];
- }
-
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
+ __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+
+ if (wr)
+ *wr = (uint64_t) mcp->mb[5] << 48 |
+ (uint64_t) mcp->mb[4] << 32 |
+ (uint64_t) mcp->mb[3] << 16 |
+ (uint64_t) mcp->mb[2];
+ if (rd)
+ *rd = (uint64_t) mcp->mb[9] << 48 |
+ (uint64_t) mcp->mb[8] << 32 |
+ (uint64_t) mcp->mb[7] << 16 |
+ (uint64_t) mcp->mb[6];
}
return rval;
}
-/*
- * qla24xx_get_vp_database
- * Get the VP's database for all configured ports.
- *
- * Input:
- * ha = adapter block pointer.
- * size = size of initialization control block.
- *
- * Returns:
- * qla2x00 local function return status code.
- *
- * Context:
- * Kernel context.
- */
int
-qla24xx_get_vp_database(scsi_qla_host_t *ha, uint16_t size)
+qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
+ uint16_t off, uint16_t count)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("scsi(%ld):%s - entered.\n",
- ha->host_no, __func__));
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
- mcp->mb[0] = MBC_MID_GET_VP_DATABASE;
- mcp->mb[2] = MSW(ha->init_cb_dma);
- mcp->mb[3] = LSW(ha->init_cb_dma);
- mcp->mb[4] = 0;
- mcp->mb[5] = 0;
- mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
- mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
- mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
- mcp->in_mb = MBX_1|MBX_0;
- mcp->buf_size = size;
- mcp->flags = MBX_DMA_OUT;
- mcp->tov = MBX_TOV_SECONDS;
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+
+ mcp->mb[0] = MBC_READ_SFP;
+ mcp->mb[1] = addr;
+ mcp->mb[2] = MSW(sfp_dma);
+ mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[6] = MSW(MSD(sfp_dma));
+ mcp->mb[7] = LSW(MSD(sfp_dma));
+ mcp->mb[8] = count;
+ mcp->mb[9] = off;
+ mcp->mb[10] = 0;
+ mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x "
- "mb0=%x.\n",
- __func__, ha->host_no, rval, mcp->mb[0]));
+ DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
+ ha->host_no, rval, mcp->mb[0]));
} else {
- /*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n",
- __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
}
return rval;
}
int
-qla24xx_get_vp_entry(scsi_qla_host_t *ha, uint16_t size, int vp_id)
+qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
+ uint16_t port_speed, uint16_t *mb)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ if (!IS_IIDMA_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- mcp->mb[0] = MBC_MID_GET_VP_ENTRY;
- mcp->mb[2] = MSW(ha->init_cb_dma);
- mcp->mb[3] = LSW(ha->init_cb_dma);
- mcp->mb[4] = 0;
- mcp->mb[5] = 0;
- mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
- mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
- mcp->mb[9] = vp_id;
- mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
- mcp->in_mb = MBX_0;
- mcp->buf_size = size;
- mcp->flags = MBX_DMA_OUT;
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ mcp->mb[2] = BIT_0;
+ mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[4] = mcp->mb[5] = 0;
+ mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
mcp->tov = 30;
+ mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
+ /* Return mailbox statuses. */
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ mb[4] = mcp->mb[4];
+ mb[5] = mcp->mb[5];
+ }
+
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- DEBUG2_3_11(printk("qla24xx_get_vp_entry(%ld): failed=%x "
- "mb0=%x.\n",
- ha->host_no, rval, mcp->mb[0]));
+ DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
+ ha->host_no, rval));
} else {
- /*EMPTY*/
- DEBUG11(printk("qla24xx_get_vp_entry(%ld): done.\n",
- ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
}
return rval;
@@ -2873,7 +2731,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
ha->host_no, vp_index));
- if (vp_index == 0 || vp_index >= MAX_MULTI_ID_LOOP)
+ if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
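
Both statistics routines above now byte-swap the firmware's little-endian reply in place with one dword loop instead of copying counters field by field. The idea works for any struct whose leading members are all 32-bit counters; the struct below is a cut-down stand-in for struct link_statistics, which is defined outside this excerpt.

#include <linux/types.h>
#include <linux/stddef.h>		/* offsetof */
#include <asm/byteorder.h>		/* le32_to_cpu */

struct example_stats {			/* stand-in for struct link_statistics */
	uint32_t link_fail_cnt;
	uint32_t loss_sync_cnt;
	uint32_t loss_sig_cnt;
	uint32_t inval_crc_cnt;
	uint32_t unused1[4];		/* trailing area that is not swapped */
};

/* Swap every counter up to 'unused1' from firmware (LE) to CPU order. */
static void example_fixup_stats(struct example_stats *stats)
{
	uint32_t *iter = &stats->link_fail_cnt;
	uint32_t dwords = offsetof(struct example_stats, unused1) / 4;

	while (dwords--) {
		*iter = le32_to_cpu(*iter);
		iter++;
	}
}
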
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 821ee74aadc6..cf784cdafb01 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -39,7 +39,7 @@ qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
}
}
-uint32_t
+static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
uint32_t vp_id;
@@ -47,16 +47,15 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
/* Find an empty slot and assign an vp_id */
down(&ha->vport_sem);
- vp_id = find_first_zero_bit((unsigned long *)ha->vp_idx_map,
- MAX_MULTI_ID_FABRIC);
- if (vp_id > MAX_MULTI_ID_FABRIC) {
- DEBUG15(printk ("vp_id %d is bigger than MAX_MULTI_ID_FABRID\n",
- vp_id));
+ vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
+ if (vp_id > ha->max_npiv_vports) {
+ DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n",
+ vp_id, ha->max_npiv_vports));
up(&ha->vport_sem);
return vp_id;
}
- set_bit(vp_id, (unsigned long *)ha->vp_idx_map);
+ set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
vha->vp_idx = vp_id;
list_add_tail(&vha->vp_list, &ha->vp_list);
@@ -73,12 +72,12 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
down(&ha->vport_sem);
vp_id = vha->vp_idx;
ha->num_vhosts--;
- clear_bit(vp_id, (unsigned long *)ha->vp_idx_map);
+ clear_bit(vp_id, ha->vp_idx_map);
list_del(&vha->vp_list);
up(&ha->vport_sem);
}
-scsi_qla_host_t *
+static scsi_qla_host_t *
qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name)
{
scsi_qla_host_t *vha;
@@ -216,11 +215,7 @@ qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
if (ha->parent)
return;
- i = find_next_bit((unsigned long *)ha->vp_idx_map,
- MAX_MULTI_ID_FABRIC + 1, 1);
- for (;i <= MAX_MULTI_ID_FABRIC;
- i = find_next_bit((unsigned long *)ha->vp_idx_map,
- MAX_MULTI_ID_FABRIC + 1, i + 1)) {
+ for_each_mapped_vp_idx(ha, i) {
vp_idx_matched = 0;
list_for_each_entry(vha, &ha->vp_list, vp_list) {
@@ -270,7 +265,7 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
qla24xx_enable_vp(vha);
}
-int
+static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -311,11 +306,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha)
clear_bit(VP_DPC_NEEDED, &ha->dpc_flags);
- i = find_next_bit((unsigned long *)ha->vp_idx_map,
- MAX_MULTI_ID_FABRIC + 1, 1);
- for (;i <= MAX_MULTI_ID_FABRIC;
- i = find_next_bit((unsigned long *)ha->vp_idx_map,
- MAX_MULTI_ID_FABRIC + 1, i + 1)) {
+ for_each_mapped_vp_idx(ha, i) {
vp_idx_matched = 0;
list_for_each_entry(vha, &ha->vp_list, vp_list) {
@@ -350,15 +341,17 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
/* Check up unique WWPN */
u64_to_wwn(fc_vport->port_name, port_name);
+ if (!memcmp(port_name, ha->port_name, WWN_SIZE))
+ return VPCERR_BAD_WWN;
vha = qla24xx_find_vhost_by_name(ha, port_name);
if (vha)
return VPCERR_BAD_WWN;
/* Check up max-npiv-supports */
if (ha->num_vhosts > ha->max_npiv_vports) {
- DEBUG15(printk("scsi(%ld): num_vhosts %d is bigger than "
- "max_npv_vports %d.\n", ha->host_no,
- (uint16_t) ha->num_vhosts, (int) ha->max_npiv_vports));
+ DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
+ "max_npv_vports %ud.\n", ha->host_no,
+ ha->num_vhosts, ha->max_npiv_vports));
return VPCERR_UNSUPPORTED;
}
return 0;
@@ -412,8 +405,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
}
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
- init_MUTEX(&vha->mbx_cmd_sem);
- init_MUTEX_LOCKED(&vha->mbx_intr_sem);
+ init_completion(&vha->mbx_cmd_comp);
+ complete(&vha->mbx_cmd_comp);
+ init_completion(&vha->mbx_intr_comp);
INIT_LIST_HEAD(&vha->list);
INIT_LIST_HEAD(&vha->fcports);
@@ -450,7 +444,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
num_hosts++;
down(&ha->vport_sem);
- set_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
+ set_bit(vha->vp_idx, ha->vp_idx_map);
ha->cur_vport_count++;
up(&ha->vport_sem);
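
The open-coded find_next_bit() loops removed above (here and in qla_init.c) are replaced by a for_each_mapped_vp_idx() iterator, and vp_idx_map is now passed to the bitops directly, without the unsigned long casts. The macro itself lives in qla_def.h, which is outside this excerpt; based on the loops it replaces, a plausible reconstruction is:

/*
 * Plausible reconstruction of for_each_mapped_vp_idx(), inferred from the
 * find_next_bit() loops removed in this diff -- the real definition is in
 * qla_def.h and may differ.
 */
#define for_each_mapped_vp_idx(_ha, _idx)				\
	for (_idx = find_next_bit((_ha)->vp_idx_map,			\
	         (_ha)->max_npiv_vports + 1, 1);			\
	     _idx <= (_ha)->max_npiv_vports;				\
	     _idx = find_next_bit((_ha)->vp_idx_map,			\
	         (_ha)->max_npiv_vports + 1, _idx + 1))
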
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8ecc0470b8f3..aba1e6d48066 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -105,13 +105,12 @@ static int qla2xxx_eh_abort(struct scsi_cmnd *);
static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
-static int qla2x00_loop_reset(scsi_qla_host_t *ha);
static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_change_queue_depth(struct scsi_device *, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);
-struct scsi_host_template qla2x00_driver_template = {
+static struct scsi_host_template qla2x00_driver_template = {
.module = THIS_MODULE,
.name = QLA2XXX_DRIVER_NAME,
.queuecommand = qla2x00_queuecommand,
@@ -179,13 +178,6 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
* Timer routines
*/
-void qla2x00_timer(scsi_qla_host_t *);
-
-__inline__ void qla2x00_start_timer(scsi_qla_host_t *,
- void *, unsigned long);
-static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long);
-__inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
-
__inline__ void
qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
{
@@ -203,7 +195,7 @@ qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
mod_timer(&ha->timer, jiffies + interval * HZ);
}
-__inline__ void
+static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *ha)
{
del_timer_sync(&ha->timer);
@@ -214,12 +206,11 @@ static int qla2x00_do_dpc(void *data);
static void qla2x00_rst_aen(scsi_qla_host_t *);
-uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
-void qla2x00_mem_free(scsi_qla_host_t *ha);
+static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
+static void qla2x00_mem_free(scsi_qla_host_t *ha);
static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
-void qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *);
/* -------------------------------------------------------------------------- */
@@ -1060,7 +1051,7 @@ eh_host_reset_lock:
* Returns:
* 0 = success
*/
-static int
+int
qla2x00_loop_reset(scsi_qla_host_t *ha)
{
int ret;
@@ -1479,8 +1470,7 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
static int
qla2x00_iospace_config(scsi_qla_host_t *ha)
{
- unsigned long pio, pio_len, pio_flags;
- unsigned long mmio, mmio_len, mmio_flags;
+ resource_size_t pio;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1495,10 +1485,8 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
/* We only need PIO for Flash operations on ISP2312 v2 chips. */
pio = pci_resource_start(ha->pdev, 0);
- pio_len = pci_resource_len(ha->pdev, 0);
- pio_flags = pci_resource_flags(ha->pdev, 0);
- if (pio_flags & IORESOURCE_IO) {
- if (pio_len < MIN_IOBASE_LEN) {
+ if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
qla_printk(KERN_WARNING, ha,
"Invalid PCI I/O region size (%s)...\n",
pci_name(ha->pdev));
@@ -1511,28 +1499,23 @@ qla2x00_iospace_config(scsi_qla_host_t *ha)
pio = 0;
}
ha->pio_address = pio;
- ha->pio_length = pio_len;
skip_pio:
/* Use MMIO operations for all accesses. */
- mmio = pci_resource_start(ha->pdev, 1);
- mmio_len = pci_resource_len(ha->pdev, 1);
- mmio_flags = pci_resource_flags(ha->pdev, 1);
-
- if (!(mmio_flags & IORESOURCE_MEM)) {
+ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
qla_printk(KERN_ERR, ha,
- "region #0 not an MMIO resource (%s), aborting\n",
+ "region #1 not an MMIO resource (%s), aborting\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
- if (mmio_len < MIN_IOBASE_LEN) {
+ if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
qla_printk(KERN_ERR, ha,
"Invalid PCI mem region size (%s), aborting\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
- ha->iobase = ioremap(mmio, MIN_IOBASE_LEN);
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
if (!ha->iobase) {
qla_printk(KERN_ERR, ha,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -1701,9 +1684,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* load the F/W, read paramaters, and init the H/W */
ha->instance = num_hosts;
- init_MUTEX(&ha->mbx_cmd_sem);
init_MUTEX(&ha->vport_sem);
- init_MUTEX_LOCKED(&ha->mbx_intr_sem);
+ init_completion(&ha->mbx_cmd_comp);
+ complete(&ha->mbx_cmd_comp);
+ init_completion(&ha->mbx_intr_comp);
INIT_LIST_HEAD(&ha->list);
INIT_LIST_HEAD(&ha->fcports);
@@ -1807,6 +1791,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
qla2x00_init_host_attr(ha);
+ qla2x00_dfs_setup(ha);
+
qla_printk(KERN_INFO, ha, "\n"
" QLogic Fibre Channel HBA Driver: %s\n"
" QLogic %s - %s\n"
@@ -1838,6 +1824,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha = pci_get_drvdata(pdev);
+ qla2x00_dfs_remove(ha);
+
qla2x00_free_sysfs_attr(ha);
fc_remove_host(ha->host);
@@ -1871,8 +1859,11 @@ qla2x00_free_device(scsi_qla_host_t *ha)
kthread_stop(t);
}
+ if (ha->flags.fce_enabled)
+ qla2x00_disable_fce_trace(ha, NULL, NULL);
+
if (ha->eft)
- qla2x00_trace_control(ha, TC_DISABLE, 0, 0);
+ qla2x00_disable_eft_trace(ha);
ha->flags.online = 0;
@@ -2016,7 +2007,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
* 0 = success.
* 1 = failure.
*/
-uint8_t
+static uint8_t
qla2x00_mem_alloc(scsi_qla_host_t *ha)
{
char name[16];
@@ -2213,7 +2204,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
* Input:
* ha = adapter block pointer.
*/
-void
+static void
qla2x00_mem_free(scsi_qla_host_t *ha)
{
struct list_head *fcpl, *fcptemp;
@@ -2228,6 +2219,10 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
/* free sp pool */
qla2x00_free_sp_pool(ha);
+ if (ha->fce)
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
+ ha->fce_dma);
+
if (ha->fw_dump) {
if (ha->eft)
dma_free_coherent(&ha->pdev->dev,
@@ -2748,23 +2743,6 @@ qla2x00_timer(scsi_qla_host_t *ha)
qla2x00_restart_timer(ha, WATCH_INTERVAL);
}
-/* XXX(hch): crude hack to emulate a down_timeout() */
-int
-qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
-{
- const unsigned int step = 100; /* msecs */
- unsigned int iterations = jiffies_to_msecs(timeout)/100;
-
- do {
- if (!down_trylock(sema))
- return 0;
- if (msleep_interruptible(step))
- break;
- } while (--iterations > 0);
-
- return -ETIMEDOUT;
-}
-
/* Firmware interface routines. */
#define FW_BLOBS 6
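
In qla2x00_probe_one() the old init_MUTEX(&ha->mbx_cmd_sem) becomes init_completion() followed immediately by complete(), so the first mailbox caller gets through wait_for_completion_timeout() the same way it used to get through down_timeout(); the crude down_timeout() emulation at the end of the file can then go away. Reduced to its essentials, with hypothetical names:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct completion mbx_cmd_comp;	/* one outstanding mailbox command */

static void example_probe_init(void)
{
	init_completion(&mbx_cmd_comp);
	complete(&mbx_cmd_comp);	/* token available to the first caller */
}

static int example_grab_mailbox(unsigned long tov)
{
	/* Consume the token, or give up after tov seconds. */
	if (!wait_for_completion_timeout(&mbx_cmd_comp, tov * HZ))
		return -ETIMEDOUT;
	return 0;
}

static void example_release_mailbox(void)
{
	complete(&mbx_cmd_comp);	/* allow the next mailbox command in */
}
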
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index ad2fa01bd233..b68fb73613ed 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -22,7 +22,7 @@ static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
* qla2x00_lock_nvram_access() -
* @ha: HA context
*/
-void
+static void
qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
{
uint16_t data;
@@ -55,7 +55,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
* qla2x00_unlock_nvram_access() -
* @ha: HA context
*/
-void
+static void
qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -74,7 +74,7 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
*
* Returns the word read from nvram @addr.
*/
-uint16_t
+static uint16_t
qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
{
uint16_t data;
@@ -93,7 +93,7 @@ qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
* @addr: Address in NVRAM to write
* @data: word to program
*/
-void
+static void
qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
{
int count;
@@ -550,7 +550,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
int ret;
uint32_t liter, miter;
uint32_t sec_mask, rest_addr, conf_addr;
- uint32_t fdata, findex ;
+ uint32_t fdata, findex, cnt;
uint8_t man_id, flash_id;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t optrom_dma;
@@ -690,8 +690,14 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
0xff0000) | ((fdata >> 16) & 0xff));
}
- /* Enable flash write-protection. */
+ /* Enable flash write-protection and wait for completion. */
qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0x9c);
+ for (cnt = 300; cnt &&
+ qla24xx_read_flash_dword(ha,
+ flash_conf_to_access_addr(0x005)) & BIT_0;
+ cnt--) {
+ udelay(10);
+ }
/* Disable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
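
The qla24xx_write_flash_data() change does not just re-enable write protection, it also polls the flash status word until the write-in-progress bit (BIT_0) clears, bounded to roughly 3 ms by 300 iterations of udelay(10); the driver itself simply falls through when the loop expires. The same bounded-poll idiom, with a hypothetical read_status() accessor standing in for qla24xx_read_flash_dword(ha, flash_conf_to_access_addr(0x005)):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define STATUS_BUSY_BIT	0x1		/* BIT_0: write still in progress */

/* Poll until the busy bit clears, giving up after about 3 ms. */
static int example_wait_flash_idle(uint32_t (*read_status)(void))
{
	int cnt;

	for (cnt = 300; cnt; cnt--) {
		if (!(read_status() & STATUS_BUSY_BIT))
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}
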
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ae6f7a2fb19f..2c2f6b4697c7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.00-k5"
+#define QLA2XXX_VERSION "8.02.00-k7"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index d692c713416a..cbe0a17ced5f 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -5,6 +5,7 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
+#include <scsi/iscsi_if.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
@@ -1305,7 +1306,8 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);
clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
- iscsi_if_create_session_done(ddb_entry->conn);
+ iscsi_session_event(ddb_entry->sess,
+ ISCSI_KEVENT_CREATE_SESSION);
/*
* Change the lun state to READY in case the lun TIMEOUT before
* the device came back.
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4a154beb0d39..0f029d0d7315 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -123,15 +123,14 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
/* Copy Sense Data into sense buffer. */
- memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
if (sensebytecnt == 0)
break;
memcpy(cmd->sense_buffer, sts_entry->senseData,
- min(sensebytecnt,
- (uint16_t) sizeof(cmd->sense_buffer)));
+ min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
"ASC/ASCQ = %02x/%02x\n", ha->host_no,
@@ -208,8 +207,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
/* Copy Sense Data into sense buffer. */
- memset(cmd->sense_buffer, 0,
- sizeof(cmd->sense_buffer));
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
sensebytecnt =
le16_to_cpu(sts_entry->senseDataByteCnt);
@@ -217,8 +215,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
memcpy(cmd->sense_buffer, sts_entry->senseData,
- min(sensebytecnt,
- (uint16_t) sizeof(cmd->sense_buffer)));
+ min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
"ASC/ASCQ = %02x/%02x\n", ha->host_no,
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 89460d27c689..f55b9f7d9396 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -298,8 +298,7 @@ void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
return;
if (ddb_entry->conn) {
- iscsi_if_destroy_session_done(ddb_entry->conn);
- iscsi_destroy_conn(ddb_entry->conn);
+ atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
iscsi_remove_session(ddb_entry->sess);
}
iscsi_free_session(ddb_entry->sess);
@@ -309,6 +308,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
{
int err;
+ ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
if (err) {
DEBUG2(printk(KERN_ERR "Could not add session.\n"));
@@ -321,9 +321,6 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
return -ENOMEM;
}
-
- ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
- iscsi_if_create_session_done(ddb_entry->conn);
return 0;
}
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 7a2e7986b038..65455ab1f3b9 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -871,11 +871,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
struct scatterlist *sg, *s;
int i, n;
- if (Cmnd->use_sg) {
+ if (scsi_bufflen(Cmnd)) {
int sg_count;
- sg = (struct scatterlist *) Cmnd->request_buffer;
- sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
+ sg = scsi_sglist(Cmnd);
+ sg_count = sbus_map_sg(qpti->sdev, sg, scsi_sg_count(Cmnd),
+ Cmnd->sc_data_direction);
ds = cmd->dataseg;
cmd->segment_cnt = sg_count;
@@ -914,16 +915,6 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
}
sg_count -= n;
}
- } else if (Cmnd->request_bufflen) {
- Cmnd->SCp.ptr = (char *)(unsigned long)
- sbus_map_single(qpti->sdev,
- Cmnd->request_buffer,
- Cmnd->request_bufflen,
- Cmnd->sc_data_direction);
-
- cmd->dataseg[0].d_base = (u32) ((unsigned long)Cmnd->SCp.ptr);
- cmd->dataseg[0].d_count = Cmnd->request_bufflen;
- cmd->segment_cnt = 1;
} else {
cmd->dataseg[0].d_base = 0;
cmd->dataseg[0].d_count = 0;
@@ -1151,7 +1142,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
if (sts->state_flags & SF_GOT_SENSE)
memcpy(Cmnd->sense_buffer, sts->req_sense_data,
- sizeof(Cmnd->sense_buffer));
+ SCSI_SENSE_BUFFERSIZE);
if (sts->hdr.entry_type == ENTRY_STATUS)
Cmnd->result =
@@ -1159,17 +1150,11 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
else
Cmnd->result = DID_ERROR << 16;
- if (Cmnd->use_sg) {
+ if (scsi_bufflen(Cmnd))
sbus_unmap_sg(qpti->sdev,
- (struct scatterlist *)Cmnd->request_buffer,
- Cmnd->use_sg,
+ scsi_sglist(Cmnd), scsi_sg_count(Cmnd),
Cmnd->sc_data_direction);
- } else if (Cmnd->request_bufflen) {
- sbus_unmap_single(qpti->sdev,
- (__u32)((unsigned long)Cmnd->SCp.ptr),
- Cmnd->request_bufflen,
- Cmnd->sc_data_direction);
- }
+
qpti->cmd_count[Cmnd->device->id]--;
sbus_writew(out_ptr, qpti->qregs + MBOX5);
Cmnd->host_scribble = (unsigned char *) done_queue;
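
The qlogicpti conversion is part of the tree-wide move from cmd->request_buffer/use_sg to the scsi_sglist()/scsi_sg_count()/scsi_bufflen() accessors: every request with data is now handled as a scatterlist, so the separate single-buffer branch disappears. A driver-neutral sketch of the mapping side is below; it uses the generic dma_map_sg() purely for illustration, whereas qlogicpti itself maps through the SBUS helpers shown above.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Map a command's data buffer; returns the number of mapped segments. */
static int example_map_data(struct device *dev, struct scsi_cmnd *cmd,
    struct scatterlist **sg_out)
{
	if (!scsi_bufflen(cmd)) {	/* no data phase at all */
		*sg_out = NULL;
		return 0;
	}

	*sg_out = scsi_sglist(cmd);
	return dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
	    cmd->sc_data_direction);
}
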
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 0fb1709ce5e3..1a9fba6a9f92 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -122,6 +122,11 @@ static const char *const scsi_device_types[] = {
"Automation/Drive ",
};
+/**
+ * scsi_device_type - Return 17 char string indicating device type.
+ * @type: type number to look up
+ */
+
const char * scsi_device_type(unsigned type)
{
if (type == 0x1e)
@@ -136,32 +141,45 @@ const char * scsi_device_type(unsigned type)
EXPORT_SYMBOL(scsi_device_type);
struct scsi_host_cmd_pool {
- struct kmem_cache *slab;
- unsigned int users;
- char *name;
- unsigned int slab_flags;
- gfp_t gfp_mask;
+ struct kmem_cache *cmd_slab;
+ struct kmem_cache *sense_slab;
+ unsigned int users;
+ char *cmd_name;
+ char *sense_name;
+ unsigned int slab_flags;
+ gfp_t gfp_mask;
};
static struct scsi_host_cmd_pool scsi_cmd_pool = {
- .name = "scsi_cmd_cache",
+ .cmd_name = "scsi_cmd_cache",
+ .sense_name = "scsi_sense_cache",
.slab_flags = SLAB_HWCACHE_ALIGN,
};
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
- .name = "scsi_cmd_cache(DMA)",
+ .cmd_name = "scsi_cmd_cache(DMA)",
+ .sense_name = "scsi_sense_cache(DMA)",
.slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
.gfp_mask = __GFP_DMA,
};
static DEFINE_MUTEX(host_cmd_pool_mutex);
+/**
+ * __scsi_get_command - Allocate a struct scsi_cmnd
+ * @shost: host to transmit command
+ * @gfp_mask: allocation mask
+ *
+ * Description: allocate a struct scsi_cmd from host's slab, recycling from the
+ * host's free_list if necessary.
+ */
struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
struct scsi_cmnd *cmd;
+ unsigned char *buf;
- cmd = kmem_cache_alloc(shost->cmd_pool->slab,
- gfp_mask | shost->cmd_pool->gfp_mask);
+ cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab,
+ gfp_mask | shost->cmd_pool->gfp_mask);
if (unlikely(!cmd)) {
unsigned long flags;
@@ -173,19 +191,32 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
list_del_init(&cmd->list);
}
spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+ if (cmd) {
+ buf = cmd->sense_buffer;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sense_buffer = buf;
+ }
+ } else {
+ buf = kmem_cache_alloc(shost->cmd_pool->sense_slab,
+ gfp_mask | shost->cmd_pool->gfp_mask);
+ if (likely(buf)) {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sense_buffer = buf;
+ } else {
+ kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
+ cmd = NULL;
+ }
}
return cmd;
}
EXPORT_SYMBOL_GPL(__scsi_get_command);
-/*
- * Function: scsi_get_command()
- *
- * Purpose: Allocate and setup a scsi command block
- *
- * Arguments: dev - parent scsi device
- * gfp_mask- allocator flags
+/**
+ * scsi_get_command - Allocate and setup a scsi command block
+ * @dev: parent scsi device
+ * @gfp_mask: allocator flags
*
* Returns: The allocated scsi command structure.
*/
@@ -202,7 +233,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
if (likely(cmd != NULL)) {
unsigned long flags;
- memset(cmd, 0, sizeof(*cmd));
cmd->device = dev;
init_timer(&cmd->eh_timeout);
INIT_LIST_HEAD(&cmd->list);
@@ -217,6 +247,12 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
}
EXPORT_SYMBOL(scsi_get_command);
+/**
+ * __scsi_put_command - Free a struct scsi_cmnd
+ * @shost: dev->host
+ * @cmd: Command to free
+ * @dev: parent scsi device
+ */
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
struct device *dev)
{
@@ -230,19 +266,19 @@ void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
}
spin_unlock_irqrestore(&shost->free_list_lock, flags);
- if (likely(cmd != NULL))
- kmem_cache_free(shost->cmd_pool->slab, cmd);
+ if (likely(cmd != NULL)) {
+ kmem_cache_free(shost->cmd_pool->sense_slab,
+ cmd->sense_buffer);
+ kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
+ }
put_device(dev);
}
EXPORT_SYMBOL(__scsi_put_command);
-/*
- * Function: scsi_put_command()
- *
- * Purpose: Free a scsi command block
- *
- * Arguments: cmd - command block to free
+/**
+ * scsi_put_command - Free a scsi command block
+ * @cmd: command block to free
*
* Returns: Nothing.
*
@@ -263,12 +299,13 @@ void scsi_put_command(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_put_command);
-/*
- * Function: scsi_setup_command_freelist()
- *
- * Purpose: Setup the command freelist for a scsi host.
+/**
+ * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
+ * @shost: host to allocate the freelist for.
*
- * Arguments: shost - host to allocate the freelist for.
+ * Description: The command freelist protects against system-wide out of memory
+ * deadlock by preallocating one SCSI command structure for each host, so the
+ * system can always write to a swap file on a device associated with that host.
*
* Returns: Nothing.
*/
@@ -282,16 +319,24 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
/*
* Select a command slab for this host and create it if not
- * yet existant.
+ * yet existent.
*/
mutex_lock(&host_cmd_pool_mutex);
pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
if (!pool->users) {
- pool->slab = kmem_cache_create(pool->name,
- sizeof(struct scsi_cmnd), 0,
- pool->slab_flags, NULL);
- if (!pool->slab)
+ pool->cmd_slab = kmem_cache_create(pool->cmd_name,
+ sizeof(struct scsi_cmnd), 0,
+ pool->slab_flags, NULL);
+ if (!pool->cmd_slab)
+ goto fail;
+
+ pool->sense_slab = kmem_cache_create(pool->sense_name,
+ SCSI_SENSE_BUFFERSIZE, 0,
+ pool->slab_flags, NULL);
+ if (!pool->sense_slab) {
+ kmem_cache_destroy(pool->cmd_slab);
goto fail;
+ }
}
pool->users++;
@@ -301,29 +346,36 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
/*
* Get one backup command for this host.
*/
- cmd = kmem_cache_alloc(shost->cmd_pool->slab,
- GFP_KERNEL | shost->cmd_pool->gfp_mask);
+ cmd = kmem_cache_alloc(shost->cmd_pool->cmd_slab,
+ GFP_KERNEL | shost->cmd_pool->gfp_mask);
if (!cmd)
goto fail2;
- list_add(&cmd->list, &shost->free_list);
+
+ cmd->sense_buffer = kmem_cache_alloc(shost->cmd_pool->sense_slab,
+ GFP_KERNEL |
+ shost->cmd_pool->gfp_mask);
+ if (!cmd->sense_buffer)
+ goto fail2;
+
+ list_add(&cmd->list, &shost->free_list);
return 0;
fail2:
- if (!--pool->users)
- kmem_cache_destroy(pool->slab);
- return -ENOMEM;
+ if (cmd)
+ kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
+ mutex_lock(&host_cmd_pool_mutex);
+ if (!--pool->users) {
+ kmem_cache_destroy(pool->cmd_slab);
+ kmem_cache_destroy(pool->sense_slab);
+ }
fail:
mutex_unlock(&host_cmd_pool_mutex);
return -ENOMEM;
-
}
-/*
- * Function: scsi_destroy_command_freelist()
- *
- * Purpose: Release the command freelist for a scsi host.
- *
- * Arguments: shost - host that's freelist is going to be destroyed
+/**
+ * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
+ * @shost: host whose freelist is going to be destroyed
*/
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
@@ -332,12 +384,16 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
list_del_init(&cmd->list);
- kmem_cache_free(shost->cmd_pool->slab, cmd);
+ kmem_cache_free(shost->cmd_pool->sense_slab,
+ cmd->sense_buffer);
+ kmem_cache_free(shost->cmd_pool->cmd_slab, cmd);
}
mutex_lock(&host_cmd_pool_mutex);
- if (!--shost->cmd_pool->users)
- kmem_cache_destroy(shost->cmd_pool->slab);
+ if (!--shost->cmd_pool->users) {
+ kmem_cache_destroy(shost->cmd_pool->cmd_slab);
+ kmem_cache_destroy(shost->cmd_pool->sense_slab);
+ }
mutex_unlock(&host_cmd_pool_mutex);
}
@@ -441,8 +497,12 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
}
#endif
-/*
- * Assign a serial number to the request for error recovery
+/**
+ * scsi_cmd_get_serial - Assign a serial number to a command
+ * @host: the scsi host
+ * @cmd: command to assign serial number to
+ *
+ * Description: A serial number identifies a request for error recovery
* and debugging purposes. Protected by the Host_Lock of host.
*/
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -452,14 +512,12 @@ static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd
cmd->serial_number = host->cmd_serial_number++;
}
-/*
- * Function: scsi_dispatch_command
- *
- * Purpose: Dispatch a command to the low-level driver.
- *
- * Arguments: cmd - command block we are dispatching.
+/**
+ * scsi_dispatch_command - Dispatch a command to the low-level driver.
+ * @cmd: command block we are dispatching.
*
- * Notes:
+ * Return: nonzero if the request was rejected and the device's queue needs
+ * to be plugged.
*/
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
@@ -585,7 +643,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
/**
* scsi_req_abort_cmd -- Request command recovery for the specified command
- * cmd: pointer to the SCSI command of interest
+ * @cmd: pointer to the SCSI command of interest
*
* This function requests that SCSI Core start recovery for the
* command by deleting the timer and adding the command to the eh
@@ -606,9 +664,9 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
* @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
* ownership back to SCSI Core -- i.e. the LLDD has finished with it.
*
- * This function is the mid-level's (SCSI Core) interrupt routine, which
- * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
- * the command to the done queue for further processing.
+ * Description: This function is the mid-level's (SCSI Core) interrupt routine,
+ * which regains ownership of the SCSI command (de facto) from a LLDD, and
+ * enqueues the command to the done queue for further processing.
*
* This is the producer of the done queue who enqueues at the tail.
*
@@ -617,7 +675,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
static void scsi_done(struct scsi_cmnd *cmd)
{
/*
- * We don't have to worry about this one timing out any more.
+ * We don't have to worry about this one timing out anymore.
* If we are unable to remove the timer, then the command
* has already timed out. In which case, we have no choice but to
* let the timeout function run, as we have no idea where in fact
@@ -660,10 +718,11 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
}
-/*
- * Function: scsi_finish_command
+/**
+ * scsi_finish_command - cleanup and pass command back to upper layer
+ * @cmd: the command
*
- * Purpose: Pass command off to upper layer for finishing of I/O
+ * Description: Pass command off to upper layer for finishing of I/O
* request, waking processes that are waiting on results,
* etc.
*/
@@ -708,18 +767,14 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_finish_command);
-/*
- * Function: scsi_adjust_queue_depth()
- *
- * Purpose: Allow low level drivers to tell us to change the queue depth
- * on a specific SCSI device
- *
- * Arguments: sdev - SCSI Device in question
- * tagged - Do we use tagged queueing (non-0) or do we treat
- * this device as an untagged device (0)
- * tags - Number of tags allowed if tagged queueing enabled,
- * or number of commands the low level driver can
- * queue up in non-tagged mode (as per cmd_per_lun).
+/**
+ * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
+ * @sdev: SCSI Device in question
+ * @tagged: Do we use tagged queueing (non-0) or do we treat
+ * this device as an untagged device (0)
+ * @tags: Number of tags allowed if tagged queueing enabled,
+ * or number of commands the low level driver can
+ * queue up in non-tagged mode (as per cmd_per_lun).
*
* Returns: Nothing
*
@@ -742,8 +797,8 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- /* Check to see if the queue is managed by the block layer
- * if it is, and we fail to adjust the depth, exit */
+ /* Check to see if the queue is managed by the block layer.
+ * If it is, and we fail to adjust the depth, exit. */
if (blk_queue_tagged(sdev->request_queue) &&
blk_queue_resize_tags(sdev->request_queue, tags) != 0)
goto out;
@@ -772,20 +827,17 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
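[Editorial example, not part of the patch] A low-level driver conventionally calls scsi_adjust_queue_depth() from its slave_configure() hook. A sketch, assuming an illustrative tag depth of 32 and MSG_ORDERED_TAG from <scsi/scsi.h>:

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int example_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		/* enable tagged queueing with an illustrative depth of 32 */
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 32);
	else
		/* untagged: queue up to the host's cmd_per_lun commands */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	return 0;
}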
-/*
- * Function: scsi_track_queue_full()
+/**
+ * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
+ * @sdev: SCSI Device in question
+ * @depth: Current number of outstanding SCSI commands on this device,
+ * not counting the one returned as QUEUE_FULL.
*
- * Purpose: This function will track successive QUEUE_FULL events on a
+ * Description: This function will track successive QUEUE_FULL events on a
* specific SCSI device to determine if and when there is a
* need to adjust the queue depth on the device.
*
- * Arguments: sdev - SCSI Device in question
- * depth - Current number of outstanding SCSI commands on
- * this device, not counting the one returned as
- * QUEUE_FULL.
- *
- * Returns: 0 - No change needed
- * >0 - Adjust queue depth to this new depth
+ * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
* -1 - Drop back to untagged operation using host->cmd_per_lun
* as the untagged command depth
*
@@ -824,10 +876,10 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
EXPORT_SYMBOL(scsi_track_queue_full);
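[Editorial example, not part of the patch] A sketch of how a driver's completion path might act on the three documented return values when a device reports QUEUE_FULL (hypothetical helper):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

static void example_handle_queue_full(struct scsi_cmnd *cmd, int outstanding)
{
	/* the command that just returned QUEUE_FULL is not counted */
	int depth = scsi_track_queue_full(cmd->device, outstanding - 1);

	if (depth > 0)
		sdev_printk(KERN_INFO, cmd->device,
			    "queue depth adjusted to %d\n", depth);
	else if (depth == -1)
		sdev_printk(KERN_INFO, cmd->device,
			    "dropped back to untagged operation\n");
	/* depth == 0: no change was needed */
}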
/**
- * scsi_device_get - get an addition reference to a scsi_device
+ * scsi_device_get - get an additional reference to a scsi_device
* @sdev: device to get a reference to
*
- * Gets a reference to the scsi_device and increments the use count
+ * Description: Gets a reference to the scsi_device and increments the use count
* of the underlying LLDD module. You must hold host_lock of the
* parent Scsi_Host or already have a reference when calling this.
*/
@@ -849,8 +901,8 @@ EXPORT_SYMBOL(scsi_device_get);
* scsi_device_put - release a reference to a scsi_device
* @sdev: device to release a reference on.
*
- * Release a reference to the scsi_device and decrements the use count
- * of the underlying LLDD module. The device is freed once the last
+ * Description: Release a reference to the scsi_device and decrements the use
+ * count of the underlying LLDD module. The device is freed once the last
* user vanishes.
*/
void scsi_device_put(struct scsi_device *sdev)
@@ -867,7 +919,7 @@ void scsi_device_put(struct scsi_device *sdev)
}
EXPORT_SYMBOL(scsi_device_put);
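[Editorial example, not part of the patch] The get/put pairing described above, as a minimal sketch; the hypothetical caller is assumed to already satisfy the host_lock/reference precondition for scsi_device_get():

#include <scsi/scsi_device.h>

static int example_use_device(struct scsi_device *sdev)
{
	int err = scsi_device_get(sdev);

	if (err)
		return err;
	/* ... sdev and its LLDD module are pinned here ... */
	scsi_device_put(sdev);
	return 0;
}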
-/* helper for shost_for_each_device, thus not documented */
+/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
struct scsi_device *prev)
{
@@ -895,6 +947,8 @@ EXPORT_SYMBOL(__scsi_iterate_devices);
/**
* starget_for_each_device - helper to walk all devices of a target
* @starget: target whose devices we want to iterate over.
+ * @data: Opaque data passed to each function call.
+ * @fn: Function to call on each device
*
* This traverses over each device of @starget. The devices have
* a reference that must be released by scsi_host_put when breaking
@@ -946,13 +1000,13 @@ EXPORT_SYMBOL(__starget_for_each_device);
* @starget: SCSI target pointer
* @lun: SCSI Logical Unit Number
*
- * Looks up the scsi_device with the specified @lun for a give
- * @starget. The returned scsi_device does not have an additional
+ * Description: Looks up the scsi_device with the specified @lun for a given
+ * @starget. The returned scsi_device does not have an additional
* reference. You must hold the host's host_lock over this call and
* any access to the returned scsi_device.
*
- * Note: The only reason why drivers would want to use this is because
- * they're need to access the device list in irq context. Otherwise you
+ * Note: The only reason why drivers should use this is because
+ * they need to access the device list in irq context. Otherwise you
* really want to use scsi_device_lookup_by_target instead.
**/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
@@ -974,9 +1028,9 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
* @starget: SCSI target pointer
* @lun: SCSI Logical Unit Number
*
- * Looks up the scsi_device with the specified @channel, @id, @lun for a
- * give host. The returned scsi_device has an additional reference that
- * needs to be release with scsi_host_put once you're done with it.
+ * Description: Looks up the scsi_device with the specified @channel, @id, @lun
+ * for a given host. The returned scsi_device has an additional reference that
+ * needs to be released with scsi_device_put once you're done with it.
**/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
uint lun)
@@ -996,19 +1050,19 @@ struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
EXPORT_SYMBOL(scsi_device_lookup_by_target);
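[Editorial example, not part of the patch] Illustrative lookup/put pairing for the function documented above (hypothetical caller); the extra reference taken by the lookup is dropped with scsi_device_put():

#include <scsi/scsi_device.h>

static void example_poke_lun(struct scsi_target *starget, uint lun)
{
	struct scsi_device *sdev;

	sdev = scsi_device_lookup_by_target(starget, lun);
	if (!sdev)
		return;
	/* ... use sdev in process context ... */
	scsi_device_put(sdev);
}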
/**
- * scsi_device_lookup - find a device given the host (UNLOCKED)
+ * __scsi_device_lookup - find a device given the host (UNLOCKED)
* @shost: SCSI host pointer
* @channel: SCSI channel (zero if only one channel)
- * @pun: SCSI target number (physical unit number)
+ * @id: SCSI target number (physical unit number)
* @lun: SCSI Logical Unit Number
*
- * Looks up the scsi_device with the specified @channel, @id, @lun for a
- * give host. The returned scsi_device does not have an additional reference.
- * You must hold the host's host_lock over this call and any access to the
- * returned scsi_device.
+ * Description: Looks up the scsi_device with the specified @channel, @id, @lun
+ * for a given host. The returned scsi_device does not have an additional
+ * reference. You must hold the host's host_lock over this call and any access
+ * to the returned scsi_device.
*
* Note: The only reason why drivers would want to use this is because
- * they're need to access the device list in irq context. Otherwise you
+ * they need to access the device list in irq context. Otherwise you
* really want to use scsi_device_lookup instead.
**/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
@@ -1033,9 +1087,9 @@ EXPORT_SYMBOL(__scsi_device_lookup);
* @id: SCSI target number (physical unit number)
* @lun: SCSI Logical Unit Number
*
- * Looks up the scsi_device with the specified @channel, @id, @lun for a
- * give host. The returned scsi_device has an additional reference that
- * needs to be release with scsi_host_put once you're done with it.
+ * Description: Looks up the scsi_device with the specified @channel, @id, @lun
+ * for a given host. The returned scsi_device has an additional reference that
+ * needs to be released with scsi_device_put once you're done with it.
**/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
uint channel, uint id, uint lun)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 46cae5a212de..82c06f0a9d02 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -329,7 +329,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
if (done == NULL)
return 0; /* assume mid level reprocessing command */
- SCpnt->resid = 0;
+ scsi_set_resid(SCpnt, 0);
if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
printk(KERN_INFO "scsi_debug: cmd ");
for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
@@ -603,26 +603,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
void * kaddr_off;
struct scatterlist * sg;
- if (0 == scp->request_bufflen)
+ if (0 == scsi_bufflen(scp))
return 0;
- if (NULL == scp->request_buffer)
+ if (NULL == scsi_sglist(scp))
return (DID_ERROR << 16);
if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
(scp->sc_data_direction == DMA_FROM_DEVICE)))
return (DID_ERROR << 16);
- if (0 == scp->use_sg) {
- req_len = scp->request_bufflen;
- act_len = (req_len < arr_len) ? req_len : arr_len;
- memcpy(scp->request_buffer, arr, act_len);
- if (scp->resid)
- scp->resid -= act_len;
- else
- scp->resid = req_len - act_len;
- return 0;
- }
active = 1;
req_len = act_len = 0;
- scsi_for_each_sg(scp, sg, scp->use_sg, k) {
+ scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
if (active) {
kaddr = (unsigned char *)
kmap_atomic(sg_page(sg), KM_USER0);
@@ -640,10 +630,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
}
req_len += sg->length;
}
- if (scp->resid)
- scp->resid -= act_len;
+ if (scsi_get_resid(scp))
+ scsi_set_resid(scp, scsi_get_resid(scp) - act_len);
else
- scp->resid = req_len - act_len;
+ scsi_set_resid(scp, req_len - act_len);
return 0;
}
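[Editorial example, not part of the patch] The hunks above convert scsi_debug from poking scsi_cmnd fields directly to the accessor wrappers. A small sketch of that accessor style (hypothetical helper):

#include <scsi/scsi_cmnd.h>

static unsigned example_sum_sg_bytes(struct scsi_cmnd *scp)
{
	struct scatterlist *sg;
	unsigned total = 0;
	int k;

	if (!scsi_bufflen(scp) || !scsi_sglist(scp))
		return 0;

	/* walk the command's scatterlist through the wrappers */
	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k)
		total += sg->length;

	/* record how much of the buffer was left untransferred */
	scsi_set_resid(scp, scsi_bufflen(scp) - total);
	return total;
}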
@@ -656,22 +646,15 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
void * kaddr_off;
struct scatterlist * sg;
- if (0 == scp->request_bufflen)
+ if (0 == scsi_bufflen(scp))
return 0;
- if (NULL == scp->request_buffer)
+ if (NULL == scsi_sglist(scp))
return -1;
if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
(scp->sc_data_direction == DMA_TO_DEVICE)))
return -1;
- if (0 == scp->use_sg) {
- req_len = scp->request_bufflen;
- len = (req_len < max_arr_len) ? req_len : max_arr_len;
- memcpy(arr, scp->request_buffer, len);
- return len;
- }
- sg = scsi_sglist(scp);
req_len = fin = 0;
- for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
+ scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
if (NULL == kaddr)
return -1;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 348cc5a6e3cd..b8de041bc0ae 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -276,11 +276,12 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
}
/**
- * scsi_dev_info_list_add: add one dev_info list entry.
+ * scsi_dev_info_list_add - add one dev_info list entry.
+ * @compatible: if true, null-terminate short strings; otherwise space-pad.
* @vendor: vendor string
* @model: model (product) string
* @strflags: integer string
- * @flag: if strflags NULL, use this flag value
+ * @flags: if strflags NULL, use this flag value
*
* Description:
* Create and add one dev_info entry for @vendor, @model, @strflags or
@@ -322,8 +323,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
}
/**
- * scsi_dev_info_list_add_str: parse dev_list and add to the
- * scsi_dev_info_list.
+ * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
* @dev_list: string of device flags to add
*
* Description:
@@ -374,15 +374,15 @@ static int scsi_dev_info_list_add_str(char *dev_list)
}
/**
- * get_device_flags - get device specific flags from the dynamic device
- * list. Called during scan time.
+ * get_device_flags - get device specific flags from the dynamic device list.
+ * @sdev: &scsi_device to get flags for
* @vendor: vendor name
* @model: model name
*
* Description:
* Search the scsi_dev_info_list for an entry matching @vendor and
* @model, if found, return the matching flags value, else return
- * the host or global default settings.
+ * the host or global default settings. Called during scan time.
**/
int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,
@@ -483,13 +483,11 @@ stop_output:
}
/*
- * proc_scsi_dev_info_write: allow additions to the scsi_dev_info_list via
- * /proc.
+ * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc.
*
- * Use: echo "vendor:model:flag" > /proc/scsi/device_info
- *
- * To add a black/white list entry for vendor and model with an integer
- * value of flag to the scsi device info list.
+ * Description: Adds a black/white list entry for vendor and model with an
+ * integer value of flag to the scsi device info list.
+ * To use, echo "vendor:model:flag" > /proc/scsi/device_info
*/
static int proc_scsi_devinfo_write(struct file *file, const char __user *buf,
unsigned long length, void *data)
@@ -532,8 +530,7 @@ MODULE_PARM_DESC(default_dev_flags,
"scsi default device flag integer value");
/**
- * scsi_dev_info_list_delete: called from scsi.c:exit_scsi to remove
- * the scsi_dev_info_list.
+ * scsi_dev_info_list_delete - called from scsi.c:exit_scsi to remove the scsi_dev_info_list.
**/
void scsi_exit_devinfo(void)
{
@@ -552,13 +549,12 @@ void scsi_exit_devinfo(void)
}
/**
- * scsi_dev_list_init: set up the dynamic device list.
- * @dev_list: string of device flags to add
+ * scsi_init_devinfo - set up the dynamic device list.
*
* Description:
- * Add command line @dev_list entries, then add
+ * Add command line entries from scsi_dev_flags, then add
* scsi_static_device_list entries to the scsi device info list.
- **/
+ */
int __init scsi_init_devinfo(void)
{
#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ebaca4ca4a13..547e85aa414f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -62,7 +62,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
* @shost: SCSI host to invoke error handling on.
*
* Schedule SCSI EH without scmd.
- **/
+ */
void scsi_schedule_eh(struct Scsi_Host *shost)
{
unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(scsi_schedule_eh);
*
* Return value:
* 0 on failure.
- **/
+ */
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
struct Scsi_Host *shost = scmd->device->host;
@@ -121,7 +121,7 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
* This should be turned into an inline function. Each scsi command
* has its own timer, and as it is added to the queue, we set up the
* timer. When the command completes, we cancel the timer.
- **/
+ */
void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
void (*complete)(struct scsi_cmnd *))
{
@@ -155,7 +155,7 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
* Return value:
* 1 if we were able to detach the timer. 0 if we blew it, and the
* timer function has already started to run.
- **/
+ */
int scsi_delete_timer(struct scsi_cmnd *scmd)
{
int rtn;
@@ -181,7 +181,7 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
* only in that the normal completion handling might run, but if the
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
- **/
+ */
void scsi_times_out(struct scsi_cmnd *scmd)
{
enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
@@ -224,7 +224,7 @@ void scsi_times_out(struct scsi_cmnd *scmd)
*
* Return value:
* 0 when dev was taken offline by error recovery. 1 OK to proceed.
- **/
+ */
int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
int online;
@@ -245,7 +245,7 @@ EXPORT_SYMBOL(scsi_block_when_processing_errors);
* scsi_eh_prt_fail_stats - Log info on failures.
* @shost: scsi host being recovered.
* @work_q: Queue of scsi cmds to process.
- **/
+ */
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
struct list_head *work_q)
{
@@ -295,7 +295,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
* Notes:
* When a deferred error is detected the current command has
* not been executed and needs retrying.
- **/
+ */
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
struct scsi_sense_hdr sshdr;
@@ -398,7 +398,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
* queued during error recovery. the main difference here is that we
* don't allow for the possibility of retries here, and we are a lot
* more restrictive about what we consider acceptable.
- **/
+ */
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
/*
@@ -452,7 +452,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
/**
* scsi_eh_done - Completion function for error handling.
* @scmd: Cmd that is done.
- **/
+ */
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
struct completion *eh_action;
@@ -469,7 +469,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
/**
* scsi_try_host_reset - ask host adapter to reset itself
* @scmd: SCSI cmd to send host reset.
- **/
+ */
static int scsi_try_host_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
@@ -498,7 +498,7 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
/**
* scsi_try_bus_reset - ask host to perform a bus reset
* @scmd: SCSI cmd to send bus reset.
- **/
+ */
static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
unsigned long flags;
@@ -533,7 +533,7 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
* unreliable for a given host, then the host itself needs to put a
* timer on it, and set the host back to a consistent state prior to
* returning.
- **/
+ */
static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
int rtn;
@@ -568,7 +568,7 @@ static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
* author of the low-level driver wishes this operation to be timed,
* they can provide this facility themselves. helper functions in
* scsi_error.c can be supplied to make this easier to do.
- **/
+ */
static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
{
/*
@@ -601,7 +601,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
* sent must be one that does not transfer any data. If @sense_bytes != 0
* @cmnd is ignored and this functions sets up a REQUEST_SENSE command
* and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
- **/
+ */
void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
{
@@ -625,7 +625,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
if (sense_bytes) {
scmd->request_bufflen = min_t(unsigned,
- sizeof(scmd->sense_buffer), sense_bytes);
+ SCSI_SENSE_BUFFERSIZE, sense_bytes);
sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
scmd->request_bufflen);
scmd->request_buffer = &ses->sense_sgl;
@@ -657,7 +657,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
* Zero the sense buffer. The scsi spec mandates that any
* untransferred sense data should be interpreted as being zero.
*/
- memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}
EXPORT_SYMBOL(scsi_eh_prep_cmnd);
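[Editorial example, not part of the patch] A sketch of the prep/restore pairing around an error-recovery command (hypothetical helper; scsi_send_eh_cmnd() below is the real in-tree caller). With cmnd == NULL and sense_bytes != 0 the function builds a REQUEST_SENSE into the command, as described above:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

static void example_request_sense(struct scsi_cmnd *scmd)
{
	struct scsi_eh_save ses;

	/* NULL cdb + nonzero sense_bytes: set up a REQUEST_SENSE */
	scsi_eh_prep_cmnd(scmd, &ses, NULL, 0, ~0);

	/* ... issue scmd through the host and wait for completion ... */

	/* undo the rewrite before normal processing resumes */
	scsi_eh_restore_cmnd(scmd, &ses);
}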
@@ -667,7 +667,7 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
* @ses: saved information from a corresponding call to scsi_prep_eh_cmnd
*
* Undo any damage done by above scsi_prep_eh_cmnd().
- **/
+ */
void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
{
/*
@@ -697,7 +697,7 @@ EXPORT_SYMBOL(scsi_eh_restore_cmnd);
*
* Return value:
* SUCCESS or FAILED or NEEDS_RETRY
- **/
+ */
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
int cmnd_size, int timeout, unsigned sense_bytes)
{
@@ -765,7 +765,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
* Some hosts automatically obtain this information, others require
* that we obtain it on our own. This function will *not* return until
* the command either times out, or it completes.
- **/
+ */
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
@@ -779,10 +779,10 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
* Notes:
* We don't want to use the normal command completion while we are
* still handling errors - it may cause other commands to be queued,
- * and that would disturb what we are doing. thus we really want to
+ * and that would disturb what we are doing. Thus we really want to
* keep a list of pending commands for final completion, and once we
* are ready to leave error handling we handle completion for real.
- **/
+ */
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
scmd->device->host->host_failed--;
@@ -794,7 +794,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
/**
* scsi_eh_get_sense - Get device sense data.
* @work_q: Queue of commands to process.
- * @done_q: Queue of proccessed commands..
+ * @done_q: Queue of processed commands.
*
* Description:
* See if we need to request sense information. if so, then get it
@@ -802,7 +802,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
*
* Notes:
* This has the unfortunate side effect that if a shost adapter does
- * not automatically request sense information, that we end up shutting
+ * not automatically request sense information, we end up shutting
* it down before we request it.
*
* All drivers should request sense information internally these days,
@@ -810,7 +810,7 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
*
* XXX: Long term this code should go away, but that needs an audit of
* all LLDDs first.
- **/
+ */
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q)
{
@@ -858,11 +858,11 @@ EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
/**
* scsi_eh_tur - Send TUR to device.
- * @scmd: Scsi cmd to send TUR
+ * @scmd: &scsi_cmnd to send TUR
*
* Return value:
* 0 - Device is ready. 1 - Device NOT ready.
- **/
+ */
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
@@ -887,17 +887,17 @@ retry_tur:
}
/**
- * scsi_eh_abort_cmds - abort canceled commands.
- * @shost: scsi host being recovered.
- * @eh_done_q: list_head for processed commands.
+ * scsi_eh_abort_cmds - abort pending commands.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
*
* Description:
* Try and see whether or not it makes sense to try and abort the
- * running command. this only works out to be the case if we have one
- * command that has timed out. if the command simply failed, it makes
+ * running command. This only works out to be the case if we have one
+ * command that has timed out. If the command simply failed, it makes
* no sense to try and abort the command, since as far as the shost
* adapter is concerned, it isn't running.
- **/
+ */
static int scsi_eh_abort_cmds(struct list_head *work_q,
struct list_head *done_q)
{
@@ -931,11 +931,11 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
/**
* scsi_eh_try_stu - Send START_UNIT to device.
- * @scmd: Scsi cmd to send START_UNIT
+ * @scmd: &scsi_cmnd to send START_UNIT
*
* Return value:
* 0 - Device is ready. 1 - Device NOT ready.
- **/
+ */
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
@@ -956,13 +956,14 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
/**
* scsi_eh_stu - send START_UNIT if needed
- * @shost: scsi host being recovered.
- * @eh_done_q: list_head for processed commands.
+ * @shost: &scsi host being recovered.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
*
* Notes:
* If commands are failing due to not ready, initializing command required,
* try revalidating the device, which will end up sending a start unit.
- **/
+ */
static int scsi_eh_stu(struct Scsi_Host *shost,
struct list_head *work_q,
struct list_head *done_q)
@@ -1008,14 +1009,15 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
/**
* scsi_eh_bus_device_reset - send bdr if needed
* @shost: scsi host being recovered.
- * @eh_done_q: list_head for processed commands.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
*
* Notes:
- * Try a bus device reset. still, look to see whether we have multiple
+ * Try a bus device reset. Still, look to see whether we have multiple
* devices that are jammed or not - if we have multiple devices, it
* makes no sense to try bus_device_reset - we really would need to try
* a bus_reset instead.
- **/
+ */
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
struct list_head *work_q,
struct list_head *done_q)
@@ -1063,9 +1065,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
/**
* scsi_eh_bus_reset - send a bus reset
- * @shost: scsi host being recovered.
- * @eh_done_q: list_head for processed commands.
- **/
+ * @shost: &scsi host being recovered.
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ */
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
struct list_head *work_q,
struct list_head *done_q)
@@ -1122,7 +1125,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
* scsi_eh_host_reset - send a host reset
* @work_q: list_head for processed commands.
* @done_q: list_head for processed commands.
- **/
+ */
static int scsi_eh_host_reset(struct list_head *work_q,
struct list_head *done_q)
{
@@ -1157,8 +1160,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
* scsi_eh_offline_sdevs - offline scsi devices that fail to recover
* @work_q: list_head for processed commands.
* @done_q: list_head for processed commands.
- *
- **/
+ */
static void scsi_eh_offline_sdevs(struct list_head *work_q,
struct list_head *done_q)
{
@@ -1191,7 +1193,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
* is woken. In cases where the error code indicates an error that
* doesn't require the error handler read (i.e. we don't need to
* abort/reset), this function should return SUCCESS.
- **/
+ */
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
int rtn;
@@ -1372,7 +1374,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
*
* If scsi_allocate_request() fails for what ever reason, we
* completely forget to lock the door.
- **/
+ */
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
unsigned char cmnd[MAX_COMMAND_SIZE];
@@ -1396,7 +1398,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
* Notes:
* When we entered the error handler, we blocked all further i/o to
* this device. we need to 'reverse' this process.
- **/
+ */
static void scsi_restart_operations(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
@@ -1440,9 +1442,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
/**
* scsi_eh_ready_devs - check device ready state and recover if not.
* @shost: host to be recovered.
- * @eh_done_q: list_head for processed commands.
- *
- **/
+ * @work_q: &list_head for pending commands.
+ * @done_q: &list_head for processed commands.
+ */
void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *work_q,
struct list_head *done_q)
@@ -1458,8 +1460,7 @@ EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
/**
* scsi_eh_flush_done_q - finish processed commands or retry them.
* @done_q: list_head of processed commands.
- *
- **/
+ */
void scsi_eh_flush_done_q(struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
@@ -1513,7 +1514,7 @@ EXPORT_SYMBOL(scsi_eh_flush_done_q);
* scsi_finish_cmd() called for it. we do all of the retry stuff
* here, so when we restart the host after we return it should have an
* empty queue.
- **/
+ */
static void scsi_unjam_host(struct Scsi_Host *shost)
{
unsigned long flags;
@@ -1540,7 +1541,7 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
* Notes:
* This is the main error handling loop. This is run as a kernel thread
* for every SCSI host and handles all error handling activity.
- **/
+ */
int scsi_error_handler(void *data)
{
struct Scsi_Host *shost = data;
@@ -1769,7 +1770,7 @@ EXPORT_SYMBOL(scsi_reset_provider);
*
* Return value:
* 1 if valid sense data information found, else 0;
- **/
+ */
int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
struct scsi_sense_hdr *sshdr)
{
@@ -1819,14 +1820,12 @@ int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr)
{
return scsi_normalize_sense(cmd->sense_buffer,
- sizeof(cmd->sense_buffer), sshdr);
+ SCSI_SENSE_BUFFERSIZE, sshdr);
}
EXPORT_SYMBOL(scsi_command_normalize_sense);
/**
- * scsi_sense_desc_find - search for a given descriptor type in
- * descriptor sense data format.
- *
+ * scsi_sense_desc_find - search for a given descriptor type in descriptor sense data format.
* @sense_buffer: byte array of descriptor format sense data
* @sb_len: number of valid bytes in sense_buffer
* @desc_type: value of descriptor type to find
@@ -1837,7 +1836,7 @@ EXPORT_SYMBOL(scsi_command_normalize_sense);
*
* Return value:
* pointer to start of (first) descriptor if found else NULL
- **/
+ */
const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
int desc_type)
{
@@ -1865,9 +1864,7 @@ const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
EXPORT_SYMBOL(scsi_sense_desc_find);
/**
- * scsi_get_sense_info_fld - attempts to get information field from
- * sense data (either fixed or descriptor format)
- *
+ * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
* @sense_buffer: byte array of sense data
* @sb_len: number of valid bytes in sense_buffer
* @info_out: pointer to 64 integer where 8 or 4 byte information
@@ -1875,7 +1872,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
*
* Return value:
* 1 if information field found, 0 if not found.
- **/
+ */
int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out)
{
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 32293f451669..28b19ef26309 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -174,10 +174,15 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
}
-/*
- * the scsi_ioctl() function differs from most ioctls in that it does
- * not take a major/minor number as the dev field. Rather, it takes
- * a pointer to a scsi_devices[] element, a structure.
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a &struct scsi_device.
*/
int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
@@ -239,7 +244,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
case SCSI_IOCTL_TEST_UNIT_READY:
return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
- NORMAL_RETRIES);
+ NORMAL_RETRIES, NULL);
case SCSI_IOCTL_START_UNIT:
scsi_cmd[0] = START_STOP;
scsi_cmd[1] = 0;
@@ -264,9 +269,12 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
}
EXPORT_SYMBOL(scsi_ioctl);
-/*
- * the scsi_nonblock_ioctl() function is designed for ioctls which may
- * be executed even if the device is in recovery.
+/**
+ * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET
+ * @sdev: scsi device receiving ioctl
+ * @cmd: Must be SG_SCSI_RESET
+ * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
+ * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag.
*/
int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
void __user *arg, struct file *filp)
@@ -276,7 +284,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
/* The first set of ioctls may be executed even if we're doing
* error processing, as long as the device was opened
* non-blocking */
- if (filp && filp->f_flags & O_NONBLOCK) {
+ if (filp && (filp->f_flags & O_NONBLOCK)) {
if (scsi_host_in_recovery(sdev->host))
return -ENODEV;
} else if (!scsi_block_when_processing_errors(sdev))
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a9ac5b1b1667..4cf902efbdbf 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -175,7 +175,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
*
* returns the req->errors value which is the scsi_cmnd result
* field.
- **/
+ */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries, int flags)
@@ -274,7 +274,7 @@ static void scsi_bi_endio(struct bio *bio, int error)
/**
* scsi_req_map_sg - map a scatterlist into a request
* @rq: request to fill
- * @sg: scatterlist
+ * @sgl: scatterlist
* @nsegs: number of elements
* @bufflen: len of buffer
* @gfp: memory allocation flags
@@ -365,14 +365,16 @@ free_bios:
* @sdev: scsi device
* @cmd: scsi command
* @cmd_len: length of scsi cdb
- * @data_direction: data direction
+ * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
* @buffer: data buffer (this can be a kernel buffer or scatterlist)
* @bufflen: len of buffer
* @use_sg: if buffer is a scatterlist this is the number of elements
* @timeout: request timeout in seconds
* @retries: number of times to retry request
- * @flags: or into request flags
- **/
+ * @privdata: data passed to done()
+ * @done: callback function when done
+ * @gfp: memory allocation flags
+ */
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
int cmd_len, int data_direction, void *buffer, unsigned bufflen,
int use_sg, int timeout, int retries, void *privdata,
@@ -439,7 +441,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
cmd->serial_number = 0;
cmd->resid = 0;
- memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (cmd->cmd_len == 0)
cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}
@@ -524,7 +526,7 @@ static void scsi_run_queue(struct request_queue *q)
struct Scsi_Host *shost = sdev->host;
unsigned long flags;
- if (sdev->single_lun)
+ if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
spin_lock_irqsave(shost->host_lock, flags);
@@ -1102,7 +1104,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*
* Returns: 0 on success
* BLKPREP_DEFER if the failure is retryable
- * BLKPREP_KILL if the failure is fatal
*/
static int scsi_init_io(struct scsi_cmnd *cmd)
{
@@ -1136,17 +1137,9 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
* each segment.
*/
count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
- if (likely(count <= cmd->use_sg)) {
- cmd->use_sg = count;
- return BLKPREP_OK;
- }
-
- printk(KERN_ERR "Incorrect number of segments after building list\n");
- printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
- printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
- req->current_nr_sectors);
-
- return BLKPREP_KILL;
+ BUG_ON(count > cmd->use_sg);
+ cmd->use_sg = count;
+ return BLKPREP_OK;
}
static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
@@ -1557,7 +1550,7 @@ static void scsi_request_fn(struct request_queue *q)
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
- if (sdev->single_lun) {
+ if (scsi_target(sdev)->single_lun) {
if (scsi_target(sdev)->starget_sdev_user &&
scsi_target(sdev)->starget_sdev_user != sdev)
goto not_ready;
@@ -1675,6 +1668,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+
+ /*
+ * set a reasonable default alignment on word boundaries: the
+ * host and device may alter it using
+ * blk_queue_update_dma_alignment() later.
+ */
+ blk_queue_dma_alignment(q, 0x03);
+
return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);
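[Editorial example, not part of the patch] Per the new comment, a host or driver can tighten the default word alignment later. A sketch of doing so from a slave_configure() hook (hypothetical driver; the 512-byte mask is just an illustrative value):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static int example_slave_configure(struct scsi_device *sdev)
{
	/* require 512-byte aligned buffers instead of the 0x03 default */
	blk_queue_update_dma_alignment(sdev->request_queue, 511);
	return 0;
}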
@@ -1804,7 +1805,7 @@ void scsi_exit_queue(void)
* @timeout: command timeout
* @retries: number of retries before failing
* @data: returns a structure abstracting the mode header data
- * @sense: place to put sense data (or NULL if no sense to be collected).
+ * @sshdr: place to put sense data (or NULL if no sense to be collected).
* must be SCSI_SENSE_BUFFERSIZE big.
*
* Returns zero if successful; negative error number or scsi
@@ -1871,8 +1872,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
EXPORT_SYMBOL_GPL(scsi_mode_select);
/**
- * scsi_mode_sense - issue a mode sense, falling back from 10 to
- * six bytes if necessary.
+ * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
* @sdev: SCSI device to be queried
* @dbd: set if mode sense will allow block descriptors to be returned
* @modepage: mode page being requested
@@ -1881,13 +1881,13 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
* @timeout: command timeout
* @retries: number of retries before failing
* @data: returns a structure abstracting the mode header data
- * @sense: place to put sense data (or NULL if no sense to be collected).
+ * @sshdr: place to put sense data (or NULL if no sense to be collected).
* must be SCSI_SENSE_BUFFERSIZE big.
*
* Returns zero if unsuccessful, or the header offset (either 4
* or 8 depending on whether a six or ten byte command was
* issued) if successful.
- **/
+ */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
unsigned char *buffer, int len, int timeout, int retries,
@@ -1981,40 +1981,69 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
}
EXPORT_SYMBOL(scsi_mode_sense);
+/**
+ * scsi_test_unit_ready - test if unit is ready
+ * @sdev: scsi device to change the state of.
+ * @timeout: command timeout
+ * @retries: number of retries before failing
+ * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
+ * returning sense. Make sure that this is cleared before passing
+ * in.
+ *
+ * Returns zero if successful, or an error if the TUR failed. For
+ * removable media, a return of NOT_READY or UNIT_ATTENTION is
+ * translated to success, with the ->changed flag updated.
+ **/
int
-scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
+scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
+ struct scsi_sense_hdr *sshdr_external)
{
char cmd[] = {
TEST_UNIT_READY, 0, 0, 0, 0, 0,
};
- struct scsi_sense_hdr sshdr;
+ struct scsi_sense_hdr *sshdr;
int result;
-
- result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
- timeout, retries);
+
+ if (!sshdr_external)
+ sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+ else
+ sshdr = sshdr_external;
+
+ /* try to eat the UNIT_ATTENTION if there are enough retries */
+ do {
+ result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
+ timeout, retries);
+ } while ((driver_byte(result) & DRIVER_SENSE) &&
+ sshdr && sshdr->sense_key == UNIT_ATTENTION &&
+ --retries);
+
+ if (!sshdr)
+ /* could not allocate sense buffer, so can't process it */
+ return result;
if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
- if ((scsi_sense_valid(&sshdr)) &&
- ((sshdr.sense_key == UNIT_ATTENTION) ||
- (sshdr.sense_key == NOT_READY))) {
+ if ((scsi_sense_valid(sshdr)) &&
+ ((sshdr->sense_key == UNIT_ATTENTION) ||
+ (sshdr->sense_key == NOT_READY))) {
sdev->changed = 1;
result = 0;
}
}
+ if (!sshdr_external)
+ kfree(sshdr);
return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
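[Editorial example, not part of the patch] A sketch of calling the extended scsi_test_unit_ready() (hypothetical caller, illustrative timeout and retry values). Passing NULL for the last argument keeps the old behaviour; passing a cleared scsi_sense_hdr lets the caller inspect the sense data:

#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static int example_check_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int ret;

	memset(&sshdr, 0, sizeof(sshdr));	/* must be cleared before use */
	ret = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
	if (ret && scsi_sense_valid(&sshdr))
		sdev_printk(KERN_INFO, sdev,
			    "TUR failed, sense key 0x%x\n", sshdr.sense_key);
	return ret;
}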
/**
- * scsi_device_set_state - Take the given device through the device
- * state model.
+ * scsi_device_set_state - Take the given device through the device state model.
* @sdev: scsi device to change the state of.
* @state: state to change to.
*
* Returns zero if unsuccessful or an error if the requested
* transition is illegal.
- **/
+ */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
@@ -2264,7 +2293,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
* Must be called with user context, may sleep.
*
* Returns zero if unsuccessful or an error if not.
- **/
+ */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
@@ -2289,7 +2318,7 @@ EXPORT_SYMBOL(scsi_device_quiesce);
* queues.
*
* Must be called with user context, may sleep.
- **/
+ */
void
scsi_device_resume(struct scsi_device *sdev)
{
@@ -2326,8 +2355,7 @@ scsi_target_resume(struct scsi_target *starget)
EXPORT_SYMBOL(scsi_target_resume);
/**
- * scsi_internal_device_block - internal function to put a device
- * temporarily into the SDEV_BLOCK state
+ * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
* @sdev: device to block
*
* Block request made by scsi lld's to temporarily stop all
@@ -2342,7 +2370,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires.
* This routine assumes the host_lock is held on entry.
- **/
+ */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
@@ -2382,7 +2410,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
* (which must be a legal transition) allowing the midlayer to
* goose the queue for this device. This routine assumes the
* host_lock is held upon entry.
- **/
+ */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
@@ -2460,7 +2488,7 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
/**
* scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
- * @sg: scatter-gather list
+ * @sgl: scatter-gather list
* @sg_count: number of segments in sg
* @offset: offset in bytes into sg, on return offset into the mapped area
* @len: bytes to map, on return number of bytes mapped
@@ -2509,8 +2537,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
/**
- * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
- * mapped with scsi_kmap_atomic_sg
+ * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
* @virt: virtual address to be unmapped
*/
void scsi_kunmap_atomic_sg(void *virt)
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 40579edca101..3e1591828171 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -32,11 +32,12 @@ EXPORT_SYMBOL_GPL(scsi_nl_sock);
/**
- * scsi_nl_rcv_msg -
- * Receive message handler. Extracts message from a receive buffer.
+ * scsi_nl_rcv_msg - Receive message handler.
+ * @skb: socket receive buffer
+ *
+ * Description: Extracts message from a receive buffer.
* Validates message header and calls appropriate transport message handler
*
- * @skb: socket receive buffer
*
**/
static void
@@ -99,9 +100,7 @@ next_msg:
/**
- * scsi_nl_rcv_event -
- * Event handler for a netlink socket.
- *
+ * scsi_nl_rcv_event - Event handler for a netlink socket.
* @this: event notifier block
* @event: event type
* @ptr: event payload
@@ -129,9 +128,7 @@ static struct notifier_block scsi_netlink_notifier = {
/**
- * scsi_netlink_init -
- * Called by SCSI subsystem to intialize the SCSI transport netlink
- * interface
+ * scsi_netlink_init - Called by SCSI subsystem to initialize the SCSI transport netlink interface
*
**/
void
@@ -160,9 +157,7 @@ scsi_netlink_init(void)
/**
- * scsi_netlink_exit -
- * Called by SCSI subsystem to disable the SCSI transport netlink
- * interface
+ * scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface
*
**/
void
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index bb6f051beda8..ed395154a5b1 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -45,6 +45,16 @@ static struct proc_dir_entry *proc_scsi;
/* Protect sht->present and sht->proc_dir */
static DEFINE_MUTEX(global_host_template_mutex);
+/**
+ * proc_scsi_read - handle read from /proc by calling host's proc_info() command
+ * @buffer: passed to proc_info
+ * @start: passed to proc_info
+ * @offset: passed to proc_info
+ * @length: passed to proc_info
+ * @eof: returns whether length read was less than requested
+ * @data: pointer to a &struct Scsi_Host
+ */
+
static int proc_scsi_read(char *buffer, char **start, off_t offset,
int length, int *eof, void *data)
{
@@ -57,6 +67,13 @@ static int proc_scsi_read(char *buffer, char **start, off_t offset,
return n;
}
+/**
+ * proc_scsi_write_proc - Handle write to /proc by calling host's proc_info()
+ * @file: not used
+ * @buf: source of data to write.
+ * @count: number of bytes (at most PROC_BLOCK_SIZE) to write.
+ * @data: pointer to &struct Scsi_Host
+ */
static int proc_scsi_write_proc(struct file *file, const char __user *buf,
unsigned long count, void *data)
{
@@ -80,6 +97,13 @@ out:
return ret;
}
+/**
+ * scsi_proc_hostdir_add - Create directory in /proc for a scsi host
+ * @sht: owner of this directory
+ *
+ * Sets sht->proc_dir to the new directory.
+ */
+
void scsi_proc_hostdir_add(struct scsi_host_template *sht)
{
if (!sht->proc_info)
@@ -97,6 +121,10 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht)
mutex_unlock(&global_host_template_mutex);
}
+/**
+ * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host
+ * @sht: owner of directory
+ */
void scsi_proc_hostdir_rm(struct scsi_host_template *sht)
{
if (!sht->proc_info)
@@ -110,6 +138,11 @@ void scsi_proc_hostdir_rm(struct scsi_host_template *sht)
mutex_unlock(&global_host_template_mutex);
}
+
+/**
+ * scsi_proc_host_add - Add entry for this host to appropriate /proc dir
+ * @shost: host to add
+ */
void scsi_proc_host_add(struct Scsi_Host *shost)
{
struct scsi_host_template *sht = shost->hostt;
@@ -133,6 +166,10 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
p->owner = sht->module;
}
+/**
+ * scsi_proc_host_rm - remove this host's entry from /proc
+ * @shost: which host
+ */
void scsi_proc_host_rm(struct Scsi_Host *shost)
{
char name[10];
@@ -143,7 +180,14 @@ void scsi_proc_host_rm(struct Scsi_Host *shost)
sprintf(name,"%d", shost->host_no);
remove_proc_entry(name, shost->hostt->proc_dir);
}
-
+/**
+ * proc_print_scsidevice - return data about this host
+ * @dev: A scsi device
+ * @data: &struct seq_file to output to.
+ *
+ * Description: prints Host, Channel, Id, Lun, Vendor, Model, Rev, Type,
+ * and revision.
+ */
static int proc_print_scsidevice(struct device *dev, void *data)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -189,6 +233,21 @@ static int proc_print_scsidevice(struct device *dev, void *data)
return 0;
}
+/**
+ * scsi_add_single_device - Respond to user request to probe for/add device
+ * @host: user-supplied decimal integer
+ * @channel: user-supplied decimal integer
+ * @id: user-supplied decimal integer
+ * @lun: user-supplied decimal integer
+ *
+ * Description: called by writing "scsi add-single-device" to /proc/scsi/scsi.
+ *
+ * does scsi_host_lookup() and either user_scan() if that transport
+ * type supports it, or else scsi_scan_host_selected()
+ *
+ * Note: this seems to be aimed exclusively at SCSI parallel busses.
+ */
+
static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
{
struct Scsi_Host *shost;
@@ -206,6 +265,16 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
return error;
}
+/**
+ * scsi_remove_single_device - Respond to user request to remove a device
+ * @host: user-supplied decimal integer
+ * @channel: user-supplied decimal integer
+ * @id: user-supplied decimal integer
+ * @lun: user-supplied decimal integer
+ *
+ * Description: called by writing "scsi remove-single-device" to
+ * /proc/scsi/scsi. Does a scsi_device_lookup() and scsi_remove_device()
+ */
static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
{
struct scsi_device *sdev;
@@ -226,6 +295,25 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
return error;
}
+/**
+ * proc_scsi_write - handle writes to /proc/scsi/scsi
+ * @file: not used
+ * @buf: buffer to write
+ * @length: length of buf, at most PAGE_SIZE
+ * @ppos: not used
+ *
+ * Description: this provides a legacy mechanism to add or remove devices by
+ * Host, Channel, ID, and Lun. To use,
+ * "echo 'scsi add-single-device 0 1 2 3' > /proc/scsi/scsi" or
+ * "echo 'scsi remove-single-device 0 1 2 3' > /proc/scsi/scsi" with
+ * "0 1 2 3" replaced by the Host, Channel, Id, and Lun.
+ *
+ * Note: this seems to be aimed at parallel SCSI. Most modern busses (USB,
+ * SATA, Firewire, Fibre Channel, etc) dynamically assign these values to
+ * provide a unique identifier and nothing more.
+ */
+
+
static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
size_t length, loff_t *ppos)
{
@@ -291,6 +379,11 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
return err;
}
+/**
+ * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices)
+ * @s: output goes here
+ * @p: not used
+ */
static int proc_scsi_show(struct seq_file *s, void *p)
{
seq_printf(s, "Attached devices:\n");
@@ -298,10 +391,17 @@ static int proc_scsi_show(struct seq_file *s, void *p)
return 0;
}
+/**
+ * proc_scsi_open - glue function
+ * @inode: not used
+ * @file: passed to single_open()
+ *
+ * Associates proc_scsi_show with this file
+ */
static int proc_scsi_open(struct inode *inode, struct file *file)
{
/*
- * We don't really needs this for the write case but it doesn't
+ * We don't really need this for the write case but it doesn't
* harm either.
*/
return single_open(file, proc_scsi_show, NULL);
@@ -315,6 +415,9 @@ static const struct file_operations proc_scsi_operations = {
.release = single_release,
};
+/**
+ * scsi_init_procfs - create scsi and scsi/scsi in procfs
+ */
int __init scsi_init_procfs(void)
{
struct proc_dir_entry *pde;
@@ -336,6 +439,9 @@ err1:
return -ENOMEM;
}
+/**
+ * scsi_exit_procfs - Remove scsi/scsi and scsi from procfs
+ */
void scsi_exit_procfs(void)
{
remove_proc_entry("scsi/scsi", NULL);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 40ea71cd2ca6..1dc165ad17fb 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -221,6 +221,9 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
/**
* scsi_alloc_sdev - allocate and setup a scsi_Device
+ * @starget: which target to allocate a &scsi_device for
+ * @lun: which lun
+ * @hostdata: usually NULL and set by ->slave_alloc instead
*
* Description:
* Allocate, initialize for io, and return a pointer to a scsi_Device.
@@ -472,7 +475,6 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
/**
* scsi_target_reap - check to see if target is in use and destroy if not
- *
* @starget: target to be checked
*
* This is used after removing a LUN or doing a last put of the target
@@ -863,7 +865,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->no_start_on_add = 1;
if (*bflags & BLIST_SINGLELUN)
- sdev->single_lun = 1;
+ scsi_target(sdev)->single_lun = 1;
sdev->use_10_for_rw = 1;
@@ -928,8 +930,7 @@ static inline void scsi_destroy_sdev(struct scsi_device *sdev)
#ifdef CONFIG_SCSI_LOGGING
/**
- * scsi_inq_str - print INQUIRY data from min to max index,
- * strip trailing whitespace
+ * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
* @buf: Output buffer with at least end-first+1 bytes of space
* @inq: Inquiry buffer (input)
* @first: Offset of string into inq
@@ -957,9 +958,10 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
* scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
* @starget: pointer to target device structure
* @lun: LUN of target device
- * @sdevscan: probe the LUN corresponding to this scsi_device
- * @sdevnew: store the value of any new scsi_device allocated
* @bflagsp: store bflags here if not NULL
+ * @sdevp: probe the LUN corresponding to this scsi_device
+ * @rescan: if nonzero skip some code only needed on first scan
+ * @hostdata: passed to scsi_alloc_sdev()
*
* Description:
* Call scsi_probe_lun, if a LUN with an attached device is found,
@@ -1110,6 +1112,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* scsi_sequential_lun_scan - sequentially scan a SCSI target
* @starget: pointer to target structure to scan
* @bflags: black/white list flag for LUN 0
+ * @scsi_level: Which version of the standard does this device adhere to
+ * @rescan: passed to scsi_probe_and_add_lun()
*
* Description:
* Generally, scan from LUN 1 (LUN 0 is assumed to already have been
@@ -1220,7 +1224,7 @@ EXPORT_SYMBOL(scsilun_to_int);
/**
* int_to_scsilun: reverts an int into a scsi_lun
- * @int: integer to be reverted
+ * @lun: integer to be reverted
* @scsilun: struct scsi_lun to be set.
*
* Description:
@@ -1252,18 +1256,22 @@ EXPORT_SYMBOL(int_to_scsilun);
/**
* scsi_report_lun_scan - Scan using SCSI REPORT LUN results
- * @sdevscan: scan the host, channel, and id of this scsi_device
+ * @starget: which target
+ * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
+ * @rescan: nonzero if we can skip code only needed on first scan
*
* Description:
- * If @sdevscan is for a SCSI-3 or up device, send a REPORT LUN
- * command, and scan the resulting list of LUNs by calling
- * scsi_probe_and_add_lun.
+ * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
+ * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
*
- * Modifies sdevscan->lun.
+ * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
+ * LUNs even if it's older than SCSI-3.
+ * If BLIST_NOREPORTLUN is set, return 1 always.
+ * If BLIST_NOLUN is set, return 0 always.
*
* Return:
* 0: scan completed (or no memory, so further scanning is futile)
- * 1: no report lun scan, or not configured
+ * 1: could not scan with REPORT LUN
**/
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
int rescan)
@@ -1481,6 +1489,7 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
if (scsi_host_scan_allowed(shost))
scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
mutex_unlock(&shost->scan_mutex);
+ transport_configure_device(&starget->dev);
scsi_target_reap(starget);
put_device(&starget->dev);
@@ -1561,6 +1570,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
out_reap:
/* now determine if the target has any children at all
* and if not, nuke it */
+ transport_configure_device(&starget->dev);
scsi_target_reap(starget);
put_device(&starget->dev);
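Editor's note: for reference, a minimal sketch (not from this patch) of the two LUN conversion helpers whose kernel-doc is corrected above; it simply round-trips a flat LUN through the 8-byte wire representation:

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Sketch only: encode a flat LUN as used in REPORT LUNS data and back. */
static void lun_roundtrip_example(unsigned int lun)
{
	struct scsi_lun wire_lun;

	int_to_scsilun(lun, &wire_lun);
	WARN_ON(scsilun_to_int(&wire_lun) != lun);
}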
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 00b386677392..ed83cdb6e67d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1018,6 +1018,7 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
}
transport_register_device(&shost->shost_gendev);
+ transport_configure_device(&shost->shost_gendev);
return 0;
}
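Editor's note: several hunks in this series pair transport_register_device() with a new transport_configure_device() call. A hedged sketch of the intended ordering of the transport-class helpers follows; the surrounding driver code is hypothetical:

#include <linux/transport_class.h>

static void example_transport_lifecycle(struct device *dev)
{
	transport_register_device(dev);    /* setup + add the transport attributes */
	transport_configure_device(dev);   /* run the class' configure callback */

	/* ... the device is live here ... */

	transport_unregister_device(dev);  /* remove + destroy on teardown */
}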
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index 9815a1a2db24..d2557dbc2dc1 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -112,7 +112,7 @@ int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 itn_id,
memset(&ev, 0, sizeof(ev));
ev.p.cmd_req.host_no = shost->host_no;
ev.p.cmd_req.itn_id = itn_id;
- ev.p.cmd_req.data_len = cmd->request_bufflen;
+ ev.p.cmd_req.data_len = scsi_bufflen(cmd);
memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
ev.p.cmd_req.attribute = cmd->tag;
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index a91761c3645f..93ece8f4e5de 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -180,7 +180,7 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
container_of(work, struct scsi_tgt_cmd, work);
struct scsi_cmnd *cmd = tcmd->rq->special;
- dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
+ dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction,
rq_data_dir(cmd->request));
scsi_unmap_user_pages(tcmd);
scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
@@ -327,11 +327,11 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
{
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
- dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+ dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
- if (cmd->request_buffer)
+ if (scsi_sglist(cmd))
scsi_free_sgtable(cmd);
queue_work(scsi_tgtd, &tcmd->work);
@@ -342,7 +342,7 @@ static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
int err;
- dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+ dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
switch (err) {
@@ -365,16 +365,12 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
cmd->request_bufflen = rq->data_len;
- dprintk("cmd %p cnt %d %lu\n", cmd, cmd->use_sg, rq_data_dir(rq));
- count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
- if (likely(count <= cmd->use_sg)) {
- cmd->use_sg = count;
- return 0;
- }
-
- eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
- scsi_free_sgtable(cmd);
- return -EINVAL;
+ dprintk("cmd %p cnt %d %lu\n", cmd, scsi_sg_count(cmd),
+ rq_data_dir(rq));
+ count = blk_rq_map_sg(rq->q, rq, scsi_sglist(cmd));
+ BUG_ON(count > cmd->use_sg);
+ cmd->use_sg = count;
+ return 0;
}
/* TODO: test this crap and replace bio_map_user with new interface maybe */
@@ -496,8 +492,8 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
}
cmd = rq->special;
- dprintk("cmd %p scb %x result %d len %d bufflen %u %lu %x\n",
- cmd, cmd->cmnd[0], result, len, cmd->request_bufflen,
+ dprintk("cmd %p scb %x result %d len %d bufflen %u %u %x\n",
+ cmd, cmd->cmnd[0], result, len, scsi_bufflen(cmd),
rq_data_dir(rq), cmd->cmnd[0]);
if (result == TASK_ABORTED) {
@@ -617,7 +613,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
struct Scsi_Host *shost;
int err = -EINVAL;
- dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
+ dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
shost = scsi_host_lookup(host_no);
if (IS_ERR(shost)) {
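Editor's note: the scsi_tgt hunks above convert direct field accesses (request_buffer, request_bufflen, use_sg) to the accessor macros. A small illustrative sketch, not part of the patch, of walking a command's data with those accessors:

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Sums the scatterlist lengths; for a well-formed command the total
 * matches scsi_bufflen(cmd). */
static unsigned int count_command_bytes(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	unsigned int bytes = 0;
	int i;

	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
		bytes += sg->length;

	return bytes;
}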
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 7a7cfe583b2a..b1119da6e88c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -481,9 +481,9 @@ MODULE_PARM_DESC(dev_loss_tmo,
" exceeded, the scsi target is removed. Value should be"
" between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
-/**
+/*
* Netlink Infrastructure
- **/
+ */
static atomic_t fc_event_seq;
@@ -491,10 +491,10 @@ static atomic_t fc_event_seq;
* fc_get_event_number - Obtain the next sequential FC event number
*
* Notes:
- * We could have inline'd this, but it would have required fc_event_seq to
+ * We could have inlined this, but it would have required fc_event_seq to
* be exposed. For now, live with the subroutine call.
* Atomic used to avoid lock/unlock...
- **/
+ */
u32
fc_get_event_number(void)
{
@@ -505,7 +505,6 @@ EXPORT_SYMBOL(fc_get_event_number);
/**
* fc_host_post_event - called to post an even on an fc_host.
- *
* @shost: host the event occurred on
* @event_number: fc event number obtained from get_fc_event_number()
* @event_code: fc_host event being posted
@@ -513,7 +512,7 @@ EXPORT_SYMBOL(fc_get_event_number);
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
void
fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
enum fc_host_event_code event_code, u32 event_data)
@@ -579,17 +578,16 @@ EXPORT_SYMBOL(fc_host_post_event);
/**
- * fc_host_post_vendor_event - called to post a vendor unique event on
- * a fc_host
- *
+ * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
* @shost: host the event occurred on
* @event_number: fc event number obtained from get_fc_event_number()
* @data_len: amount, in bytes, of vendor unique data
* @data_buf: pointer to vendor unique data
+ * @vendor_id: Vendor id
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
void
fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
u32 data_len, char * data_buf, u64 vendor_id)
@@ -1900,7 +1898,6 @@ static int fc_vport_match(struct attribute_container *cont,
/**
* fc_timed_out - FC Transport I/O timeout intercept handler
- *
* @scmd: The SCSI command which timed out
*
* This routine protects against error handlers getting invoked while a
@@ -1920,7 +1917,7 @@ static int fc_vport_match(struct attribute_container *cont,
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
static enum scsi_eh_timer_return
fc_timed_out(struct scsi_cmnd *scmd)
{
@@ -2133,7 +2130,7 @@ EXPORT_SYMBOL(fc_release_transport);
* 1 - work queued for execution
* 0 - work is already queued
* -EINVAL - work queue doesn't exist
- **/
+ */
static int
fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
@@ -2152,7 +2149,7 @@ fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
/**
* fc_flush_work - Flush a fc_host's workqueue.
* @shost: Pointer to Scsi_Host bound to fc_host.
- **/
+ */
static void
fc_flush_work(struct Scsi_Host *shost)
{
@@ -2175,7 +2172,7 @@ fc_flush_work(struct Scsi_Host *shost)
*
* Return value:
* 1 on success / 0 already queued / < 0 for error
- **/
+ */
static int
fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
unsigned long delay)
@@ -2195,7 +2192,7 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
/**
* fc_flush_devloss - Flush a fc_host's devloss workqueue.
* @shost: Pointer to Scsi_Host bound to fc_host.
- **/
+ */
static void
fc_flush_devloss(struct Scsi_Host *shost)
{
@@ -2212,21 +2209,20 @@ fc_flush_devloss(struct Scsi_Host *shost)
/**
- * fc_remove_host - called to terminate any fc_transport-related elements
- * for a scsi host.
- * @rport: remote port to be unblocked.
+ * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
+ * @shost: Which &Scsi_Host
*
* This routine is expected to be called immediately preceding a
* driver's call to scsi_remove_host().
*
* WARNING: A driver utilizing the fc_transport, which fails to call
- * this routine prior to scsi_remote_host(), will leave dangling
+ * this routine prior to scsi_remove_host(), will leave dangling
* objects in /sys/class/fc_remote_ports. Access to any of these
* objects can result in a system crash !!!
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
void
fc_remove_host(struct Scsi_Host *shost)
{
@@ -2281,10 +2277,10 @@ EXPORT_SYMBOL(fc_remove_host);
/**
* fc_starget_delete - called to delete the scsi decendents of an rport
- * (target and all sdevs)
- *
* @work: remote port to be operated on.
- **/
+ *
+ * Deletes target and all sdevs.
+ */
static void
fc_starget_delete(struct work_struct *work)
{
@@ -2303,9 +2299,8 @@ fc_starget_delete(struct work_struct *work)
/**
* fc_rport_final_delete - finish rport termination and delete it.
- *
* @work: remote port to be deleted.
- **/
+ */
static void
fc_rport_final_delete(struct work_struct *work)
{
@@ -2375,7 +2370,7 @@ fc_rport_final_delete(struct work_struct *work)
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
static struct fc_rport *
fc_rport_create(struct Scsi_Host *shost, int channel,
struct fc_rport_identifiers *ids)
@@ -2462,8 +2457,7 @@ delete_rport:
}
/**
- * fc_remote_port_add - notifies the fc transport of the existence
- * of a remote FC port.
+ * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
* @shost: scsi host the remote port is connected to.
* @channel: Channel on shost port connected to.
* @ids: The world wide names, fc address, and FC4 port
@@ -2499,7 +2493,7 @@ delete_rport:
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
struct fc_rport *
fc_remote_port_add(struct Scsi_Host *shost, int channel,
struct fc_rport_identifiers *ids)
@@ -2683,19 +2677,18 @@ EXPORT_SYMBOL(fc_remote_port_add);
/**
- * fc_remote_port_delete - notifies the fc transport that a remote
- * port is no longer in existence.
+ * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
* @rport: The remote port that no longer exists
*
* The LLDD calls this routine to notify the transport that a remote
* port is no longer part of the topology. Note: Although a port
* may no longer be part of the topology, it may persist in the remote
* ports displayed by the fc_host. We do this under 2 conditions:
- * - If the port was a scsi target, we delay its deletion by "blocking" it.
+ * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
* This allows the port to temporarily disappear, then reappear without
* disrupting the SCSI device tree attached to it. During the "blocked"
* period the port will still exist.
- * - If the port was a scsi target and disappears for longer than we
+ * 2) If the port was a scsi target and disappears for longer than we
* expect, we'll delete the port and tear down the SCSI device tree
* attached to it. However, we want to semi-persist the target id assigned
* to that port if it eventually does exist. The port structure will
@@ -2709,7 +2702,8 @@ EXPORT_SYMBOL(fc_remote_port_add);
* temporary blocked state. From the LLDD's perspective, the rport no
* longer exists. From the SCSI midlayer's perspective, the SCSI target
* exists, but all sdevs on it are blocked from further I/O. The following
- * is then expected:
+ * is then expected.
+ *
* If the remote port does not return (signaled by a LLDD call to
* fc_remote_port_add()) within the dev_loss_tmo timeout, then the
* scsi target is removed - killing all outstanding i/o and removing the
@@ -2731,7 +2725,7 @@ EXPORT_SYMBOL(fc_remote_port_add);
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
void
fc_remote_port_delete(struct fc_rport *rport)
{
@@ -2792,12 +2786,12 @@ fc_remote_port_delete(struct fc_rport *rport)
EXPORT_SYMBOL(fc_remote_port_delete);
/**
- * fc_remote_port_rolechg - notifies the fc transport that the roles
- * on a remote may have changed.
+ * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote port may have changed.
* @rport: The remote port that changed.
+ * @roles: New roles for this port.
*
- * The LLDD calls this routine to notify the transport that the roles
- * on a remote port may have changed. The largest effect of this is
+ * Description: The LLDD calls this routine to notify the transport that the
+ * roles on a remote port may have changed. The largest effect of this is
* if a port now becomes a FCP Target, it must be allocated a
* scsi target id. If the port is no longer a FCP target, any
* scsi target id value assigned to it will persist in case the
@@ -2810,7 +2804,7 @@ EXPORT_SYMBOL(fc_remote_port_delete);
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
void
fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
{
@@ -2875,12 +2869,12 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
EXPORT_SYMBOL(fc_remote_port_rolechg);
/**
- * fc_timeout_deleted_rport - Timeout handler for a deleted remote port,
- * which we blocked, and has now failed to return
- * in the allotted time.
- *
+ * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
* @work: rport target that failed to reappear in the allotted time.
- **/
+ *
+ * Description: Runs when a blocked remote port has failed to reappear
+ * within the allotted time, and completes its removal.
+ */
static void
fc_timeout_deleted_rport(struct work_struct *work)
{
@@ -2984,14 +2978,12 @@ fc_timeout_deleted_rport(struct work_struct *work)
}
/**
- * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
- * disconnected SCSI target.
- *
+ * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
* @work: rport to terminate io on.
*
* Notes: Only requests the failure of the io, not that all are flushed
* prior to returning.
- **/
+ */
static void
fc_timeout_fail_rport_io(struct work_struct *work)
{
@@ -3008,9 +3000,8 @@ fc_timeout_fail_rport_io(struct work_struct *work)
/**
* fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
- *
* @work: remote port to be scanned.
- **/
+ */
static void
fc_scsi_scan_rport(struct work_struct *work)
{
@@ -3047,7 +3038,7 @@ fc_scsi_scan_rport(struct work_struct *work)
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
static int
fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
@@ -3172,7 +3163,7 @@ delete_vport:
*
* Notes:
* This routine assumes no locks are held on entry.
- **/
+ */
int
fc_vport_terminate(struct fc_vport *vport)
{
@@ -3232,9 +3223,8 @@ EXPORT_SYMBOL(fc_vport_terminate);
/**
* fc_vport_sched_delete - workq-based delete request for a vport
- *
* @work: vport to be deleted.
- **/
+ */
static void
fc_vport_sched_delete(struct work_struct *work)
{
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5428d15f23c6..ef0e74264880 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,10 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
-#define ISCSI_SESSION_ATTRS 15
+#define ISCSI_SESSION_ATTRS 18
#define ISCSI_CONN_ATTRS 11
#define ISCSI_HOST_ATTRS 4
-#define ISCSI_TRANSPORT_VERSION "2.0-724"
+#define ISCSI_TRANSPORT_VERSION "2.0-867"
struct iscsi_internal {
int daemon_pid;
@@ -50,6 +50,7 @@ struct iscsi_internal {
};
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+static struct workqueue_struct *iscsi_eh_timer_workq;
/*
* list of registered transports and lock that must
@@ -115,6 +116,8 @@ static struct attribute_group iscsi_transport_group = {
.attrs = iscsi_transport_attrs,
};
+
+
static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{
@@ -124,13 +127,30 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
memset(ihost, 0, sizeof(*ihost));
INIT_LIST_HEAD(&ihost->sessions);
mutex_init(&ihost->mutex);
+
+ snprintf(ihost->unbind_workq_name, KOBJ_NAME_LEN, "iscsi_unbind_%d",
+ shost->host_no);
+ ihost->unbind_workq = create_singlethread_workqueue(
+ ihost->unbind_workq_name);
+ if (!ihost->unbind_workq)
+ return -ENOMEM;
+ return 0;
+}
+
+static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+ struct class_device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_host *ihost = shost->shost_data;
+
+ destroy_workqueue(ihost->unbind_workq);
return 0;
}
static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
"iscsi_host",
iscsi_setup_host,
- NULL,
+ iscsi_remove_host,
NULL);
static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
@@ -252,7 +272,7 @@ static void session_recovery_timedout(struct work_struct *work)
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
if (!cancel_delayed_work(&session->recovery_work))
- flush_scheduled_work();
+ flush_workqueue(iscsi_eh_timer_workq);
scsi_target_unblock(&session->dev);
}
EXPORT_SYMBOL_GPL(iscsi_unblock_session);
@@ -260,11 +280,40 @@ EXPORT_SYMBOL_GPL(iscsi_unblock_session);
void iscsi_block_session(struct iscsi_cls_session *session)
{
scsi_target_block(&session->dev);
- schedule_delayed_work(&session->recovery_work,
- session->recovery_tmo * HZ);
+ queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
+ session->recovery_tmo * HZ);
}
EXPORT_SYMBOL_GPL(iscsi_block_session);
+static void __iscsi_unbind_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_host *ihost = shost->shost_data;
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+ if (list_empty(&session->host_list)) {
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+ list_del_init(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+ scsi_remove_target(&session->dev);
+ iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+}
+
+static int iscsi_unbind_session(struct iscsi_cls_session *session)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_host *ihost = shost->shost_data;
+
+ return queue_work(ihost->unbind_workq, &session->unbind_work);
+}
+
struct iscsi_cls_session *
iscsi_alloc_session(struct Scsi_Host *shost,
struct iscsi_transport *transport)
@@ -281,6 +330,7 @@ iscsi_alloc_session(struct Scsi_Host *shost,
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
INIT_LIST_HEAD(&session->host_list);
INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
/* this is released in the dev's release function */
scsi_host_get(shost);
@@ -297,6 +347,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost;
+ unsigned long flags;
int err;
ihost = shost->shost_data;
@@ -313,9 +364,15 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
}
transport_register_device(&session->dev);
+ spin_lock_irqsave(&sesslock, flags);
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
mutex_lock(&ihost->mutex);
list_add(&session->host_list, &ihost->sessions);
mutex_unlock(&ihost->mutex);
+
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
return 0;
release_host:
@@ -328,9 +385,10 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
* iscsi_create_session - create iscsi class session
* @shost: scsi host
* @transport: iscsi transport
+ * @target_id: which target
*
* This can be called from a LLD or iscsi_transport.
- **/
+ */
struct iscsi_cls_session *
iscsi_create_session(struct Scsi_Host *shost,
struct iscsi_transport *transport,
@@ -350,19 +408,58 @@ iscsi_create_session(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_create_session);
+static void iscsi_conn_release(struct device *dev)
+{
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
+ struct device *parent = conn->dev.parent;
+
+ kfree(conn);
+ put_device(parent);
+}
+
+static int iscsi_is_conn_dev(const struct device *dev)
+{
+ return dev->release == iscsi_conn_release;
+}
+
+static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+{
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+ return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+}
+
void iscsi_remove_session(struct iscsi_cls_session *session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+ int err;
- if (!cancel_delayed_work(&session->recovery_work))
- flush_scheduled_work();
+ spin_lock_irqsave(&sesslock, flags);
+ list_del(&session->sess_list);
+ spin_unlock_irqrestore(&sesslock, flags);
- mutex_lock(&ihost->mutex);
- list_del(&session->host_list);
- mutex_unlock(&ihost->mutex);
+ /*
+ * If we are blocked let commands flow again. The lld or iscsi
+ * layer should set up the queuecommand to fail commands.
+ */
+ iscsi_unblock_session(session);
+ iscsi_unbind_session(session);
+ /*
+ * If the session dropped while removing devices then we need to make
+ * sure it is not blocked
+ */
+ if (!cancel_delayed_work(&session->recovery_work))
+ flush_workqueue(iscsi_eh_timer_workq);
+ flush_workqueue(ihost->unbind_workq);
- scsi_remove_target(&session->dev);
+ /* hw iscsi may not have removed all connections from session */
+ err = device_for_each_child(&session->dev, NULL,
+ iscsi_iter_destroy_conn_fn);
+ if (err)
+ dev_printk(KERN_ERR, &session->dev, "iscsi: Could not delete "
+ "all connections for session. Error %d.\n", err);
transport_unregister_device(&session->dev);
device_del(&session->dev);
@@ -371,9 +468,9 @@ EXPORT_SYMBOL_GPL(iscsi_remove_session);
void iscsi_free_session(struct iscsi_cls_session *session)
{
+ iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
put_device(&session->dev);
}
-
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
@@ -382,7 +479,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
*
* Can be called by a LLD or iscsi_transport. There must not be
* any running connections.
- **/
+ */
int iscsi_destroy_session(struct iscsi_cls_session *session)
{
iscsi_remove_session(session);
@@ -391,20 +488,6 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
}
EXPORT_SYMBOL_GPL(iscsi_destroy_session);
-static void iscsi_conn_release(struct device *dev)
-{
- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
- struct device *parent = conn->dev.parent;
-
- kfree(conn);
- put_device(parent);
-}
-
-static int iscsi_is_conn_dev(const struct device *dev)
-{
- return dev->release == iscsi_conn_release;
-}
-
/**
* iscsi_create_conn - create iscsi class connection
* @session: iscsi cls session
@@ -418,12 +501,13 @@ static int iscsi_is_conn_dev(const struct device *dev)
* for software iscsi we could be trying to preallocate a connection struct
* in which case there could be two connection structs and cid would be
* non-zero.
- **/
+ */
struct iscsi_cls_conn *
iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
{
struct iscsi_transport *transport = session->transport;
struct iscsi_cls_conn *conn;
+ unsigned long flags;
int err;
conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
@@ -452,6 +536,11 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
goto release_parent_ref;
}
transport_register_device(&conn->dev);
+
+ spin_lock_irqsave(&connlock, flags);
+ list_add(&conn->conn_list, &connlist);
+ conn->active = 1;
+ spin_unlock_irqrestore(&connlock, flags);
return conn;
release_parent_ref:
@@ -465,17 +554,23 @@ EXPORT_SYMBOL_GPL(iscsi_create_conn);
/**
* iscsi_destroy_conn - destroy iscsi class connection
- * @session: iscsi cls session
+ * @conn: iscsi cls connection
*
* This can be called from a LLD or iscsi_transport.
- **/
+ */
int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&connlock, flags);
+ conn->active = 0;
+ list_del(&conn->conn_list);
+ spin_unlock_irqrestore(&connlock, flags);
+
transport_unregister_device(&conn->dev);
device_unregister(&conn->dev);
return 0;
}
-
EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
/*
@@ -685,132 +780,74 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
}
/**
- * iscsi_if_destroy_session_done - send session destr. completion event
- * @conn: last connection for session
- *
- * This is called by HW iscsi LLDs to notify userpsace that its HW has
- * removed a session.
- **/
-int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
+ * iscsi_session_event - send a session event to userspace
+ * @session: iscsi class session
+ * @event: type of event
+ */
+int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event)
{
struct iscsi_internal *priv;
- struct iscsi_cls_session *session;
struct Scsi_Host *shost;
struct iscsi_uevent *ev;
struct sk_buff *skb;
struct nlmsghdr *nlh;
- unsigned long flags;
int rc, len = NLMSG_SPACE(sizeof(*ev));
- priv = iscsi_if_transport_lookup(conn->transport);
+ priv = iscsi_if_transport_lookup(session->transport);
if (!priv)
return -EINVAL;
-
- session = iscsi_dev_to_session(conn->dev.parent);
shost = iscsi_session_to_shost(session);
skb = alloc_skb(len, GFP_KERNEL);
if (!skb) {
- dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
- "session creation event\n");
+ dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
+ "of session event %u\n", event);
return -ENOMEM;
}
nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
ev = NLMSG_DATA(nlh);
- ev->transport_handle = iscsi_handle(conn->transport);
- ev->type = ISCSI_KEVENT_DESTROY_SESSION;
- ev->r.d_session.host_no = shost->host_no;
- ev->r.d_session.sid = session->sid;
-
- /*
- * this will occur if the daemon is not up, so we just warn
- * the user and when the daemon is restarted it will handle it
- */
- rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
- if (rc < 0)
- dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
- "session destruction event. Check iscsi daemon\n");
-
- spin_lock_irqsave(&sesslock, flags);
- list_del(&session->sess_list);
- spin_unlock_irqrestore(&sesslock, flags);
+ ev->transport_handle = iscsi_handle(session->transport);
- spin_lock_irqsave(&connlock, flags);
- conn->active = 0;
- list_del(&conn->conn_list);
- spin_unlock_irqrestore(&connlock, flags);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
-
-/**
- * iscsi_if_create_session_done - send session creation completion event
- * @conn: leading connection for session
- *
- * This is called by HW iscsi LLDs to notify userpsace that its HW has
- * created a session or a existing session is back in the logged in state.
- **/
-int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
-{
- struct iscsi_internal *priv;
- struct iscsi_cls_session *session;
- struct Scsi_Host *shost;
- struct iscsi_uevent *ev;
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- unsigned long flags;
- int rc, len = NLMSG_SPACE(sizeof(*ev));
-
- priv = iscsi_if_transport_lookup(conn->transport);
- if (!priv)
+ ev->type = event;
+ switch (event) {
+ case ISCSI_KEVENT_DESTROY_SESSION:
+ ev->r.d_session.host_no = shost->host_no;
+ ev->r.d_session.sid = session->sid;
+ break;
+ case ISCSI_KEVENT_CREATE_SESSION:
+ ev->r.c_session_ret.host_no = shost->host_no;
+ ev->r.c_session_ret.sid = session->sid;
+ break;
+ case ISCSI_KEVENT_UNBIND_SESSION:
+ ev->r.unbind_session.host_no = shost->host_no;
+ ev->r.unbind_session.sid = session->sid;
+ break;
+ default:
+ dev_printk(KERN_ERR, &session->dev, "Invalid event %u.\n",
+ event);
+ kfree_skb(skb);
return -EINVAL;
-
- session = iscsi_dev_to_session(conn->dev.parent);
- shost = iscsi_session_to_shost(session);
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb) {
- dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
- "session creation event\n");
- return -ENOMEM;
}
- nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
- ev->transport_handle = iscsi_handle(conn->transport);
- ev->type = ISCSI_UEVENT_CREATE_SESSION;
- ev->r.c_session_ret.host_no = shost->host_no;
- ev->r.c_session_ret.sid = session->sid;
-
/*
* this will occur if the daemon is not up, so we just warn
* the user and when the daemon is restarted it will handle it
*/
rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
if (rc < 0)
- dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
- "session creation event. Check iscsi daemon\n");
-
- spin_lock_irqsave(&sesslock, flags);
- list_add(&session->sess_list, &sesslist);
- spin_unlock_irqrestore(&sesslock, flags);
-
- spin_lock_irqsave(&connlock, flags);
- list_add(&conn->conn_list, &connlist);
- conn->active = 1;
- spin_unlock_irqrestore(&connlock, flags);
+ dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
+ "of session event %u. Check iscsi daemon\n", event);
return rc;
}
-EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
+EXPORT_SYMBOL_GPL(iscsi_session_event);
static int
iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
{
struct iscsi_transport *transport = priv->iscsi_transport;
struct iscsi_cls_session *session;
- unsigned long flags;
uint32_t hostno;
session = transport->create_session(transport, &priv->t,
@@ -821,10 +858,6 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
if (!session)
return -ENOMEM;
- spin_lock_irqsave(&sesslock, flags);
- list_add(&session->sess_list, &sesslist);
- spin_unlock_irqrestore(&sesslock, flags);
-
ev->r.c_session_ret.host_no = hostno;
ev->r.c_session_ret.sid = session->sid;
return 0;
@@ -835,7 +868,6 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- unsigned long flags;
session = iscsi_session_lookup(ev->u.c_conn.sid);
if (!session) {
@@ -854,28 +886,17 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
ev->r.c_conn_ret.sid = session->sid;
ev->r.c_conn_ret.cid = conn->cid;
-
- spin_lock_irqsave(&connlock, flags);
- list_add(&conn->conn_list, &connlist);
- conn->active = 1;
- spin_unlock_irqrestore(&connlock, flags);
-
return 0;
}
static int
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
- unsigned long flags;
struct iscsi_cls_conn *conn;
conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
if (!conn)
return -EINVAL;
- spin_lock_irqsave(&connlock, flags);
- conn->active = 0;
- list_del(&conn->conn_list);
- spin_unlock_irqrestore(&connlock, flags);
if (transport->destroy_conn)
transport->destroy_conn(conn);
@@ -1002,7 +1023,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
struct iscsi_cls_conn *conn;
- unsigned long flags;
priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
if (!priv)
@@ -1020,13 +1040,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
- if (session) {
- spin_lock_irqsave(&sesslock, flags);
- list_del(&session->sess_list);
- spin_unlock_irqrestore(&sesslock, flags);
-
+ if (session)
transport->destroy_session(session);
- } else
+ else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_UNBIND_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+ if (session)
+ iscsi_unbind_session(session);
+ else
err = -EINVAL;
break;
case ISCSI_UEVENT_CREATE_CONN:
@@ -1179,6 +1202,8 @@ iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
#define iscsi_cdev_to_session(_cdev) \
iscsi_dev_to_session(_cdev->dev)
@@ -1217,6 +1242,9 @@ iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -1413,6 +1441,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
+ SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
+ SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
BUG_ON(count > ISCSI_CONN_ATTRS);
priv->conn_attrs[count] = NULL;
@@ -1438,6 +1468,9 @@ iscsi_register_transport(struct iscsi_transport *tt)
SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
+ SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
BUG_ON(count > ISCSI_SESSION_ATTRS);
@@ -1518,8 +1551,14 @@ static __init int iscsi_transport_init(void)
goto unregister_session_class;
}
+ iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+ if (!iscsi_eh_timer_workq)
+ goto release_nls;
+
return 0;
+release_nls:
+ sock_release(nls->sk_socket);
unregister_session_class:
transport_class_unregister(&iscsi_session_class);
unregister_conn_class:
@@ -1533,6 +1572,7 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
+ destroy_workqueue(iscsi_eh_timer_workq);
sock_release(nls->sk_socket);
transport_class_unregister(&iscsi_connection_class);
transport_class_unregister(&iscsi_session_class);
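Editor's note: the iSCSI changes above move error-handling timers and session unbinding onto private workqueues instead of the shared kernel workqueue. A generic, hedged sketch of that pattern follows; all names here are illustrative, not from the patch:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_eh_wq;
static struct delayed_work my_recovery_work;

static void my_recovery_fn(struct work_struct *work)
{
	/* recovery handling runs here, off the shared events thread */
}

static int my_setup(void)
{
	my_eh_wq = create_singlethread_workqueue("my_eh");
	if (!my_eh_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&my_recovery_work, my_recovery_fn);
	queue_delayed_work(my_eh_wq, &my_recovery_work, 30 * HZ);
	return 0;
}

static void my_teardown(void)
{
	if (!cancel_delayed_work(&my_recovery_work))
		flush_workqueue(my_eh_wq);	/* wait if it already started */
	destroy_workqueue(my_eh_wq);
}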
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 3120f4b3a11a..f2149d0bb999 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,6 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
handler = to_sas_internal(shost->transportt)->f->smp_handler;
ret = handler(shost, rphy, req);
+ req->errors = ret;
spin_lock_irq(q->queue_lock);
@@ -323,7 +324,7 @@ static int do_sas_phy_delete(struct device *dev, void *data)
}
/**
- * sas_remove_children -- tear down a devices SAS data structures
+ * sas_remove_children - tear down a device's SAS data structures
* @dev: device belonging to the sas object
*
* Removes all SAS PHYs and remote PHYs for a given object
@@ -336,7 +337,7 @@ void sas_remove_children(struct device *dev)
EXPORT_SYMBOL(sas_remove_children);
/**
- * sas_remove_host -- tear down a Scsi_Host's SAS data structures
+ * sas_remove_host - tear down a Scsi_Host's SAS data structures
* @shost: Scsi Host that is torn down
*
* Removes all SAS PHYs and remote PHYs for a given Scsi_Host.
@@ -577,7 +578,7 @@ static void sas_phy_release(struct device *dev)
}
/**
- * sas_phy_alloc -- allocates and initialize a SAS PHY structure
+ * sas_phy_alloc - allocates and initialize a SAS PHY structure
* @parent: Parent device
* @number: Phy index
*
@@ -618,7 +619,7 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
EXPORT_SYMBOL(sas_phy_alloc);
/**
- * sas_phy_add -- add a SAS PHY to the device hierarchy
+ * sas_phy_add - add a SAS PHY to the device hierarchy
* @phy: The PHY to be added
*
* Publishes a SAS PHY to the rest of the system.
@@ -638,7 +639,7 @@ int sas_phy_add(struct sas_phy *phy)
EXPORT_SYMBOL(sas_phy_add);
/**
- * sas_phy_free -- free a SAS PHY
+ * sas_phy_free - free a SAS PHY
* @phy: SAS PHY to free
*
* Frees the specified SAS PHY.
@@ -655,7 +656,7 @@ void sas_phy_free(struct sas_phy *phy)
EXPORT_SYMBOL(sas_phy_free);
/**
- * sas_phy_delete -- remove SAS PHY
+ * sas_phy_delete - remove SAS PHY
* @phy: SAS PHY to remove
*
* Removes the specified SAS PHY. If the SAS PHY has an
@@ -677,7 +678,7 @@ sas_phy_delete(struct sas_phy *phy)
EXPORT_SYMBOL(sas_phy_delete);
/**
- * scsi_is_sas_phy -- check if a struct device represents a SAS PHY
+ * scsi_is_sas_phy - check if a struct device represents a SAS PHY
* @dev: device to check
*
* Returns:
@@ -843,7 +844,6 @@ EXPORT_SYMBOL(sas_port_alloc_num);
/**
* sas_port_add - add a SAS port to the device hierarchy
- *
* @port: port to be added
*
* publishes a port to the rest of the system
@@ -868,7 +868,7 @@ int sas_port_add(struct sas_port *port)
EXPORT_SYMBOL(sas_port_add);
/**
- * sas_port_free -- free a SAS PORT
+ * sas_port_free - free a SAS PORT
* @port: SAS PORT to free
*
* Frees the specified SAS PORT.
@@ -885,7 +885,7 @@ void sas_port_free(struct sas_port *port)
EXPORT_SYMBOL(sas_port_free);
/**
- * sas_port_delete -- remove SAS PORT
+ * sas_port_delete - remove SAS PORT
* @port: SAS PORT to remove
*
* Removes the specified SAS PORT. If the SAS PORT has an
@@ -924,7 +924,7 @@ void sas_port_delete(struct sas_port *port)
EXPORT_SYMBOL(sas_port_delete);
/**
- * scsi_is_sas_port -- check if a struct device represents a SAS port
+ * scsi_is_sas_port - check if a struct device represents a SAS port
* @dev: device to check
*
* Returns:
@@ -1309,6 +1309,7 @@ static void sas_rphy_initialize(struct sas_rphy *rphy)
/**
* sas_end_device_alloc - allocate an rphy for an end device
+ * @parent: which port
*
* Allocates an SAS remote PHY structure, connected to @parent.
*
@@ -1345,6 +1346,8 @@ EXPORT_SYMBOL(sas_end_device_alloc);
/**
* sas_expander_alloc - allocate an rphy for an expander device
+ * @parent: which port
+ * @type: SAS_EDGE_EXPANDER_DEVICE or SAS_FANOUT_EXPANDER_DEVICE
*
* Allocates an SAS remote PHY structure, connected to @parent.
*
@@ -1383,7 +1386,7 @@ struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
EXPORT_SYMBOL(sas_expander_alloc);
/**
- * sas_rphy_add -- add a SAS remote PHY to the device hierarchy
+ * sas_rphy_add - add a SAS remote PHY to the device hierarchy
* @rphy: The remote PHY to be added
*
* Publishes a SAS remote PHY to the rest of the system.
@@ -1430,8 +1433,8 @@ int sas_rphy_add(struct sas_rphy *rphy)
EXPORT_SYMBOL(sas_rphy_add);
/**
- * sas_rphy_free -- free a SAS remote PHY
- * @rphy SAS remote PHY to free
+ * sas_rphy_free - free a SAS remote PHY
+ * @rphy: SAS remote PHY to free
*
* Frees the specified SAS remote PHY.
*
@@ -1459,7 +1462,7 @@ void sas_rphy_free(struct sas_rphy *rphy)
EXPORT_SYMBOL(sas_rphy_free);
/**
- * sas_rphy_delete -- remove and free SAS remote PHY
+ * sas_rphy_delete - remove and free SAS remote PHY
* @rphy: SAS remote PHY to remove and free
*
* Removes the specified SAS remote PHY and frees it.
@@ -1473,7 +1476,7 @@ sas_rphy_delete(struct sas_rphy *rphy)
EXPORT_SYMBOL(sas_rphy_delete);
/**
- * sas_rphy_remove -- remove SAS remote PHY
+ * sas_rphy_remove - remove SAS remote PHY
* @rphy: SAS remote phy to remove
*
* Removes the specified SAS remote PHY.
@@ -1504,7 +1507,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
EXPORT_SYMBOL(sas_rphy_remove);
/**
- * scsi_is_sas_rphy -- check if a struct device represents a SAS remote PHY
+ * scsi_is_sas_rphy - check if a struct device represents a SAS remote PHY
* @dev: device to check
*
* Returns:
@@ -1604,7 +1607,7 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
SETUP_TEMPLATE(expander_attrs, expander_##field, S_IRUGO, 1)
/**
- * sas_attach_transport -- instantiate SAS transport template
+ * sas_attach_transport - instantiate SAS transport template
* @ft: SAS transport class function template
*/
struct scsi_transport_template *
@@ -1715,7 +1718,7 @@ sas_attach_transport(struct sas_function_template *ft)
EXPORT_SYMBOL(sas_attach_transport);
/**
- * sas_release_transport -- release SAS transport template instance
+ * sas_release_transport - release SAS transport template instance
* @t: transport template instance
*/
void sas_release_transport(struct scsi_transport_template *t)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 4df21c92ff1e..1fb60313a516 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -52,13 +52,6 @@
struct spi_internal {
struct scsi_transport_template t;
struct spi_function_template *f;
- /* The actual attributes */
- struct class_device_attribute private_attrs[SPI_NUM_ATTRS];
- /* The array of null terminated pointers to attributes
- * needed by scsi_sysfs.c */
- struct class_device_attribute *attrs[SPI_NUM_ATTRS + SPI_OTHER_ATTRS + 1];
- struct class_device_attribute private_host_attrs[SPI_HOST_ATTRS];
- struct class_device_attribute *host_attrs[SPI_HOST_ATTRS + 1];
};
#define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t)
@@ -174,17 +167,20 @@ static int spi_host_setup(struct transport_container *tc, struct device *dev,
return 0;
}
+static int spi_host_configure(struct transport_container *tc,
+ struct device *dev,
+ struct class_device *cdev);
+
static DECLARE_TRANSPORT_CLASS(spi_host_class,
"spi_host",
spi_host_setup,
NULL,
- NULL);
+ spi_host_configure);
static int spi_host_match(struct attribute_container *cont,
struct device *dev)
{
struct Scsi_Host *shost;
- struct spi_internal *i;
if (!scsi_is_host_device(dev))
return 0;
@@ -194,11 +190,13 @@ static int spi_host_match(struct attribute_container *cont,
!= &spi_host_class.class)
return 0;
- i = to_spi_internal(shost->transportt);
-
- return &i->t.host_attrs.ac == cont;
+ return &shost->transportt->host_attrs.ac == cont;
}
+static int spi_target_configure(struct transport_container *tc,
+ struct device *dev,
+ struct class_device *cdev);
+
static int spi_device_configure(struct transport_container *tc,
struct device *dev,
struct class_device *cdev)
@@ -300,8 +298,10 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
struct spi_internal *i = to_spi_internal(shost->transportt); \
\
+ if (!i->f->set_##field) \
+ return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
- i->f->set_##field(starget, val); \
+ i->f->set_##field(starget, val); \
return count; \
}
@@ -317,6 +317,8 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
struct spi_transport_attrs *tp \
= (struct spi_transport_attrs *)&starget->starget_data; \
\
+ if (!i->f->set_##field) \
+ return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
if (val > tp->max_##field) \
val = tp->max_##field; \
@@ -327,14 +329,14 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
#define spi_transport_rd_attr(field, format_string) \
spi_transport_show_function(field, format_string) \
spi_transport_store_function(field, format_string) \
-static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \
+static CLASS_DEVICE_ATTR(field, S_IRUGO, \
show_spi_transport_##field, \
store_spi_transport_##field);
#define spi_transport_simple_attr(field, format_string) \
spi_transport_show_simple(field, format_string) \
spi_transport_store_simple(field, format_string) \
-static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \
+static CLASS_DEVICE_ATTR(field, S_IRUGO, \
show_spi_transport_##field, \
store_spi_transport_##field);
@@ -342,7 +344,7 @@ static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \
spi_transport_show_function(field, format_string) \
spi_transport_store_max(field, format_string) \
spi_transport_simple_attr(max_##field, format_string) \
-static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \
+static CLASS_DEVICE_ATTR(field, S_IRUGO, \
show_spi_transport_##field, \
store_spi_transport_##field);
@@ -472,6 +474,9 @@ store_spi_transport_period(struct class_device *cdev, const char *buf,
(struct spi_transport_attrs *)&starget->starget_data;
int period, retval;
+ if (!i->f->set_period)
+ return -EINVAL;
+
retval = store_spi_transport_period_helper(cdev, buf, count, &period);
if (period < tp->min_period)
@@ -482,7 +487,7 @@ store_spi_transport_period(struct class_device *cdev, const char *buf,
return retval;
}
-static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR,
+static CLASS_DEVICE_ATTR(period, S_IRUGO,
show_spi_transport_period,
store_spi_transport_period);
@@ -490,9 +495,14 @@ static ssize_t
show_spi_transport_min_period(struct class_device *cdev, char *buf)
{
struct scsi_target *starget = transport_class_to_starget(cdev);
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct spi_internal *i = to_spi_internal(shost->transportt);
struct spi_transport_attrs *tp =
(struct spi_transport_attrs *)&starget->starget_data;
+ if (!i->f->set_period)
+ return -EINVAL;
+
return show_spi_transport_period_helper(buf, tp->min_period);
}
@@ -509,7 +519,7 @@ store_spi_transport_min_period(struct class_device *cdev, const char *buf,
}
-static CLASS_DEVICE_ATTR(min_period, S_IRUGO | S_IWUSR,
+static CLASS_DEVICE_ATTR(min_period, S_IRUGO,
show_spi_transport_min_period,
store_spi_transport_min_period);
@@ -531,12 +541,15 @@ static ssize_t store_spi_host_signalling(struct class_device *cdev,
struct spi_internal *i = to_spi_internal(shost->transportt);
enum spi_signal_type type = spi_signal_to_value(buf);
+ if (!i->f->set_signalling)
+ return -EINVAL;
+
if (type != SPI_SIGNAL_UNKNOWN)
i->f->set_signalling(shost, type);
return count;
}
-static CLASS_DEVICE_ATTR(signalling, S_IRUGO | S_IWUSR,
+static CLASS_DEVICE_ATTR(signalling, S_IRUGO,
show_spi_host_signalling,
store_spi_host_signalling);
@@ -1262,35 +1275,6 @@ int spi_print_msg(const unsigned char *msg)
EXPORT_SYMBOL(spi_print_msg);
#endif /* ! CONFIG_SCSI_CONSTANTS */
-#define SETUP_ATTRIBUTE(field) \
- i->private_attrs[count] = class_device_attr_##field; \
- if (!i->f->set_##field) { \
- i->private_attrs[count].attr.mode = S_IRUGO; \
- i->private_attrs[count].store = NULL; \
- } \
- i->attrs[count] = &i->private_attrs[count]; \
- if (i->f->show_##field) \
- count++
-
-#define SETUP_RELATED_ATTRIBUTE(field, rel_field) \
- i->private_attrs[count] = class_device_attr_##field; \
- if (!i->f->set_##rel_field) { \
- i->private_attrs[count].attr.mode = S_IRUGO; \
- i->private_attrs[count].store = NULL; \
- } \
- i->attrs[count] = &i->private_attrs[count]; \
- if (i->f->show_##rel_field) \
- count++
-
-#define SETUP_HOST_ATTRIBUTE(field) \
- i->private_host_attrs[count] = class_device_attr_##field; \
- if (!i->f->set_##field) { \
- i->private_host_attrs[count].attr.mode = S_IRUGO; \
- i->private_host_attrs[count].store = NULL; \
- } \
- i->host_attrs[count] = &i->private_host_attrs[count]; \
- count++
-
static int spi_device_match(struct attribute_container *cont,
struct device *dev)
{
@@ -1343,16 +1327,156 @@ static DECLARE_TRANSPORT_CLASS(spi_transport_class,
"spi_transport",
spi_setup_transport_attrs,
NULL,
- NULL);
+ spi_target_configure);
static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
spi_device_match,
spi_device_configure);
+static struct attribute *host_attributes[] = {
+ &class_device_attr_signalling.attr,
+ NULL
+};
+
+static struct attribute_group host_attribute_group = {
+ .attrs = host_attributes,
+};
+
+static int spi_host_configure(struct transport_container *tc,
+ struct device *dev,
+ struct class_device *cdev)
+{
+ struct kobject *kobj = &cdev->kobj;
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct spi_internal *si = to_spi_internal(shost->transportt);
+ struct attribute *attr = &class_device_attr_signalling.attr;
+ int rc = 0;
+
+ if (si->f->set_signalling)
+ rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
+
+ return rc;
+}
+
+/* returns true if we should be showing the variable. Also
+ * overloads the return by setting 1<<1 if the attribute should
+ * be writeable */
+#define TARGET_ATTRIBUTE_HELPER(name) \
+ (si->f->show_##name ? 1 : 0) + \
+ (si->f->set_##name ? 2 : 0)
+
+static int target_attribute_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct class_device *cdev =
+ container_of(kobj, struct class_device, kobj);
+ struct scsi_target *starget = transport_class_to_starget(cdev);
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct spi_internal *si = to_spi_internal(shost->transportt);
+
+ if (attr == &class_device_attr_period.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(period);
+ else if (attr == &class_device_attr_min_period.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(period);
+ else if (attr == &class_device_attr_offset.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(offset);
+ else if (attr == &class_device_attr_max_offset.attr &&
+ spi_support_sync(starget))
+ return TARGET_ATTRIBUTE_HELPER(offset);
+ else if (attr == &class_device_attr_width.attr &&
+ spi_support_wide(starget))
+ return TARGET_ATTRIBUTE_HELPER(width);
+ else if (attr == &class_device_attr_max_width.attr &&
+ spi_support_wide(starget))
+ return TARGET_ATTRIBUTE_HELPER(width);
+ else if (attr == &class_device_attr_iu.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(iu);
+ else if (attr == &class_device_attr_dt.attr &&
+ spi_support_dt(starget))
+ return TARGET_ATTRIBUTE_HELPER(dt);
+ else if (attr == &class_device_attr_qas.attr &&
+ spi_support_qas(starget))
+ return TARGET_ATTRIBUTE_HELPER(qas);
+ else if (attr == &class_device_attr_wr_flow.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(wr_flow);
+ else if (attr == &class_device_attr_rd_strm.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(rd_strm);
+ else if (attr == &class_device_attr_rti.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(rti);
+ else if (attr == &class_device_attr_pcomp_en.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(pcomp_en);
+ else if (attr == &class_device_attr_hold_mcs.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(hold_mcs);
+ else if (attr == &class_device_attr_revalidate.attr)
+ return 1;
+
+ return 0;
+}
+
+static struct attribute *target_attributes[] = {
+ &class_device_attr_period.attr,
+ &class_device_attr_min_period.attr,
+ &class_device_attr_offset.attr,
+ &class_device_attr_max_offset.attr,
+ &class_device_attr_width.attr,
+ &class_device_attr_max_width.attr,
+ &class_device_attr_iu.attr,
+ &class_device_attr_dt.attr,
+ &class_device_attr_qas.attr,
+ &class_device_attr_wr_flow.attr,
+ &class_device_attr_rd_strm.attr,
+ &class_device_attr_rti.attr,
+ &class_device_attr_pcomp_en.attr,
+ &class_device_attr_hold_mcs.attr,
+ &class_device_attr_revalidate.attr,
+ NULL
+};
+
+static struct attribute_group target_attribute_group = {
+ .attrs = target_attributes,
+ .is_visible = target_attribute_is_visible,
+};
+
+static int spi_target_configure(struct transport_container *tc,
+ struct device *dev,
+ struct class_device *cdev)
+{
+ struct kobject *kobj = &cdev->kobj;
+ int i;
+ struct attribute *attr;
+ int rc;
+
+ for (i = 0; (attr = target_attributes[i]) != NULL; i++) {
+ int j = target_attribute_group.is_visible(kobj, attr, i);
+
+ /* FIXME: as well as returning -EEXIST, which we'd like
+ * to ignore, sysfs also does a WARN_ON and dumps a trace,
+ * which is bad, so temporarily, skip attributes that are
+ * already visible (the revalidate one) */
+ if (j && attr != &class_device_attr_revalidate.attr)
+ rc = sysfs_add_file_to_group(kobj, attr,
+ target_attribute_group.name);
+ /* and make the attribute writeable if we have a set
+ * function */
+ if ((j & 1))
+ rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
+ }
+
+ return 0;
+}
+
struct scsi_transport_template *
spi_attach_transport(struct spi_function_template *ft)
{
- int count = 0;
struct spi_internal *i = kzalloc(sizeof(struct spi_internal),
GFP_KERNEL);
@@ -1360,47 +1484,17 @@ spi_attach_transport(struct spi_function_template *ft)
return NULL;
i->t.target_attrs.ac.class = &spi_transport_class.class;
- i->t.target_attrs.ac.attrs = &i->attrs[0];
+ i->t.target_attrs.ac.grp = &target_attribute_group;
i->t.target_attrs.ac.match = spi_target_match;
transport_container_register(&i->t.target_attrs);
i->t.target_size = sizeof(struct spi_transport_attrs);
i->t.host_attrs.ac.class = &spi_host_class.class;
- i->t.host_attrs.ac.attrs = &i->host_attrs[0];
+ i->t.host_attrs.ac.grp = &host_attribute_group;
i->t.host_attrs.ac.match = spi_host_match;
transport_container_register(&i->t.host_attrs);
i->t.host_size = sizeof(struct spi_host_attrs);
i->f = ft;
- SETUP_ATTRIBUTE(period);
- SETUP_RELATED_ATTRIBUTE(min_period, period);
- SETUP_ATTRIBUTE(offset);
- SETUP_RELATED_ATTRIBUTE(max_offset, offset);
- SETUP_ATTRIBUTE(width);
- SETUP_RELATED_ATTRIBUTE(max_width, width);
- SETUP_ATTRIBUTE(iu);
- SETUP_ATTRIBUTE(dt);
- SETUP_ATTRIBUTE(qas);
- SETUP_ATTRIBUTE(wr_flow);
- SETUP_ATTRIBUTE(rd_strm);
- SETUP_ATTRIBUTE(rti);
- SETUP_ATTRIBUTE(pcomp_en);
- SETUP_ATTRIBUTE(hold_mcs);
-
- /* if you add an attribute but forget to increase SPI_NUM_ATTRS
- * this bug will trigger */
- BUG_ON(count > SPI_NUM_ATTRS);
-
- i->attrs[count++] = &class_device_attr_revalidate;
-
- i->attrs[count] = NULL;
-
- count = 0;
- SETUP_HOST_ATTRIBUTE(signalling);
-
- BUG_ON(count > SPI_HOST_ATTRS);
-
- i->host_attrs[count] = NULL;
-
return &i->t;
}
EXPORT_SYMBOL(spi_attach_transport);
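Editor's note: the SPI transport rewrite above replaces the per-instance attribute arrays with one shared attribute_group whose is_visible() callback filters entries per target. A minimal, hedged sketch of that sysfs pattern follows; the foo/bar attributes and the object_has_bar() helper are hypothetical:

#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static struct attribute foo_attr = { .name = "foo", .mode = S_IRUGO };
static struct attribute bar_attr = { .name = "bar", .mode = S_IRUGO };

static int object_has_bar(struct kobject *kobj)
{
	return 0;	/* hypothetical per-object capability test */
}

static int example_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int i)
{
	if (attr == &bar_attr && !object_has_bar(kobj))
		return 0;	/* hide "bar" on objects without the feature */
	return 1;		/* expose everything else */
}

static struct attribute *example_attrs[] = {
	&foo_attr,
	&bar_attr,
	NULL
};

static struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_attr_is_visible,
};

Registering the group with sysfs_create_group() then creates only the entries the callback approves, which is what lets a single static table replace the SETUP_ATTRIBUTE() arrays removed above.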
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 65c584db33bd..2445c98ae95e 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -185,11 +185,10 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev)
/**
* srp_rport_add - add a SRP remote port to the device hierarchy
- *
* @shost: scsi host the remote port is connected to.
* @ids: The port id for the remote port.
*
- * publishes a port to the rest of the system
+ * Publishes a port to the rest of the system.
*/
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
struct srp_rport_identifiers *ids)
@@ -242,8 +241,8 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
EXPORT_SYMBOL_GPL(srp_rport_add);
/**
- * srp_rport_del -- remove a SRP remote port
- * @port: SRP remote port to remove
+ * srp_rport_del - remove a SRP remote port
+ * @rport: SRP remote port to remove
*
* Removes the specified SRP remote port.
*/
@@ -271,7 +270,7 @@ static int do_srp_rport_del(struct device *dev, void *data)
}
/**
- * srp_remove_host -- tear down a Scsi_Host's SRP data structures
+ * srp_remove_host - tear down a Scsi_Host's SRP data structures
* @shost: Scsi Host that is torn down
*
* Removes all SRP remote ports for a given Scsi_Host.
@@ -297,7 +296,7 @@ static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
}
/**
- * srp_attach_transport -- instantiate SRP transport template
+ * srp_attach_transport - instantiate SRP transport template
* @ft: SRP transport class function template
*/
struct scsi_transport_template *
@@ -337,7 +336,7 @@ srp_attach_transport(struct srp_function_template *ft)
EXPORT_SYMBOL_GPL(srp_attach_transport);
/**
- * srp_release_transport -- release SRP transport template instance
+ * srp_release_transport - release SRP transport template instance
* @t: transport template instance
*/
void srp_release_transport(struct scsi_transport_template *t)
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index cd68a66c7bb3..3f21bc65e8c6 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -24,6 +24,14 @@
static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds,
unsigned int *secs);
+/**
+ * scsi_bios_ptable - Read PC partition table out of first sector of device.
+ * @dev: from this device
+ *
+ * Description: Reads the first sector from the device and returns %0x42 bytes
+ * starting at offset %0x1be.
+ * Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error.
+ */
unsigned char *scsi_bios_ptable(struct block_device *dev)
{
unsigned char *res = kmalloc(66, GFP_KERNEL);
@@ -43,15 +51,17 @@ unsigned char *scsi_bios_ptable(struct block_device *dev)
}
EXPORT_SYMBOL(scsi_bios_ptable);
-/*
- * Function : int scsicam_bios_param (struct block_device *bdev, ector_t capacity, int *ip)
+/**
+ * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
+ * @bdev: which device
+ * @capacity: size of the disk in sectors
+ * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
*
- * Purpose : to determine the BIOS mapping used for a drive in a
+ * Description : determine the BIOS mapping/geometry used for a drive in a
* SCSI-CAM system, storing the results in ip as required
* by the HDIO_GETGEO ioctl().
*
* Returns : -1 on failure, 0 on success.
- *
*/
int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
@@ -98,15 +108,18 @@ int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
}
EXPORT_SYMBOL(scsicam_bios_param);
-/*
- * Function : static int scsi_partsize(unsigned char *buf, unsigned long
- * capacity,unsigned int *cyls, unsigned int *hds, unsigned int *secs);
+/**
+ * scsi_partsize - Parse cylinders/heads/sectors from PC partition table
+ * @buf: partition table, see scsi_bios_ptable()
+ * @capacity: size of the disk in sectors
+ * @cyls: put cylinders here
+ * @hds: put heads here
+ * @secs: put sectors here
*
- * Purpose : to determine the BIOS mapping used to create the partition
+ * Description: determine the BIOS mapping/geometry used to create the partition
* table, storing the results in *cyls, *hds, and *secs
*
- * Returns : -1 on failure, 0 on success.
- *
+ * Returns: -1 on failure, 0 on success.
*/
int scsi_partsize(unsigned char *buf, unsigned long capacity,
@@ -194,7 +207,7 @@ EXPORT_SYMBOL(scsi_partsize);
*
* WORKING X3T9.2
* DRAFT 792D
- *
+ * see http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf
*
* Revision 6
* 10-MAR-94
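The kernel-doc added above makes the intended use of scsicam_bios_param() easier to see: a host driver with no native notion of geometry typically calls it from its scsi_host_template .bios_param hook and falls back to an invented translation when no partition-table hint is found. A hedged sketch, with an illustrative fallback (64 heads / 32 sectors is a common convention, not something this patch mandates):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsicam.h>

static int example_bios_param(struct scsi_device *sdev, struct block_device *bdev,
			      sector_t capacity, int geom[])
{
	/* geom[0] = heads, geom[1] = sectors per track, geom[2] = cylinders */
	if (scsicam_bios_param(bdev, capacity, geom) < 0) {
		geom[0] = 64;
		geom[1] = 32;
		geom[2] = (unsigned long)capacity / (64 * 32);
	}
	return 0;
}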
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a69b155f39a2..24eba3118b5a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -395,6 +395,15 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
goto out;
}
+ /*
+	 * Some devices (some SD cards, for one) don't like it if the
+	 * last sector gets read in a larger than 1 sector read.
+ */
+ if (unlikely(sdp->last_sector_bug &&
+ rq->nr_sectors > sdp->sector_size / 512 &&
+ block + this_count == get_capacity(disk)))
+ this_count -= sdp->sector_size / 512;
+
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
(unsigned long long)block));
@@ -736,6 +745,7 @@ static int sd_media_changed(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
+ struct scsi_sense_hdr *sshdr = NULL;
int retval;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
@@ -749,8 +759,11 @@ static int sd_media_changed(struct gendisk *disk)
* can deal with it then. It is only because of unrecoverable errors
* that we would ever take a device offline in the first place.
*/
- if (!scsi_device_online(sdp))
- goto not_present;
+ if (!scsi_device_online(sdp)) {
+ set_media_not_present(sdkp);
+ retval = 1;
+ goto out;
+ }
/*
* Using TEST_UNIT_READY enables differentiation between drive with
@@ -762,8 +775,12 @@ static int sd_media_changed(struct gendisk *disk)
* sd_revalidate() is called.
*/
retval = -ENODEV;
- if (scsi_block_when_processing_errors(sdp))
- retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES);
+
+ if (scsi_block_when_processing_errors(sdp)) {
+ sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+ retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
+ sshdr);
+ }
/*
* Unable to test, unit probably not ready. This usually
@@ -771,8 +788,13 @@ static int sd_media_changed(struct gendisk *disk)
* and we will figure it out later once the drive is
* available again.
*/
- if (retval)
- goto not_present;
+ if (retval || (scsi_sense_valid(sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr->asc == 0x3a)) {
+ set_media_not_present(sdkp);
+ retval = 1;
+ goto out;
+ }
/*
* For removable scsi disk we have to recognise the presence
@@ -783,12 +805,12 @@ static int sd_media_changed(struct gendisk *disk)
retval = sdp->changed;
sdp->changed = 0;
-
+out:
+ if (retval != sdkp->previous_state)
+ sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
+ sdkp->previous_state = retval;
+ kfree(sshdr);
return retval;
-
-not_present:
- set_media_not_present(sdkp);
- return 1;
}
static int sd_sync_cache(struct scsi_disk *sdkp)
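The sd_media_changed() rework above relies on the extended form of scsi_test_unit_ready() introduced elsewhere in this series, which takes a caller-supplied scsi_sense_hdr so that "medium not present" (additional sense code 0x3a) can be told apart from other NOT READY conditions. A minimal sketch of that calling convention, using placeholder timeout/retry values rather than sd.c's own SD_TIMEOUT/SD_MAX_RETRIES:

#include <linux/jiffies.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static int example_medium_present(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0)
		return 1;			/* unit ready, medium present */
	if (scsi_sense_valid(&sshdr) && sshdr.asc == 0x3a)
		return 0;			/* medium not present */
	return 0;				/* not ready for some other reason */
}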
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
deleted file mode 100644
index b11324479b5b..000000000000
--- a/drivers/scsi/seagate.c
+++ /dev/null
@@ -1,1667 +0,0 @@
-/*
- * seagate.c Copyright (C) 1992, 1993 Drew Eckhardt
- * low level scsi driver for ST01/ST02, Future Domain TMC-885,
- * TMC-950 by Drew Eckhardt <drew@colorado.edu>
- *
- * Note : TMC-880 boards don't work because they have two bits in
- * the status register flipped, I'll fix this "RSN"
- * [why do I have strong feeling that above message is from 1993? :-)
- * pavel@ucw.cz]
- *
- * This card does all the I/O via memory mapped I/O, so there is no need
- * to check or allocate a region of the I/O address space.
- */
-
-/* 1996 - to use new read{b,w,l}, write{b,w,l}, and phys_to_virt
- * macros, replaced assembler routines with C. There's probably a
- * performance hit, but I only have a cdrom and can't tell. Define
- * SEAGATE_USE_ASM if you want the old assembler code -- SJT
- *
- * 1998-jul-29 - created DPRINTK macros and made it work under
- * linux 2.1.112, simplified some #defines etc. <pavel@ucw.cz>
- *
- * Aug 2000 - aeb - deleted seagate_st0x_biosparam(). It would try to
- * read the physical disk geometry, a bad mistake. Of course it doesn't
- * matter much what geometry one invents, but on large disks it
- * returned 256 (or more) heads, causing all kind of failures.
- * Of course this means that people might see a different geometry now,
- * so boot parameters may be necessary in some cases.
- */
-
-/*
- * Configuration :
- * To use without BIOS -DOVERRIDE=base_address -DCONTROLLER=FD or SEAGATE
- * -DIRQ will override the default of 5.
- * Note: You can now set these options from the kernel's "command line".
- * The syntax is:
- *
- * st0x=ADDRESS,IRQ (for a Seagate controller)
- * or:
- * tmc8xx=ADDRESS,IRQ (for a TMC-8xx or TMC-950 controller)
- * eg:
- * tmc8xx=0xC8000,15
- *
- * will configure the driver for a TMC-8xx style controller using IRQ 15
- * with a base address of 0xC8000.
- *
- * -DARBITRATE
- * Will cause the host adapter to arbitrate for the
- * bus for better SCSI-II compatibility, rather than just
- * waiting for BUS FREE and then doing its thing. Should
- * let us do one command per Lun when I integrate my
- * reorganization changes into the distribution sources.
- *
- * -DDEBUG=65535
- * Will activate debug code.
- *
- * -DFAST or -DFAST32
- * Will use blind transfers where possible
- *
- * -DPARITY
- * This will enable parity.
- *
- * -DSEAGATE_USE_ASM
- * Will use older seagate assembly code. should be (very small amount)
- * Faster.
- *
- * -DSLOW_RATE=50
- * Will allow compatibility with broken devices that don't
- * handshake fast enough (ie, some CD ROM's) for the Seagate
- * code.
- *
- * 50 is some number, It will let you specify a default
- * transfer rate if handshaking isn't working correctly.
- *
- * -DOLDCNTDATASCEME There is a new sceme to set the CONTROL
- * and DATA reigsters which complies more closely
- * with the SCSI2 standard. This hopefully eliminates
- * the need to swap the order these registers are
- * 'messed' with. It makes the following two options
- * obsolete. To reenable the old sceme define this.
- *
- * The following to options are patches from the SCSI.HOWTO
- *
- * -DSWAPSTAT This will swap the definitions for STAT_MSG and STAT_CD.
- *
- * -DSWAPCNTDATA This will swap the order that seagate.c messes with
- * the CONTROL an DATA registers.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/signal.h>
-#include <linux/string.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/stat.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi.h>
-
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_host.h>
-
-
-#ifdef DEBUG
-#define DPRINTK( when, msg... ) do { if ( (DEBUG & (when)) == (when) ) printk( msg ); } while (0)
-#else
-#define DPRINTK( when, msg... ) do { } while (0)
-#define DEBUG 0
-#endif
-#define DANY( msg... ) DPRINTK( 0xffff, msg );
-
-#ifndef IRQ
-#define IRQ 5
-#endif
-
-#ifdef FAST32
-#define FAST
-#endif
-
-#undef LINKED /* Linked commands are currently broken! */
-
-#if defined(OVERRIDE) && !defined(CONTROLLER)
-#error Please use -DCONTROLLER=SEAGATE or -DCONTROLLER=FD to override controller type
-#endif
-
-#ifndef __i386__
-#undef SEAGATE_USE_ASM
-#endif
-
-/*
- Thanks to Brian Antoine for the example code in his Messy-Loss ST-01
- driver, and Mitsugu Suzuki for information on the ST-01
- SCSI host.
-*/
-
-/*
- CONTROL defines
-*/
-
-#define CMD_RST 0x01
-#define CMD_SEL 0x02
-#define CMD_BSY 0x04
-#define CMD_ATTN 0x08
-#define CMD_START_ARB 0x10
-#define CMD_EN_PARITY 0x20
-#define CMD_INTR 0x40
-#define CMD_DRVR_ENABLE 0x80
-
-/*
- STATUS
-*/
-#ifdef SWAPSTAT
-#define STAT_MSG 0x08
-#define STAT_CD 0x02
-#else
-#define STAT_MSG 0x02
-#define STAT_CD 0x08
-#endif
-
-#define STAT_BSY 0x01
-#define STAT_IO 0x04
-#define STAT_REQ 0x10
-#define STAT_SEL 0x20
-#define STAT_PARITY 0x40
-#define STAT_ARB_CMPL 0x80
-
-/*
- REQUESTS
-*/
-
-#define REQ_MASK (STAT_CD | STAT_IO | STAT_MSG)
-#define REQ_DATAOUT 0
-#define REQ_DATAIN STAT_IO
-#define REQ_CMDOUT STAT_CD
-#define REQ_STATIN (STAT_CD | STAT_IO)
-#define REQ_MSGOUT (STAT_MSG | STAT_CD)
-#define REQ_MSGIN (STAT_MSG | STAT_CD | STAT_IO)
-
-extern volatile int seagate_st0x_timeout;
-
-#ifdef PARITY
-#define BASE_CMD CMD_EN_PARITY
-#else
-#define BASE_CMD 0
-#endif
-
-/*
- Debugging code
-*/
-
-#define PHASE_BUS_FREE 1
-#define PHASE_ARBITRATION 2
-#define PHASE_SELECTION 4
-#define PHASE_DATAIN 8
-#define PHASE_DATAOUT 0x10
-#define PHASE_CMDOUT 0x20
-#define PHASE_MSGIN 0x40
-#define PHASE_MSGOUT 0x80
-#define PHASE_STATUSIN 0x100
-#define PHASE_ETC (PHASE_DATAIN | PHASE_DATAOUT | PHASE_CMDOUT | PHASE_MSGIN | PHASE_MSGOUT | PHASE_STATUSIN)
-#define PRINT_COMMAND 0x200
-#define PHASE_EXIT 0x400
-#define PHASE_RESELECT 0x800
-#define DEBUG_FAST 0x1000
-#define DEBUG_SG 0x2000
-#define DEBUG_LINKED 0x4000
-#define DEBUG_BORKEN 0x8000
-
-/*
- * Control options - these are timeouts specified in .01 seconds.
- */
-
-/* 30, 20 work */
-#define ST0X_BUS_FREE_DELAY 25
-#define ST0X_SELECTION_DELAY 25
-
-#define SEAGATE 1 /* these determine the type of the controller */
-#define FD 2
-
-#define ST0X_ID_STR "Seagate ST-01/ST-02"
-#define FD_ID_STR "TMC-8XX/TMC-950"
-
-static int internal_command (unsigned char target, unsigned char lun,
- const void *cmnd,
- void *buff, int bufflen, int reselect);
-
-static int incommand; /* set if arbitration has finished
- and we are in some command phase. */
-
-static unsigned int base_address = 0; /* Where the card ROM starts, used to
- calculate memory mapped register
- location. */
-
-static void __iomem *st0x_cr_sr; /* control register write, status
- register read. 256 bytes in
- length.
- Read is status of SCSI BUS, as per
- STAT masks. */
-
-static void __iomem *st0x_dr; /* data register, read write 256
- bytes in length. */
-
-static volatile int st0x_aborted = 0; /* set when we are aborted, ie by a
- time out, etc. */
-
-static unsigned char controller_type = 0; /* set to SEAGATE for ST0x
- boards or FD for TMC-8xx
- boards */
-static int irq = IRQ;
-
-module_param(base_address, uint, 0);
-module_param(controller_type, byte, 0);
-module_param(irq, int, 0);
-MODULE_LICENSE("GPL");
-
-
-#define retcode(result) (((result) << 16) | (message << 8) | status)
-#define STATUS ((u8) readb(st0x_cr_sr))
-#define DATA ((u8) readb(st0x_dr))
-#define WRITE_CONTROL(d) { writeb((d), st0x_cr_sr); }
-#define WRITE_DATA(d) { writeb((d), st0x_dr); }
-
-#ifndef OVERRIDE
-static unsigned int seagate_bases[] = {
- 0xc8000, 0xca000, 0xcc000,
- 0xce000, 0xdc000, 0xde000
-};
-
-typedef struct {
- const unsigned char *signature;
- unsigned offset;
- unsigned length;
- unsigned char type;
-} Signature;
-
-static Signature __initdata signatures[] = {
- {"ST01 v1.7 (C) Copyright 1987 Seagate", 15, 37, SEAGATE},
- {"SCSI BIOS 2.00 (C) Copyright 1987 Seagate", 15, 40, SEAGATE},
-
-/*
- * The following two lines are NOT mistakes. One detects ROM revision
- * 3.0.0, the other 3.2. Since seagate has only one type of SCSI adapter,
- * and this is not going to change, the "SEAGATE" and "SCSI" together
- * are probably "good enough"
- */
-
- {"SEAGATE SCSI BIOS ", 16, 17, SEAGATE},
- {"SEAGATE SCSI BIOS ", 17, 17, SEAGATE},
-
-/*
- * However, future domain makes several incompatible SCSI boards, so specific
- * signatures must be used.
- */
-
- {"FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89", 5, 46, FD},
- {"FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89", 5, 46, FD},
- {"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90", 5, 47, FD},
- {"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90", 5, 47, FD},
- {"FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90", 5, 46, FD},
- {"FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92", 5, 44, FD},
- {"IBM F1 BIOS V1.1004/30/92", 5, 25, FD},
- {"FUTURE DOMAIN TMC-950", 5, 21, FD},
- /* Added for 2.2.16 by Matthias_Heidbrink@b.maus.de */
- {"IBM F1 V1.2009/22/93", 5, 25, FD},
-};
-
-#define NUM_SIGNATURES ARRAY_SIZE(signatures)
-#endif /* n OVERRIDE */
-
-/*
- * hostno stores the hostnumber, as told to us by the init routine.
- */
-
-static int hostno = -1;
-static void seagate_reconnect_intr (int, void *);
-static irqreturn_t do_seagate_reconnect_intr (int, void *);
-static int seagate_st0x_bus_reset(struct scsi_cmnd *);
-
-#ifdef FAST
-static int fast = 1;
-#else
-#define fast 0
-#endif
-
-#ifdef SLOW_RATE
-/*
- * Support for broken devices :
- * The Seagate board has a handshaking problem. Namely, a lack
- * thereof for slow devices. You can blast 600K/second through
- * it if you are polling for each byte, more if you do a blind
- * transfer. In the first case, with a fast device, REQ will
- * transition high-low or high-low-high before your loop restarts
- * and you'll have no problems. In the second case, the board
- * will insert wait states for up to 13.2 usecs for REQ to
- * transition low->high, and everything will work.
- *
- * However, there's nothing in the state machine that says
- * you *HAVE* to see a high-low-high set of transitions before
- * sending the next byte, and slow things like the Trantor CD ROMS
- * will break because of this.
- *
- * So, we need to slow things down, which isn't as simple as it
- * seems. We can't slow things down period, because then people
- * who don't recompile their kernels will shoot me for ruining
- * their performance. We need to do it on a case per case basis.
- *
- * The best for performance will be to, only for borken devices
- * (this is stored on a per-target basis in the scsi_devices array)
- *
- * Wait for a low->high transition before continuing with that
- * transfer. If we timeout, continue anyways. We don't need
- * a long timeout, because REQ should only be asserted until the
- * corresponding ACK is received and processed.
- *
- * Note that we can't use the system timer for this, because of
- * resolution, and we *really* can't use the timer chip since
- * gettimeofday() and the beeper routines use that. So,
- * the best thing for us to do will be to calibrate a timing
- * loop in the initialization code using the timer chip before
- * gettimeofday() can screw with it.
- *
- * FIXME: this is broken (not borken :-). Empty loop costs less than
- * loop with ISA access in it! -- pavel@ucw.cz
- */
-
-static int borken_calibration = 0;
-
-static void __init borken_init (void)
-{
- register int count = 0, start = jiffies + 1, stop = start + 25;
-
- /* FIXME: There may be a better approach, this is a straight port for
- now */
- preempt_disable();
- while (time_before (jiffies, start))
- cpu_relax();
- for (; time_before (jiffies, stop); ++count)
- cpu_relax();
- preempt_enable();
-
-/*
- * Ok, we now have a count for .25 seconds. Convert to a
- * count per second and divide by transfer rate in K. */
-
- borken_calibration = (count * 4) / (SLOW_RATE * 1024);
-
- if (borken_calibration < 1)
- borken_calibration = 1;
-}
-
-static inline void borken_wait (void)
-{
- register int count;
-
- for (count = borken_calibration; count && (STATUS & STAT_REQ); --count)
- cpu_relax();
-
-#if (DEBUG & DEBUG_BORKEN)
- if (count)
- printk ("scsi%d : borken timeout\n", hostno);
-#endif
-}
-
-#endif /* def SLOW_RATE */
-
-/* These beasts only live on ISA, and ISA means 8MHz. Each ULOOP()
- * contains at least one ISA access, which takes more than 0.125
- * usec. So if we loop 8 times time in usec, we are safe.
- */
-
-#define ULOOP( i ) for (clock = i*8;;)
-#define TIMEOUT (!(clock--))
-
-static int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
-{
- struct Scsi_Host *instance;
- int i, j;
- unsigned long cr, dr;
-
- tpnt->proc_name = "seagate";
-/*
- * First, we try for the manual override.
- */
- DANY ("Autodetecting ST0x / TMC-8xx\n");
-
- if (hostno != -1) {
- printk (KERN_ERR "seagate_st0x_detect() called twice?!\n");
- return 0;
- }
-
-/* If the user specified the controller type from the command line,
- controller_type will be non-zero, so don't try to detect one */
-
- if (!controller_type) {
-#ifdef OVERRIDE
- base_address = OVERRIDE;
- controller_type = CONTROLLER;
-
- DANY ("Base address overridden to %x, controller type is %s\n",
- base_address,
- controller_type == SEAGATE ? "SEAGATE" : "FD");
-#else /* OVERRIDE */
-/*
- * To detect this card, we simply look for the signature
- * from the BIOS version notice in all the possible locations
- * of the ROM's. This has a nice side effect of not trashing
- * any register locations that might be used by something else.
- *
- * XXX - note that we probably should be probing the address
- * space for the on-board RAM instead.
- */
-
- for (i = 0; i < ARRAY_SIZE(seagate_bases); ++i) {
- void __iomem *p = ioremap(seagate_bases[i], 0x2000);
- if (!p)
- continue;
- for (j = 0; j < NUM_SIGNATURES; ++j)
- if (check_signature(p + signatures[j].offset, signatures[j].signature, signatures[j].length)) {
- base_address = seagate_bases[i];
- controller_type = signatures[j].type;
- break;
- }
- iounmap(p);
- }
-#endif /* OVERRIDE */
- }
- /* (! controller_type) */
- tpnt->this_id = (controller_type == SEAGATE) ? 7 : 6;
- tpnt->name = (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR;
-
- if (!base_address) {
- printk(KERN_INFO "seagate: ST0x/TMC-8xx not detected.\n");
- return 0;
- }
-
- cr = base_address + (controller_type == SEAGATE ? 0x1a00 : 0x1c00);
- dr = cr + 0x200;
- st0x_cr_sr = ioremap(cr, 0x100);
- st0x_dr = ioremap(dr, 0x100);
-
- DANY("%s detected. Base address = %x, cr = %x, dr = %x\n",
- tpnt->name, base_address, cr, dr);
-
- /*
- * At all times, we will use IRQ 5. Should also check for IRQ3
- * if we lose our first interrupt.
- */
- instance = scsi_register (tpnt, 0);
- if (instance == NULL)
- return 0;
-
- hostno = instance->host_no;
- if (request_irq (irq, do_seagate_reconnect_intr, IRQF_DISABLED, (controller_type == SEAGATE) ? "seagate" : "tmc-8xx", instance)) {
- printk(KERN_ERR "scsi%d : unable to allocate IRQ%d\n", hostno, irq);
- return 0;
- }
- instance->irq = irq;
- instance->io_port = base_address;
-#ifdef SLOW_RATE
- printk(KERN_INFO "Calibrating borken timer... ");
- borken_init();
- printk(" %d cycles per transfer\n", borken_calibration);
-#endif
- printk (KERN_INFO "This is one second... ");
- {
- int clock;
- ULOOP (1 * 1000 * 1000) {
- STATUS;
- if (TIMEOUT)
- break;
- }
- }
-
- printk ("done, %s options:"
-#ifdef ARBITRATE
- " ARBITRATE"
-#endif
-#if DEBUG
- " DEBUG"
-#endif
-#ifdef FAST
- " FAST"
-#ifdef FAST32
- "32"
-#endif
-#endif
-#ifdef LINKED
- " LINKED"
-#endif
-#ifdef PARITY
- " PARITY"
-#endif
-#ifdef SEAGATE_USE_ASM
- " SEAGATE_USE_ASM"
-#endif
-#ifdef SLOW_RATE
- " SLOW_RATE"
-#endif
-#ifdef SWAPSTAT
- " SWAPSTAT"
-#endif
-#ifdef SWAPCNTDATA
- " SWAPCNTDATA"
-#endif
- "\n", tpnt->name);
- return 1;
-}
-
-static const char *seagate_st0x_info (struct Scsi_Host *shpnt)
-{
- static char buffer[64];
-
- snprintf(buffer, 64, "%s at irq %d, address 0x%05X",
- (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR,
- irq, base_address);
- return buffer;
-}
-
-/*
- * These are our saved pointers for the outstanding command that is
- * waiting for a reconnect
- */
-
-static unsigned char current_target, current_lun;
-static unsigned char *current_cmnd, *current_data;
-static int current_nobuffs;
-static struct scatterlist *current_buffer;
-static int current_bufflen;
-
-#ifdef LINKED
-/*
- * linked_connected indicates whether or not we are currently connected to
- * linked_target, linked_lun and in an INFORMATION TRANSFER phase,
- * using linked commands.
- */
-
-static int linked_connected = 0;
-static unsigned char linked_target, linked_lun;
-#endif
-
-static void (*done_fn) (struct scsi_cmnd *) = NULL;
-static struct scsi_cmnd *SCint = NULL;
-
-/*
- * These control whether or not disconnect / reconnect will be attempted,
- * or are being attempted.
- */
-
-#define NO_RECONNECT 0
-#define RECONNECT_NOW 1
-#define CAN_RECONNECT 2
-
-/*
- * LINKED_RIGHT indicates that we are currently connected to the correct target
- * for this command, LINKED_WRONG indicates that we are connected to the wrong
- * target. Note that these imply CAN_RECONNECT and require defined(LINKED).
- */
-
-#define LINKED_RIGHT 3
-#define LINKED_WRONG 4
-
-/*
- * This determines if we are expecting to reconnect or not.
- */
-
-static int should_reconnect = 0;
-
-/*
- * The seagate_reconnect_intr routine is called when a target reselects the
- * host adapter. This occurs on the interrupt triggered by the target
- * asserting SEL.
- */
-
-static irqreturn_t do_seagate_reconnect_intr(int irq, void *dev_id)
-{
- unsigned long flags;
- struct Scsi_Host *dev = dev_id;
-
- spin_lock_irqsave (dev->host_lock, flags);
- seagate_reconnect_intr (irq, dev_id);
- spin_unlock_irqrestore (dev->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-static void seagate_reconnect_intr (int irq, void *dev_id)
-{
- int temp;
- struct scsi_cmnd *SCtmp;
-
- DPRINTK (PHASE_RESELECT, "scsi%d : seagate_reconnect_intr() called\n", hostno);
-
- if (!should_reconnect)
- printk(KERN_WARNING "scsi%d: unexpected interrupt.\n", hostno);
- else {
- should_reconnect = 0;
-
- DPRINTK (PHASE_RESELECT, "scsi%d : internal_command(%d, %08x, %08x, RECONNECT_NOW\n",
- hostno, current_target, current_data, current_bufflen);
-
- temp = internal_command (current_target, current_lun, current_cmnd, current_data, current_bufflen, RECONNECT_NOW);
-
- if (msg_byte(temp) != DISCONNECT) {
- if (done_fn) {
- DPRINTK(PHASE_RESELECT, "scsi%d : done_fn(%d,%08x)", hostno, hostno, temp);
- if (!SCint)
- panic ("SCint == NULL in seagate");
- SCtmp = SCint;
- SCint = NULL;
- SCtmp->result = temp;
- done_fn(SCtmp);
- } else
- printk(KERN_ERR "done_fn() not defined.\n");
- }
- }
-}
-
-/*
- * The seagate_st0x_queue_command() function provides a queued interface
- * to the seagate SCSI driver. Basically, it just passes control onto the
- * seagate_command() function, after fixing it so that the done_fn()
- * is set to the one passed to the function. We have to be very careful,
- * because there are some commands on some devices that do not disconnect,
- * and if we simply call the done_fn when the command is done then another
- * command is started and queue_command is called again... We end up
- * overflowing the kernel stack, and this tends not to be such a good idea.
- */
-
-static int recursion_depth = 0;
-
-static int seagate_st0x_queue_command(struct scsi_cmnd * SCpnt,
- void (*done) (struct scsi_cmnd *))
-{
- int result, reconnect;
- struct scsi_cmnd *SCtmp;
-
- DANY ("seagate: que_command");
- done_fn = done;
- current_target = SCpnt->device->id;
- current_lun = SCpnt->device->lun;
- current_cmnd = SCpnt->cmnd;
- current_data = (unsigned char *) SCpnt->request_buffer;
- current_bufflen = SCpnt->request_bufflen;
- SCint = SCpnt;
- if (recursion_depth)
- return 1;
- recursion_depth++;
- do {
-#ifdef LINKED
- /*
- * Set linked command bit in control field of SCSI command.
- */
-
- current_cmnd[SCpnt->cmd_len] |= 0x01;
- if (linked_connected) {
- DPRINTK (DEBUG_LINKED, "scsi%d : using linked commands, current I_T_L nexus is ", hostno);
- if (linked_target == current_target && linked_lun == current_lun)
- {
- DPRINTK(DEBUG_LINKED, "correct\n");
- reconnect = LINKED_RIGHT;
- } else {
- DPRINTK(DEBUG_LINKED, "incorrect\n");
- reconnect = LINKED_WRONG;
- }
- } else
-#endif /* LINKED */
- reconnect = CAN_RECONNECT;
-
- result = internal_command(SCint->device->id, SCint->device->lun, SCint->cmnd,
- SCint->request_buffer, SCint->request_bufflen, reconnect);
- if (msg_byte(result) == DISCONNECT)
- break;
- SCtmp = SCint;
- SCint = NULL;
- SCtmp->result = result;
- done_fn(SCtmp);
- }
- while (SCint);
- recursion_depth--;
- return 0;
-}
-
-static int internal_command (unsigned char target, unsigned char lun,
- const void *cmnd, void *buff, int bufflen, int reselect)
-{
- unsigned char *data = NULL;
- struct scatterlist *buffer = NULL;
- int clock, temp, nobuffs = 0, done = 0, len = 0;
-#if DEBUG
- int transfered = 0, phase = 0, newphase;
-#endif
- register unsigned char status_read;
- unsigned char tmp_data, tmp_control, status = 0, message = 0;
- unsigned transfersize = 0, underflow = 0;
-#ifdef SLOW_RATE
- int borken = (int) SCint->device->borken; /* Does the current target require
- Very Slow I/O ? */
-#endif
-
- incommand = 0;
- st0x_aborted = 0;
-
-#if (DEBUG & PRINT_COMMAND)
- printk("scsi%d : target = %d, command = ", hostno, target);
- __scsi_print_command((unsigned char *) cmnd);
-#endif
-
-#if (DEBUG & PHASE_RESELECT)
- switch (reselect) {
- case RECONNECT_NOW:
- printk("scsi%d : reconnecting\n", hostno);
- break;
-#ifdef LINKED
- case LINKED_RIGHT:
- printk("scsi%d : connected, can reconnect\n", hostno);
- break;
- case LINKED_WRONG:
- printk("scsi%d : connected to wrong target, can reconnect\n",
- hostno);
- break;
-#endif
- case CAN_RECONNECT:
- printk("scsi%d : allowed to reconnect\n", hostno);
- break;
- default:
- printk("scsi%d : not allowed to reconnect\n", hostno);
- }
-#endif
-
- if (target == (controller_type == SEAGATE ? 7 : 6))
- return DID_BAD_TARGET;
-
- /*
- * We work it differently depending on if this is is "the first time,"
- * or a reconnect. If this is a reselect phase, then SEL will
- * be asserted, and we must skip selection / arbitration phases.
- */
-
- switch (reselect) {
- case RECONNECT_NOW:
- DPRINTK (PHASE_RESELECT, "scsi%d : phase RESELECT \n", hostno);
- /*
- * At this point, we should find the logical or of our ID
- * and the original target's ID on the BUS, with BSY, SEL,
- * and I/O signals asserted.
- *
- * After ARBITRATION phase is completed, only SEL, BSY,
- * and the target ID are asserted. A valid initiator ID
- * is not on the bus until IO is asserted, so we must wait
- * for that.
- */
- ULOOP (100 * 1000) {
- temp = STATUS;
- if ((temp & STAT_IO) && !(temp & STAT_BSY))
- break;
- if (TIMEOUT) {
- DPRINTK (PHASE_RESELECT, "scsi%d : RESELECT timed out while waiting for IO .\n", hostno);
- return (DID_BAD_INTR << 16);
- }
- }
-
- /*
- * After I/O is asserted by the target, we can read our ID
- * and its ID off of the BUS.
- */
-
- if (!((temp = DATA) & (controller_type == SEAGATE ? 0x80 : 0x40))) {
- DPRINTK (PHASE_RESELECT, "scsi%d : detected reconnect request to different target.\n\tData bus = %d\n", hostno, temp);
- return (DID_BAD_INTR << 16);
- }
-
- if (!(temp & (1 << current_target))) {
- printk(KERN_WARNING "scsi%d : Unexpected reselect interrupt. Data bus = %d\n", hostno, temp);
- return (DID_BAD_INTR << 16);
- }
-
- buffer = current_buffer;
- cmnd = current_cmnd; /* WDE add */
- data = current_data; /* WDE add */
- len = current_bufflen; /* WDE add */
- nobuffs = current_nobuffs;
-
- /*
- * We have determined that we have been selected. At this
- * point, we must respond to the reselection by asserting
- * BSY ourselves
- */
-
-#if 1
- WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE | CMD_BSY);
-#else
- WRITE_CONTROL (BASE_CMD | CMD_BSY);
-#endif
-
- /*
- * The target will drop SEL, and raise BSY, at which time
- * we must drop BSY.
- */
-
- ULOOP (100 * 1000) {
- if (!(STATUS & STAT_SEL))
- break;
- if (TIMEOUT) {
- WRITE_CONTROL (BASE_CMD | CMD_INTR);
- DPRINTK (PHASE_RESELECT, "scsi%d : RESELECT timed out while waiting for SEL.\n", hostno);
- return (DID_BAD_INTR << 16);
- }
- }
- WRITE_CONTROL (BASE_CMD);
- /*
- * At this point, we have connected with the target
- * and can get on with our lives.
- */
- break;
- case CAN_RECONNECT:
-#ifdef LINKED
- /*
- * This is a bletcherous hack, just as bad as the Unix #!
- * interpreter stuff. If it turns out we are using the wrong
- * I_T_L nexus, the easiest way to deal with it is to go into
- * our INFORMATION TRANSFER PHASE code, send a ABORT
- * message on MESSAGE OUT phase, and then loop back to here.
- */
-connect_loop:
-#endif
- DPRINTK (PHASE_BUS_FREE, "scsi%d : phase = BUS FREE \n", hostno);
-
- /*
- * BUS FREE PHASE
- *
- * On entry, we make sure that the BUS is in a BUS FREE
- * phase, by insuring that both BSY and SEL are low for
- * at least one bus settle delay. Several reads help
- * eliminate wire glitch.
- */
-
-#ifndef ARBITRATE
-#error FIXME: this is broken: we may not use jiffies here - we are under cli(). It will hardlock.
- clock = jiffies + ST0X_BUS_FREE_DELAY;
-
- while (((STATUS | STATUS | STATUS) & (STAT_BSY | STAT_SEL)) && (!st0x_aborted) && time_before (jiffies, clock))
- cpu_relax();
-
- if (time_after (jiffies, clock))
- return retcode (DID_BUS_BUSY);
- else if (st0x_aborted)
- return retcode (st0x_aborted);
-#endif
- DPRINTK (PHASE_SELECTION, "scsi%d : phase = SELECTION\n", hostno);
-
- clock = jiffies + ST0X_SELECTION_DELAY;
-
- /*
- * Arbitration/selection procedure :
- * 1. Disable drivers
- * 2. Write HOST adapter address bit
- * 3. Set start arbitration.
- * 4. We get either ARBITRATION COMPLETE or SELECT at this
- * point.
- * 5. OR our ID and targets on bus.
- * 6. Enable SCSI drivers and asserted SEL and ATTN
- */
-
-#ifdef ARBITRATE
- /* FIXME: verify host lock is always held here */
- WRITE_CONTROL(0);
- WRITE_DATA((controller_type == SEAGATE) ? 0x80 : 0x40);
- WRITE_CONTROL(CMD_START_ARB);
-
- ULOOP (ST0X_SELECTION_DELAY * 10000) {
- status_read = STATUS;
- if (status_read & STAT_ARB_CMPL)
- break;
- if (st0x_aborted) /* FIXME: What? We are going to do something even after abort? */
- break;
- if (TIMEOUT || (status_read & STAT_SEL)) {
- printk(KERN_WARNING "scsi%d : arbitration lost or timeout.\n", hostno);
- WRITE_CONTROL (BASE_CMD);
- return retcode (DID_NO_CONNECT);
- }
- }
- DPRINTK (PHASE_SELECTION, "scsi%d : arbitration complete\n", hostno);
-#endif
-
- /*
- * When the SCSI device decides that we're gawking at it,
- * it will respond by asserting BUSY on the bus.
- *
- * Note : the Seagate ST-01/02 product manual says that we
- * should twiddle the DATA register before the control
- * register. However, this does not work reliably so we do
- * it the other way around.
- *
- * Probably could be a problem with arbitration too, we
- * really should try this with a SCSI protocol or logic
- * analyzer to see what is going on.
- */
- tmp_data = (unsigned char) ((1 << target) | (controller_type == SEAGATE ? 0x80 : 0x40));
- tmp_control = BASE_CMD | CMD_DRVR_ENABLE | CMD_SEL | (reselect ? CMD_ATTN : 0);
-
- /* FIXME: verify host lock is always held here */
-#ifdef OLDCNTDATASCEME
-#ifdef SWAPCNTDATA
- WRITE_CONTROL (tmp_control);
- WRITE_DATA (tmp_data);
-#else
- WRITE_DATA (tmp_data);
- WRITE_CONTROL (tmp_control);
-#endif
-#else
- tmp_control ^= CMD_BSY; /* This is guesswork. What used to be in driver */
- WRITE_CONTROL (tmp_control); /* could never work: it sent data into control */
- WRITE_DATA (tmp_data); /* register and control info into data. Hopefully */
- tmp_control ^= CMD_BSY; /* fixed, but order of first two may be wrong. */
- WRITE_CONTROL (tmp_control); /* -- pavel@ucw.cz */
-#endif
-
- ULOOP (250 * 1000) {
- if (st0x_aborted) {
- /*
- * If we have been aborted, and we have a
- * command in progress, IE the target
- * still has BSY asserted, then we will
- * reset the bus, and notify the midlevel
- * driver to expect sense.
- */
-
- WRITE_CONTROL (BASE_CMD);
- if (STATUS & STAT_BSY) {
- printk(KERN_WARNING "scsi%d : BST asserted after we've been aborted.\n", hostno);
- seagate_st0x_bus_reset(NULL);
- return retcode (DID_RESET);
- }
- return retcode (st0x_aborted);
- }
- if (STATUS & STAT_BSY)
- break;
- if (TIMEOUT) {
- DPRINTK (PHASE_SELECTION, "scsi%d : NO CONNECT with target %d, stat = %x \n", hostno, target, STATUS);
- return retcode (DID_NO_CONNECT);
- }
- }
-
- /* Establish current pointers. Take into account scatter / gather */
-
- if ((nobuffs = SCint->use_sg)) {
-#if (DEBUG & DEBUG_SG)
- {
- int i;
- printk("scsi%d : scatter gather requested, using %d buffers.\n", hostno, nobuffs);
- for (i = 0; i < nobuffs; ++i)
- printk("scsi%d : buffer %d address = %p length = %d\n",
- hostno, i,
- sg_virt(&buffer[i]),
- buffer[i].length);
- }
-#endif
-
- buffer = (struct scatterlist *) SCint->request_buffer;
- len = buffer->length;
- data = sg_virt(buffer);
- } else {
- DPRINTK (DEBUG_SG, "scsi%d : scatter gather not requested.\n", hostno);
- buffer = NULL;
- len = SCint->request_bufflen;
- data = (unsigned char *) SCint->request_buffer;
- }
-
- DPRINTK (PHASE_DATAIN | PHASE_DATAOUT, "scsi%d : len = %d\n",
- hostno, len);
-
- break;
-#ifdef LINKED
- case LINKED_RIGHT:
- break;
- case LINKED_WRONG:
- break;
-#endif
- } /* end of switch(reselect) */
-
- /*
- * There are several conditions under which we wish to send a message :
- * 1. When we are allowing disconnect / reconnect, and need to
- * establish the I_T_L nexus via an IDENTIFY with the DiscPriv bit
- * set.
- *
- * 2. When we are doing linked commands, are have the wrong I_T_L
- * nexus established and want to send an ABORT message.
- */
-
- /* GCC does not like an ifdef inside a macro, so do it the hard way. */
-#ifdef LINKED
- WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE | (((reselect == CAN_RECONNECT)|| (reselect == LINKED_WRONG))? CMD_ATTN : 0));
-#else
- WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE | (((reselect == CAN_RECONNECT))? CMD_ATTN : 0));
-#endif
-
- /*
- * INFORMATION TRANSFER PHASE
- *
- * The nasty looking read / write inline assembler loops we use for
- * DATAIN and DATAOUT phases are approximately 4-5 times as fast as
- * the 'C' versions - since we're moving 1024 bytes of data, this
- * really adds up.
- *
- * SJT: The nasty-looking assembler is gone, so it's slower.
- *
- */
-
- DPRINTK (PHASE_ETC, "scsi%d : phase = INFORMATION TRANSFER\n", hostno);
-
- incommand = 1;
- transfersize = SCint->transfersize;
- underflow = SCint->underflow;
-
- /*
- * Now, we poll the device for status information,
- * and handle any requests it makes. Note that since we are unsure
- * of how much data will be flowing across the system, etc and
- * cannot make reasonable timeouts, that we will instead have the
- * midlevel driver handle any timeouts that occur in this phase.
- */
-
- while (((status_read = STATUS) & STAT_BSY) && !st0x_aborted && !done) {
-#ifdef PARITY
- if (status_read & STAT_PARITY) {
- printk(KERN_ERR "scsi%d : got parity error\n", hostno);
- st0x_aborted = DID_PARITY;
- }
-#endif
- if (status_read & STAT_REQ) {
-#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
- if ((newphase = (status_read & REQ_MASK)) != phase) {
- phase = newphase;
- switch (phase) {
- case REQ_DATAOUT:
- printk ("scsi%d : phase = DATA OUT\n", hostno);
- break;
- case REQ_DATAIN:
- printk ("scsi%d : phase = DATA IN\n", hostno);
- break;
- case REQ_CMDOUT:
- printk
- ("scsi%d : phase = COMMAND OUT\n", hostno);
- break;
- case REQ_STATIN:
- printk ("scsi%d : phase = STATUS IN\n", hostno);
- break;
- case REQ_MSGOUT:
- printk
- ("scsi%d : phase = MESSAGE OUT\n", hostno);
- break;
- case REQ_MSGIN:
- printk ("scsi%d : phase = MESSAGE IN\n", hostno);
- break;
- default:
- printk ("scsi%d : phase = UNKNOWN\n", hostno);
- st0x_aborted = DID_ERROR;
- }
- }
-#endif
- switch (status_read & REQ_MASK) {
- case REQ_DATAOUT:
- /*
- * If we are in fast mode, then we simply splat
- * the data out in word-sized chunks as fast as
- * we can.
- */
-
- if (!len) {
-#if 0
- printk("scsi%d: underflow to target %d lun %d \n", hostno, target, lun);
- st0x_aborted = DID_ERROR;
- fast = 0;
-#endif
- break;
- }
-
- if (fast && transfersize
- && !(len % transfersize)
- && (len >= transfersize)
-#ifdef FAST32
- && !(transfersize % 4)
-#endif
- ) {
- DPRINTK (DEBUG_FAST,
- "scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
- " len = %d, data = %08x\n",
- hostno, SCint->underflow,
- SCint->transfersize, len,
- data);
-
- /* SJT: Start. Fast Write */
-#ifdef SEAGATE_USE_ASM
- __asm__ ("cld\n\t"
-#ifdef FAST32
- "shr $2, %%ecx\n\t"
- "1:\t"
- "lodsl\n\t"
- "movl %%eax, (%%edi)\n\t"
-#else
- "1:\t"
- "lodsb\n\t"
- "movb %%al, (%%edi)\n\t"
-#endif
- "loop 1b;"
- /* output */ :
- /* input */ :"D" (st0x_dr),
- "S"
- (data),
- "c" (SCint->transfersize)
-/* clobbered */
- : "eax", "ecx",
- "esi");
-#else /* SEAGATE_USE_ASM */
- memcpy_toio(st0x_dr, data, transfersize);
-#endif /* SEAGATE_USE_ASM */
-/* SJT: End */
- len -= transfersize;
- data += transfersize;
- DPRINTK (DEBUG_FAST, "scsi%d : FAST transfer complete len = %d data = %08x\n", hostno, len, data);
- } else {
- /*
- * We loop as long as we are in a
- * data out phase, there is data to
- * send, and BSY is still active.
- */
-
-/* SJT: Start. Slow Write. */
-#ifdef SEAGATE_USE_ASM
-
- int __dummy_1, __dummy_2;
-
-/*
- * We loop as long as we are in a data out phase, there is data to send,
- * and BSY is still active.
- */
-/* Local variables : len = ecx , data = esi,
- st0x_cr_sr = ebx, st0x_dr = edi
-*/
- __asm__ (
- /* Test for any data here at all. */
- "orl %%ecx, %%ecx\n\t"
- "jz 2f\n\t" "cld\n\t"
-/* "movl st0x_cr_sr, %%ebx\n\t" */
-/* "movl st0x_dr, %%edi\n\t" */
- "1:\t"
- "movb (%%ebx), %%al\n\t"
- /* Test for BSY */
- "test $1, %%al\n\t"
- "jz 2f\n\t"
- /* Test for data out phase - STATUS & REQ_MASK should be
- REQ_DATAOUT, which is 0. */
- "test $0xe, %%al\n\t"
- "jnz 2f\n\t"
- /* Test for REQ */
- "test $0x10, %%al\n\t"
- "jz 1b\n\t"
- "lodsb\n\t"
- "movb %%al, (%%edi)\n\t"
- "loop 1b\n\t" "2:\n"
- /* output */ :"=S" (data), "=c" (len),
- "=b"
- (__dummy_1),
- "=D" (__dummy_2)
-/* input */
- : "0" (data), "1" (len),
- "2" (st0x_cr_sr),
- "3" (st0x_dr)
-/* clobbered */
- : "eax");
-#else /* SEAGATE_USE_ASM */
- while (len) {
- unsigned char stat;
-
- stat = STATUS;
- if (!(stat & STAT_BSY)
- || ((stat & REQ_MASK) !=
- REQ_DATAOUT))
- break;
- if (stat & STAT_REQ) {
- WRITE_DATA (*data++);
- --len;
- }
- }
-#endif /* SEAGATE_USE_ASM */
-/* SJT: End. */
- }
-
- if (!len && nobuffs) {
- --nobuffs;
- ++buffer;
- len = buffer->length;
- data = sg_virt(buffer);
- DPRINTK (DEBUG_SG,
- "scsi%d : next scatter-gather buffer len = %d address = %08x\n",
- hostno, len, data);
- }
- break;
-
- case REQ_DATAIN:
-#ifdef SLOW_RATE
- if (borken) {
-#if (DEBUG & (PHASE_DATAIN))
- transfered += len;
-#endif
- for (; len && (STATUS & (REQ_MASK | STAT_REQ)) == (REQ_DATAIN | STAT_REQ); --len) {
- *data++ = DATA;
- borken_wait();
- }
-#if (DEBUG & (PHASE_DATAIN))
- transfered -= len;
-#endif
- } else
-#endif
-
- if (fast && transfersize
- && !(len % transfersize)
- && (len >= transfersize)
-#ifdef FAST32
- && !(transfersize % 4)
-#endif
- ) {
- DPRINTK (DEBUG_FAST,
- "scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
- " len = %d, data = %08x\n",
- hostno, SCint->underflow,
- SCint->transfersize, len,
- data);
-
-/* SJT: Start. Fast Read */
-#ifdef SEAGATE_USE_ASM
- __asm__ ("cld\n\t"
-#ifdef FAST32
- "shr $2, %%ecx\n\t"
- "1:\t"
- "movl (%%esi), %%eax\n\t"
- "stosl\n\t"
-#else
- "1:\t"
- "movb (%%esi), %%al\n\t"
- "stosb\n\t"
-#endif
- "loop 1b\n\t"
- /* output */ :
- /* input */ :"S" (st0x_dr),
- "D"
- (data),
- "c" (SCint->transfersize)
-/* clobbered */
- : "eax", "ecx",
- "edi");
-#else /* SEAGATE_USE_ASM */
- memcpy_fromio(data, st0x_dr, len);
-#endif /* SEAGATE_USE_ASM */
-/* SJT: End */
- len -= transfersize;
- data += transfersize;
-#if (DEBUG & PHASE_DATAIN)
- printk ("scsi%d: transfered += %d\n", hostno, transfersize);
- transfered += transfersize;
-#endif
-
- DPRINTK (DEBUG_FAST, "scsi%d : FAST transfer complete len = %d data = %08x\n", hostno, len, data);
- } else {
-
-#if (DEBUG & PHASE_DATAIN)
- printk ("scsi%d: transfered += %d\n", hostno, len);
- transfered += len; /* Assume we'll transfer it all, then
- subtract what we *didn't* transfer */
-#endif
-
-/*
- * We loop as long as we are in a data in phase, there is room to read,
- * and BSY is still active
- */
-
-/* SJT: Start. */
-#ifdef SEAGATE_USE_ASM
-
- int __dummy_3, __dummy_4;
-
-/* Dummy clobbering variables for the new gcc-2.95 */
-
-/*
- * We loop as long as we are in a data in phase, there is room to read,
- * and BSY is still active
- */
- /* Local variables : ecx = len, edi = data
- esi = st0x_cr_sr, ebx = st0x_dr */
- __asm__ (
- /* Test for room to read */
- "orl %%ecx, %%ecx\n\t"
- "jz 2f\n\t" "cld\n\t"
-/* "movl st0x_cr_sr, %%esi\n\t" */
-/* "movl st0x_dr, %%ebx\n\t" */
- "1:\t"
- "movb (%%esi), %%al\n\t"
- /* Test for BSY */
- "test $1, %%al\n\t"
- "jz 2f\n\t"
- /* Test for data in phase - STATUS & REQ_MASK should be REQ_DATAIN,
- = STAT_IO, which is 4. */
- "movb $0xe, %%ah\n\t"
- "andb %%al, %%ah\n\t"
- "cmpb $0x04, %%ah\n\t"
- "jne 2f\n\t"
- /* Test for REQ */
- "test $0x10, %%al\n\t"
- "jz 1b\n\t"
- "movb (%%ebx), %%al\n\t"
- "stosb\n\t"
- "loop 1b\n\t" "2:\n"
- /* output */ :"=D" (data), "=c" (len),
- "=S"
- (__dummy_3),
- "=b" (__dummy_4)
-/* input */
- : "0" (data), "1" (len),
- "2" (st0x_cr_sr),
- "3" (st0x_dr)
-/* clobbered */
- : "eax");
-#else /* SEAGATE_USE_ASM */
- while (len) {
- unsigned char stat;
-
- stat = STATUS;
- if (!(stat & STAT_BSY)
- || ((stat & REQ_MASK) !=
- REQ_DATAIN))
- break;
- if (stat & STAT_REQ) {
- *data++ = DATA;
- --len;
- }
- }
-#endif /* SEAGATE_USE_ASM */
-/* SJT: End. */
-#if (DEBUG & PHASE_DATAIN)
- printk ("scsi%d: transfered -= %d\n", hostno, len);
- transfered -= len; /* Since we assumed all of Len got *
- transfered, correct our mistake */
-#endif
- }
-
- if (!len && nobuffs) {
- --nobuffs;
- ++buffer;
- len = buffer->length;
- data = sg_virt(buffer);
- DPRINTK (DEBUG_SG, "scsi%d : next scatter-gather buffer len = %d address = %08x\n", hostno, len, data);
- }
- break;
-
- case REQ_CMDOUT:
- while (((status_read = STATUS) & STAT_BSY) &&
- ((status_read & REQ_MASK) == REQ_CMDOUT))
- if (status_read & STAT_REQ) {
- WRITE_DATA (*(const unsigned char *) cmnd);
- cmnd = 1 + (const unsigned char *)cmnd;
-#ifdef SLOW_RATE
- if (borken)
- borken_wait ();
-#endif
- }
- break;
-
- case REQ_STATIN:
- status = DATA;
- break;
-
- case REQ_MSGOUT:
- /*
- * We can only have sent a MSG OUT if we
- * requested to do this by raising ATTN.
- * So, we must drop ATTN.
- */
- WRITE_CONTROL (BASE_CMD | CMD_DRVR_ENABLE);
- /*
- * If we are reconnecting, then we must
- * send an IDENTIFY message in response
- * to MSGOUT.
- */
- switch (reselect) {
- case CAN_RECONNECT:
- WRITE_DATA (IDENTIFY (1, lun));
- DPRINTK (PHASE_RESELECT | PHASE_MSGOUT, "scsi%d : sent IDENTIFY message.\n", hostno);
- break;
-#ifdef LINKED
- case LINKED_WRONG:
- WRITE_DATA (ABORT);
- linked_connected = 0;
- reselect = CAN_RECONNECT;
- goto connect_loop;
- DPRINTK (PHASE_MSGOUT | DEBUG_LINKED, "scsi%d : sent ABORT message to cancel incorrect I_T_L nexus.\n", hostno);
-#endif /* LINKED */
- DPRINTK (DEBUG_LINKED, "correct\n");
- default:
- WRITE_DATA (NOP);
- printk("scsi%d : target %d requested MSGOUT, sent NOP message.\n", hostno, target);
- }
- break;
-
- case REQ_MSGIN:
- switch (message = DATA) {
- case DISCONNECT:
- DANY("seagate: deciding to disconnect\n");
- should_reconnect = 1;
- current_data = data; /* WDE add */
- current_buffer = buffer;
- current_bufflen = len; /* WDE add */
- current_nobuffs = nobuffs;
-#ifdef LINKED
- linked_connected = 0;
-#endif
- done = 1;
- DPRINTK ((PHASE_RESELECT | PHASE_MSGIN), "scsi%d : disconnected.\n", hostno);
- break;
-
-#ifdef LINKED
- case LINKED_CMD_COMPLETE:
- case LINKED_FLG_CMD_COMPLETE:
-#endif
- case COMMAND_COMPLETE:
- /*
- * Note : we should check for underflow here.
- */
- DPRINTK(PHASE_MSGIN, "scsi%d : command complete.\n", hostno);
- done = 1;
- break;
- case ABORT:
- DPRINTK(PHASE_MSGIN, "scsi%d : abort message.\n", hostno);
- done = 1;
- break;
- case SAVE_POINTERS:
- current_buffer = buffer;
- current_bufflen = len; /* WDE add */
- current_data = data; /* WDE mod */
- current_nobuffs = nobuffs;
- DPRINTK (PHASE_MSGIN, "scsi%d : pointers saved.\n", hostno);
- break;
- case RESTORE_POINTERS:
- buffer = current_buffer;
- cmnd = current_cmnd;
- data = current_data; /* WDE mod */
- len = current_bufflen;
- nobuffs = current_nobuffs;
- DPRINTK(PHASE_MSGIN, "scsi%d : pointers restored.\n", hostno);
- break;
- default:
-
- /*
- * IDENTIFY distinguishes itself
- * from the other messages by
- * setting the high bit.
- *
- * Note : we need to handle at
- * least one outstanding command
- * per LUN, and need to hash the
- * SCSI command for that I_T_L
- * nexus based on the known ID
- * (at this point) and LUN.
- */
-
- if (message & 0x80) {
- DPRINTK (PHASE_MSGIN, "scsi%d : IDENTIFY message received from id %d, lun %d.\n", hostno, target, message & 7);
- } else {
- /*
- * We should go into a
- * MESSAGE OUT phase, and
- * send a MESSAGE_REJECT
- * if we run into a message
- * that we don't like. The
- * seagate driver needs
- * some serious
- * restructuring first
- * though.
- */
- DPRINTK (PHASE_MSGIN, "scsi%d : unknown message %d from target %d.\n", hostno, message, target);
- }
- }
- break;
- default:
- printk(KERN_ERR "scsi%d : unknown phase.\n", hostno);
- st0x_aborted = DID_ERROR;
- } /* end of switch (status_read & REQ_MASK) */
-#ifdef SLOW_RATE
- /*
- * I really don't care to deal with borken devices in
- * each single byte transfer case (ie, message in,
- * message out, status), so I'll do the wait here if
- * necessary.
- */
- if(borken)
- borken_wait();
-#endif
-
- } /* if(status_read & STAT_REQ) ends */
- } /* while(((status_read = STATUS)...) ends */
-
- DPRINTK(PHASE_DATAIN | PHASE_DATAOUT | PHASE_EXIT, "scsi%d : Transfered %d bytes\n", hostno, transfered);
-
-#if (DEBUG & PHASE_EXIT)
-#if 0 /* Doesn't work for scatter/gather */
- printk("Buffer : \n");
- for(i = 0; i < 20; ++i)
- printk("%02x ", ((unsigned char *) data)[i]); /* WDE mod */
- printk("\n");
-#endif
- printk("scsi%d : status = ", hostno);
- scsi_print_status(status);
- printk(" message = %02x\n", message);
-#endif
-
- /* We shouldn't reach this until *after* BSY has been deasserted */
-
-#ifdef LINKED
- else
- {
- /*
- * Fix the message byte so that unsuspecting high level drivers
- * don't puke when they see a LINKED COMMAND message in place of
- * the COMMAND COMPLETE they may be expecting. Shouldn't be
- * necessary, but it's better to be on the safe side.
- *
- * A non LINKED* message byte will indicate that the command
- * completed, and we are now disconnected.
- */
-
- switch (message) {
- case LINKED_CMD_COMPLETE:
- case LINKED_FLG_CMD_COMPLETE:
- message = COMMAND_COMPLETE;
- linked_target = current_target;
- linked_lun = current_lun;
- linked_connected = 1;
- DPRINTK (DEBUG_LINKED, "scsi%d : keeping I_T_L nexus established for linked command.\n", hostno);
- /* We also will need to adjust status to accommodate intermediate
- conditions. */
- if ((status == INTERMEDIATE_GOOD) || (status == INTERMEDIATE_C_GOOD))
- status = GOOD;
- break;
- /*
- * We should also handle what are "normal" termination
- * messages here (ABORT, BUS_DEVICE_RESET?, and
- * COMMAND_COMPLETE individually, and flake if things
- * aren't right.
- */
- default:
- DPRINTK (DEBUG_LINKED, "scsi%d : closing I_T_L nexus.\n", hostno);
- linked_connected = 0;
- }
- }
-#endif /* LINKED */
-
- if (should_reconnect) {
- DPRINTK (PHASE_RESELECT, "scsi%d : exiting seagate_st0x_queue_command() with reconnect enabled.\n", hostno);
- WRITE_CONTROL (BASE_CMD | CMD_INTR);
- } else
- WRITE_CONTROL (BASE_CMD);
-
- return retcode (st0x_aborted);
-} /* end of internal_command */
-
-static int seagate_st0x_abort(struct scsi_cmnd * SCpnt)
-{
- st0x_aborted = DID_ABORT;
- return SUCCESS;
-}
-
-#undef ULOOP
-#undef TIMEOUT
-
-/*
- * the seagate_st0x_reset function resets the SCSI bus
- *
- * May be called with SCpnt = NULL
- */
-
-static int seagate_st0x_bus_reset(struct scsi_cmnd * SCpnt)
-{
- /* No timeouts - this command is going to fail because it was reset. */
- DANY ("scsi%d: Reseting bus... ", hostno);
-
- /* assert RESET signal on SCSI bus. */
- WRITE_CONTROL (BASE_CMD | CMD_RST);
-
- mdelay (20);
-
- WRITE_CONTROL (BASE_CMD);
- st0x_aborted = DID_RESET;
-
- DANY ("done.\n");
- return SUCCESS;
-}
-
-static int seagate_st0x_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, shost);
- release_region(shost->io_port, shost->n_io_port);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .detect = seagate_st0x_detect,
- .release = seagate_st0x_release,
- .info = seagate_st0x_info,
- .queuecommand = seagate_st0x_queue_command,
- .eh_abort_handler = seagate_st0x_abort,
- .eh_bus_reset_handler = seagate_st0x_bus_reset,
- .can_queue = 1,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = DISABLE_CLUSTERING,
-};
-#include "scsi_module.c"
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f1871ea04045..17216b76efdc 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -602,8 +602,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
* is a non-zero input_size, so emit a warning.
*/
- if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
- if (printk_ratelimit())
+ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
+ static char cmd[TASK_COMM_LEN];
+ if (strcmp(current->comm, cmd) && printk_ratelimit()) {
printk(KERN_WARNING
"sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
"guessing data in;\n" KERN_WARNING " "
@@ -611,6 +612,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
+ strcpy(cmd, current->comm);
+ }
+ }
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
}
@@ -1418,7 +1422,6 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
goto out;
}
- class_set_devdata(cl_dev, sdp);
error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
if (error)
goto cdev_add_err;
@@ -1431,11 +1434,14 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
cl_dev->dev, "%s",
disk->disk_name);
- if (IS_ERR(sg_class_member))
- printk(KERN_WARNING "sg_add: "
- "class_device_create failed\n");
+ if (IS_ERR(sg_class_member)) {
+ printk(KERN_ERR "sg_add: "
+ "class_device_create failed\n");
+ error = PTR_ERR(sg_class_member);
+ goto cdev_add_err;
+ }
class_set_devdata(sg_class_member, sdp);
- error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
+ error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
&sg_class_member->kobj, "generic");
if (error)
printk(KERN_ERR "sg_add: unable to make symlink "
@@ -1447,6 +1453,8 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
"Attached scsi generic sg%d type %d\n", sdp->index,
scsidp->type);
+ class_set_devdata(cl_dev, sdp);
+
return 0;
cdev_add_err:
@@ -2521,7 +2529,7 @@ sg_idr_max_id(int id, void *p, void *data)
static int
sg_last_dev(void)
{
- int k = 0;
+ int k = -1;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index eef82758d047..d4ebe8c67ba9 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -159,6 +159,7 @@ void sgiwd93_reset(unsigned long base)
udelay(50);
hregs->ctrl = 0;
}
+EXPORT_SYMBOL_GPL(sgiwd93_reset);
static inline void init_hpc_chain(struct hpc_data *hd)
{
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index c61999031141..1fcee16fa36d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -67,8 +67,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
#define SR_DISKS 256
-#define MAX_RETRIES 3
-#define SR_TIMEOUT (30 * HZ)
#define SR_CAPABILITIES \
(CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \
CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \
@@ -179,21 +177,28 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
{
struct scsi_cd *cd = cdi->handle;
int retval;
+ struct scsi_sense_hdr *sshdr;
if (CDSL_CURRENT != slot) {
/* no changer support */
return -EINVAL;
}
- retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES);
- if (retval) {
- /* Unable to test, unit probably not ready. This usually
- * means there is no disc in the drive. Mark as changed,
- * and we will figure it out later once the drive is
- * available again. */
+ sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+ retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
+ sshdr);
+ if (retval || (scsi_sense_valid(sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr->asc == 0x3a)) {
+ /* Media not present or unable to test, unit probably not
+ * ready. This usually means there is no disc in the drive.
+ * Mark as changed, and we will figure it out later once
+ * the drive is available again.
+ */
cd->device->changed = 1;
- return 1; /* This will force a flush, if called from
- * check_disk_change */
+ /* This will force a flush, if called from check_disk_change */
+ retval = 1;
+ goto out;
};
retval = cd->device->changed;
@@ -203,9 +208,17 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
if (retval) {
/* check multisession offset etc */
sr_cd_check(cdi);
-
get_sectorsize(cd);
}
+
+out:
+ /* Notify userspace, that media has changed. */
+ if (retval != cd->previous_state)
+ sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
+ GFP_KERNEL);
+ cd->previous_state = retval;
+ kfree(sshdr);
+
return retval;
}
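Both the sd and sr hunks end the same way: the result of the media-change check is compared with the previously reported state, and a SDEV_EVT_MEDIA_CHANGE event is emitted only when it flips, so udev sees one notification per transition rather than one per poll. A sketch of that pattern in isolation (names illustrative):

#include <linux/gfp.h>
#include <scsi/scsi_device.h>

static void example_notify_media_change(struct scsi_device *sdev,
					int changed, int *previous_state)
{
	if (changed != *previous_state)
		sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
	*previous_state = changed;
}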
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index d65de9621b27..81fbc0b78a52 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -20,6 +20,9 @@
#include <linux/genhd.h>
#include <linux/kref.h>
+#define MAX_RETRIES 3
+#define SR_TIMEOUT (30 * HZ)
+
struct scsi_device;
/* The CDROM is fairly slow, so we need a little extra time */
@@ -37,6 +40,7 @@ typedef struct scsi_cd {
unsigned xa_flag:1; /* CD has XA sectors ? */
unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
unsigned readcd_cdda:1; /* reading audio data using READ_CD */
+ unsigned previous_state:1; /* media has changed */
struct cdrom_device_info cdi;
/* We hold gendisk and scsi_device references on probe and use
* the refs on this kref to decide when to release them */
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index e1589f91706a..d5cebff1d646 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -275,18 +275,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
/* ---------------------------------------------------------------------- */
/* interface to cdrom.c */
-static int test_unit_ready(Scsi_CD *cd)
-{
- struct packet_command cgc;
-
- memset(&cgc, 0, sizeof(struct packet_command));
- cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
- cgc.quiet = 1;
- cgc.data_direction = DMA_NONE;
- cgc.timeout = IOCTL_TIMEOUT;
- return sr_do_ioctl(cd, &cgc);
-}
-
int sr_tray_move(struct cdrom_device_info *cdi, int pos)
{
Scsi_CD *cd = cdi->handle;
@@ -310,14 +298,46 @@ int sr_lock_door(struct cdrom_device_info *cdi, int lock)
int sr_drive_status(struct cdrom_device_info *cdi, int slot)
{
+ struct scsi_cd *cd = cdi->handle;
+ struct scsi_sense_hdr sshdr;
+ struct media_event_desc med;
+
if (CDSL_CURRENT != slot) {
/* we have no changer support */
return -EINVAL;
}
- if (0 == test_unit_ready(cdi->handle))
+ if (0 == scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
+ &sshdr))
return CDS_DISC_OK;
- return CDS_TRAY_OPEN;
+ if (!cdrom_get_media_event(cdi, &med)) {
+ if (med.media_present)
+ return CDS_DISC_OK;
+ else if (med.door_open)
+ return CDS_TRAY_OPEN;
+ else
+ return CDS_NO_DISC;
+ }
+
+ /*
+ * 0x04 is format in progress .. but there must be a disc present!
+ */
+ if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04)
+ return CDS_DISC_OK;
+
+ /*
+ * If not using Mt Fuji extended media tray reports,
+ * just return TRAY_OPEN since ATAPI doesn't provide
+ * any other way to detect this...
+ */
+ if (scsi_sense_valid(&sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr.asc == 0x3a)
+ return CDS_NO_DISC;
+ else
+ return CDS_TRAY_OPEN;
+
+ return CDS_DRIVE_NOT_READY;
}
int sr_disk_status(struct cdrom_device_info *cdi)
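
A condensed sketch of the decision sr_drive_status() now makes once TEST UNIT READY fails, using the Mt Fuji GET EVENT STATUS NOTIFICATION data when the drive supplies it (cdrom_get_media_event() returns 0 on success); this only restates the ordering of the hunk above:

	struct media_event_desc med;

	if (!cdrom_get_media_event(cdi, &med)) {
		if (med.media_present)
			return CDS_DISC_OK;
		if (med.door_open)
			return CDS_TRAY_OPEN;
		return CDS_NO_DISC;
	}
	/* otherwise fall back to interpreting the sense data, as above */
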
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 328c47c6aeb1..71952703125a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
Michael Schaefer, J"org Weule, and Eric Youngdale.
- Copyright 1992 - 2007 Kai Makisara
+ Copyright 1992 - 2008 Kai Makisara
email Kai.Makisara@kolumbus.fi
Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
-static const char *verstr = "20070203";
+static const char *verstr = "20080117";
#include <linux/module.h>
@@ -3214,8 +3214,7 @@ static int partition_tape(struct scsi_tape *STp, int size)
/* The ioctl command */
-static int st_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd_in, unsigned long arg)
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
int i, cmd_nr, cmd_type, bt;
int retval = 0;
@@ -3870,7 +3869,7 @@ static const struct file_operations st_fops =
.owner = THIS_MODULE,
.read = st_read,
.write = st_write,
- .ioctl = st_ioctl,
+ .unlocked_ioctl = st_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = st_compat_ioctl,
#endif
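
The st_ioctl() change above is the usual .ioctl -> .unlocked_ioctl conversion: the handler loses the inode argument and the implicit Big Kernel Lock, so the driver must supply its own serialization (st already takes a per-device lock at the top of its handler). A generic sketch, with all foo_* names as placeholders:

	static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		struct foo_dev *dev = file->private_data;
		long ret;

		if (mutex_lock_interruptible(&dev->lock))
			return -ERESTARTSYS;
		ret = foo_do_ioctl(dev, cmd, arg);	/* hypothetical worker */
		mutex_unlock(&dev->lock);
		return ret;
	}

	static const struct file_operations foo_fops = {
		.owner          = THIS_MODULE,
		.unlocked_ioctl = foo_ioctl,
	};
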
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 2dcde373b20e..bcaba86060ab 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -515,9 +515,9 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
* various queues are valid.
*/
- if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
- cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
@@ -528,8 +528,8 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
} else {
cmd->SCp.buffer = NULL;
cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = (char *) cmd->request_buffer;
- cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
}
}
@@ -935,7 +935,7 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd,
}
# endif
# ifdef NCR5380_STAT_LIMIT
- if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
+ if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
switch (cmd->cmnd[0])
{
@@ -943,14 +943,14 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd,
case WRITE_6:
case WRITE_10:
hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
- hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;
+ hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingw++;
break;
case READ:
case READ_6:
case READ_10:
hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
- hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;
+ hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingr++;
break;
}
@@ -1345,7 +1345,7 @@ static void collect_stats(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
# ifdef NCR5380_STAT_LIMIT
- if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
+ if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
switch (cmd->cmnd[0])
{
@@ -1353,14 +1353,14 @@ static void collect_stats(struct NCR5380_hostdata *hostdata,
case WRITE_6:
case WRITE_10:
hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
- /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/
+ /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
hostdata->pendingw--;
break;
case READ:
case READ_6:
case READ_10:
hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
- /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/
+ /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
hostdata->pendingr--;
break;
}
@@ -1863,7 +1863,7 @@ static int do_abort (struct Scsi_Host *host)
* the target sees, so we just handshake.
*/
- while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ);
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
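
Besides the switch from use_sg/request_buffer to the scsi_sglist()/scsi_sg_count()/scsi_bufflen() accessors, the do_abort() hunk above fixes an operator-precedence bug: '!' binds tighter than '&', so the old condition computed (!tmp) & SR_REQ, which is 0 for the SR_REQ mask regardless of the register value, and the loop exited without ever waiting for REQ. The corrected spin, for clarity:

	/* wait until the target asserts REQ before driving the phase lines */
	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
		;
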
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 90cee94d9522..1f6fd1680335 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -328,27 +328,13 @@ static __inline__ unsigned int sym53c416_write(int base, unsigned char *buffer,
static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
{
struct Scsi_Host *dev = dev_id;
- int base = 0;
+ int base = dev->io_port;
int i;
unsigned long flags = 0;
unsigned char status_reg, pio_int_reg, int_reg;
struct scatterlist *sg;
unsigned int tot_trans = 0;
- /* We search the base address of the host adapter which caused the interrupt */
- /* FIXME: should pass dev_id sensibly as hosts[i] */
- for(i = 0; i < host_index && !base; i++)
- if(irq == hosts[i].irq)
- base = hosts[i].base;
- /* If no adapter found, we cannot handle the interrupt. Leave a message */
- /* and continue. This should never happen... */
- if(!base)
- {
- printk(KERN_ERR "sym53c416: No host adapter defined for interrupt %d\n", irq);
- return IRQ_NONE;
- }
- /* Now we have the base address and we can start handling the interrupt */
-
spin_lock_irqsave(dev->host_lock,flags);
status_reg = inb(base + STATUS_REG);
pio_int_reg = inb(base + PIO_INT_REG);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 9e0908d1981a..21e926dcdab0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -207,10 +207,9 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
/*
* Bounce back the sense data to user.
*/
- memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+ memset(&cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->sense_buffer, cp->sns_bbuf,
- min(sizeof(cmd->sense_buffer),
- (size_t)SYM_SNS_BBUF_LEN));
+ min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN));
#if 0
/*
* If the device reports a UNIT ATTENTION condition
@@ -609,22 +608,24 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
*/
#define WAIT_FOR_PCI_RECOVERY 35
if (pci_channel_offline(pdev)) {
- struct completion *io_reset;
int finished_reset = 0;
init_completion(&eh_done);
spin_lock_irq(shost->host_lock);
/* Make sure we didn't race */
if (pci_channel_offline(pdev)) {
- if (!sym_data->io_reset)
- sym_data->io_reset = &eh_done;
- io_reset = sym_data->io_reset;
+ BUG_ON(sym_data->io_reset);
+ sym_data->io_reset = &eh_done;
} else {
finished_reset = 1;
}
spin_unlock_irq(shost->host_lock);
if (!finished_reset)
- finished_reset = wait_for_completion_timeout(io_reset,
+ finished_reset = wait_for_completion_timeout
+ (sym_data->io_reset,
WAIT_FOR_PCI_RECOVERY*HZ);
+ spin_lock_irq(shost->host_lock);
+ sym_data->io_reset = NULL;
+ spin_unlock_irq(shost->host_lock);
if (!finished_reset)
return SCSI_FAILED;
}
@@ -1744,7 +1745,7 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
return -ENODEV;
}
-static void __devexit sym2_remove(struct pci_dev *pdev)
+static void sym2_remove(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -1879,7 +1880,6 @@ static void sym2_io_resume(struct pci_dev *pdev)
spin_lock_irq(shost->host_lock);
if (sym_data->io_reset)
complete_all(sym_data->io_reset);
- sym_data->io_reset = NULL;
spin_unlock_irq(shost->host_lock);
}
@@ -2056,7 +2056,7 @@ static struct pci_driver sym2_driver = {
.name = NAME53C8XX,
.id_table = sym2_id_table,
.probe = sym2_probe,
- .remove = __devexit_p(sym2_remove),
+ .remove = sym2_remove,
.err_handler = &sym2_err_handler,
};
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 44193049c4ae..5b04ddfed26c 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -444,7 +444,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
/* Map sense buffer */
if (pSRB->SRBFlag & AUTO_REQSENSE) {
- pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, sizeof(pcmd->sense_buffer));
+ pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
DMA_FROM_DEVICE);
cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
@@ -599,7 +599,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
DC390_write8 (ScsiFifo, 0);
DC390_write8 (ScsiFifo, 0);
- DC390_write8 (ScsiFifo, sizeof(scmd->sense_buffer));
+ DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
DC390_write8 (ScsiFifo, 0);
DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n"));
}
@@ -1389,7 +1389,7 @@ dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus
DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
DC390_write8 (ScsiFifo, 0);
DC390_write8 (ScsiFifo, 0);
- DC390_write8 (ScsiFifo, sizeof(pSRB->pcmd->sense_buffer));
+ DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
DC390_write8 (ScsiFifo, 0);
DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n"));
}
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 7edd6ceb13b2..4bc5407f9695 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1121,9 +1121,9 @@ static void map_dma(unsigned int i, unsigned int j) {
if (SCpnt->sense_buffer)
cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
- sizeof SCpnt->sense_buffer, PCI_DMA_FROMDEVICE));
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
- cpp->sense_len = sizeof SCpnt->sense_buffer;
+ cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
if (scsi_bufflen(SCpnt)) {
count = scsi_dma_map(SCpnt);
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 6d1f0edd7985..75eca6b22db5 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -298,9 +298,16 @@ static inline int find_and_clear_bit_16(unsigned long *field)
{
int rv;
- if (*field == 0) panic("No free mscp");
- asm("xorl %0,%0\n0:\tbsfw %1,%w0\n\tbtr %0,%1\n\tjnc 0b"
- : "=&r" (rv), "=m" (*field) : "1" (*field));
+ if (*field == 0)
+ panic("No free mscp");
+
+ asm volatile (
+ "xorl %0,%0\n\t"
+ "0: bsfw %1,%w0\n\t"
+ "btr %0,%1\n\t"
+ "jnc 0b"
+ : "=&r" (rv), "=m" (*field) :);
+
return rv;
}
@@ -741,7 +748,7 @@ static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
}
my_mscp->command_link = 0; /*???*/
my_mscp->scsi_command_link_id = 0; /*???*/
- my_mscp->length_of_sense_byte = sizeof SCpnt->sense_buffer;
+ my_mscp->length_of_sense_byte = SCSI_SENSE_BUFFERSIZE;
my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
my_mscp->adapter_status = 0;
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fdbb92d1f722..f286c37da7e0 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -407,16 +407,16 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
* - SCp.phase records this command's SRCID_ER bit setting
*/
- if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
- cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ if (scsi_bufflen(cmd)) {
+ cmd->SCp.buffer = scsi_sglist(cmd);
+ cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = (char *) cmd->request_buffer;
- cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.this_residual = 0;
}
/* WD docs state that at the conclusion of a "LEVEL2" command, the
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 03cd44f231df..b4304ae78527 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1108,13 +1108,10 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
scb->host = host;
nseg = scsi_sg_count(SCpnt);
- if (nseg) {
+ if (nseg > 1) {
struct scatterlist *sg;
unsigned i;
- if (SCpnt->device->host->sg_tablesize == SG_NONE) {
- panic("wd7000_queuecommand: scatter/gather not supported.\n");
- }
dprintk("Using scatter/gather with %d elements.\n", nseg);
sgb = scb->sgb;
@@ -1128,7 +1125,10 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
}
} else {
scb->op = 0;
- any2scsi(scb->dataptr, isa_virt_to_bus(scsi_sglist(SCpnt)));
+ if (nseg) {
+ struct scatterlist *sg = scsi_sglist(SCpnt);
+ any2scsi(scb->dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
+ }
any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
}
@@ -1524,7 +1524,7 @@ static __init int wd7000_detect(struct scsi_host_template *tpnt)
* For boards before rev 6.0, scatter/gather isn't supported.
*/
if (host->rev1 < 6)
- sh->sg_tablesize = SG_NONE;
+ sh->sg_tablesize = 1;
present++; /* count it */
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 88aa59ab7563..f5a4e8d6a3b1 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -132,8 +132,7 @@ freecom_readdata (struct scsi_cmnd *srb, struct us_data *us,
/* Now transfer all of our blocks. */
US_DEBUGP("Start of read\n");
- result = usb_stor_bulk_transfer_sg(us, ipipe, srb->request_buffer,
- count, srb->use_sg, &srb->resid);
+ result = usb_stor_bulk_srb(us, ipipe, srb);
US_DEBUGP("freecom_readdata done!\n");
if (result > USB_STOR_XFER_SHORT)
@@ -166,8 +165,7 @@ freecom_writedata (struct scsi_cmnd *srb, struct us_data *us,
/* Now transfer all of our blocks. */
US_DEBUGP("Start of write\n");
- result = usb_stor_bulk_transfer_sg(us, opipe, srb->request_buffer,
- count, srb->use_sg, &srb->resid);
+ result = usb_stor_bulk_srb(us, opipe, srb);
US_DEBUGP("freecom_writedata done!\n");
if (result > USB_STOR_XFER_SHORT)
@@ -281,7 +279,7 @@ int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
* and such will hang. */
US_DEBUGP("Device indicates that it has %d bytes available\n",
le16_to_cpu (fst->Count));
- US_DEBUGP("SCSI requested %d\n", srb->request_bufflen);
+ US_DEBUGP("SCSI requested %d\n", scsi_bufflen(srb));
/* Find the length we desire to read. */
switch (srb->cmnd[0]) {
@@ -292,12 +290,12 @@ int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
length = le16_to_cpu(fst->Count);
break;
default:
- length = srb->request_bufflen;
+ length = scsi_bufflen(srb);
}
/* verify that this amount is legal */
- if (length > srb->request_bufflen) {
- length = srb->request_bufflen;
+ if (length > scsi_bufflen(srb)) {
+ length = scsi_bufflen(srb);
US_DEBUGP("Truncating request to match buffer length: %d\n", length);
}
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 49ba6c0ff1e8..178e8c2a8a2f 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -287,6 +288,7 @@ struct isd200_info {
/* maximum number of LUNs supported */
unsigned char MaxLUNs;
struct scsi_cmnd srb;
+ struct scatterlist sg;
};
@@ -398,6 +400,31 @@ static void isd200_build_sense(struct us_data *us, struct scsi_cmnd *srb)
* Transport routines
***********************************************************************/
+/**************************************************************************
+ * isd200_set_srb(), isd200_srb_set_bufflen()
+ *
+ * Two helpers to facilitate in initialization of scsi_cmnd structure
+ * Two helpers to facilitate initialization of the scsi_cmnd structure.
+ * Will need to change when struct scsi_cmnd changes
+ */
+static void isd200_set_srb(struct isd200_info *info,
+ enum dma_data_direction dir, void* buff, unsigned bufflen)
+{
+ struct scsi_cmnd *srb = &info->srb;
+
+ if (buff)
+ sg_init_one(&info->sg, buff, bufflen);
+
+ srb->sc_data_direction = dir;
+ srb->request_buffer = buff ? &info->sg : NULL;
+ srb->request_bufflen = bufflen;
+ srb->use_sg = buff ? 1 : 0;
+}
+
+static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen)
+{
+ srb->request_bufflen = bufflen;
+}
+
/**************************************************************************
* isd200_action
@@ -432,9 +459,7 @@ static int isd200_action( struct us_data *us, int action,
ata.generic.RegisterSelect =
REG_CYLINDER_LOW | REG_CYLINDER_HIGH |
REG_STATUS | REG_ERROR;
- srb->sc_data_direction = DMA_FROM_DEVICE;
- srb->request_buffer = pointer;
- srb->request_bufflen = value;
+ isd200_set_srb(info, DMA_FROM_DEVICE, pointer, value);
break;
case ACTION_ENUM:
@@ -444,7 +469,7 @@ static int isd200_action( struct us_data *us, int action,
ACTION_SELECT_5;
ata.generic.RegisterSelect = REG_DEVICE_HEAD;
ata.write.DeviceHeadByte = value;
- srb->sc_data_direction = DMA_NONE;
+ isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_RESET:
@@ -453,7 +478,7 @@ static int isd200_action( struct us_data *us, int action,
ACTION_SELECT_3|ACTION_SELECT_4;
ata.generic.RegisterSelect = REG_DEVICE_CONTROL;
ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER;
- srb->sc_data_direction = DMA_NONE;
+ isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_REENABLE:
@@ -462,7 +487,7 @@ static int isd200_action( struct us_data *us, int action,
ACTION_SELECT_3|ACTION_SELECT_4;
ata.generic.RegisterSelect = REG_DEVICE_CONTROL;
ata.write.DeviceControlByte = ATA_DC_REENABLE_CONTROLLER;
- srb->sc_data_direction = DMA_NONE;
+ isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_SOFT_RESET:
@@ -471,21 +496,20 @@ static int isd200_action( struct us_data *us, int action,
ata.generic.RegisterSelect = REG_DEVICE_HEAD | REG_COMMAND;
ata.write.DeviceHeadByte = info->DeviceHead;
ata.write.CommandByte = WIN_SRST;
- srb->sc_data_direction = DMA_NONE;
+ isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_IDENTIFY:
US_DEBUGP(" isd200_action(IDENTIFY)\n");
ata.generic.RegisterSelect = REG_COMMAND;
ata.write.CommandByte = WIN_IDENTIFY;
- srb->sc_data_direction = DMA_FROM_DEVICE;
- srb->request_buffer = (void *) info->id;
- srb->request_bufflen = sizeof(struct hd_driveid);
+ isd200_set_srb(info, DMA_FROM_DEVICE, info->id,
+ sizeof(struct hd_driveid));
break;
default:
US_DEBUGP("Error: Undefined action %d\n",action);
- break;
+ return ISD200_ERROR;
}
memcpy(srb->cmnd, &ata, sizeof(ata.generic));
@@ -590,7 +614,7 @@ static void isd200_invoke_transport( struct us_data *us,
return;
}
- if ((srb->resid > 0) &&
+ if ((scsi_get_resid(srb) > 0) &&
!((srb->cmnd[0] == REQUEST_SENSE) ||
(srb->cmnd[0] == INQUIRY) ||
(srb->cmnd[0] == MODE_SENSE) ||
@@ -1217,7 +1241,6 @@ static int isd200_get_inquiry_data( struct us_data *us )
return(retStatus);
}
-
/**************************************************************************
* isd200_scsi_to_ata
*
@@ -1266,7 +1289,7 @@ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us,
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS;
- srb->request_bufflen = 0;
+ isd200_srb_set_bufflen(srb, 0);
} else {
US_DEBUGP(" Media Status not supported, just report okay\n");
srb->result = SAM_STAT_GOOD;
@@ -1284,7 +1307,7 @@ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us,
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS;
- srb->request_bufflen = 0;
+ isd200_srb_set_bufflen(srb, 0);
} else {
US_DEBUGP(" Media Status not supported, just report okay\n");
srb->result = SAM_STAT_GOOD;
@@ -1390,7 +1413,7 @@ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us,
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = (srb->cmnd[4] & 0x1) ?
WIN_DOORLOCK : WIN_DOORUNLOCK;
- srb->request_bufflen = 0;
+ isd200_srb_set_bufflen(srb, 0);
} else {
US_DEBUGP(" Not removeable media, just report okay\n");
srb->result = SAM_STAT_GOOD;
@@ -1416,7 +1439,7 @@ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us,
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS;
- srb->request_bufflen = 0;
+ isd200_srb_set_bufflen(srb, 0);
} else {
US_DEBUGP(" Nothing to do, just report okay\n");
srb->result = SAM_STAT_GOOD;
@@ -1525,7 +1548,7 @@ int isd200_Initialization(struct us_data *us)
void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
{
- int sendToTransport = 1;
+ int sendToTransport = 1, orig_bufflen;
union ata_cdb ataCdb;
/* Make sure driver was initialized */
@@ -1533,11 +1556,14 @@ void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
if (us->extra == NULL)
US_DEBUGP("ERROR Driver not initialized\n");
- /* Convert command */
- srb->resid = 0;
+ scsi_set_resid(srb, 0);
+ /* scsi_bufflen might change in protocol translation to ata */
+ orig_bufflen = scsi_bufflen(srb);
sendToTransport = isd200_scsi_to_ata(srb, us, &ataCdb);
/* send the command to the transport layer */
if (sendToTransport)
isd200_invoke_transport(us, srb, &ataCdb);
+
+ isd200_srb_set_bufflen(srb, orig_bufflen);
}
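
isd200_set_srb() above relies on sg_init_one() to make the driver's private flat buffers look like the one-entry scatterlists the rest of the stack now expects. A minimal sketch, assuming buf is a kmalloc'ed (lowmem, non-stack) buffer of buflen bytes:

	struct scatterlist sg;

	sg_init_one(&sg, buf, buflen);	/* one-entry table pointing at buf */
	/* code that walks scsi_sglist()/sg_page() can now consume it */
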
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 889622baac20..a41ce21c0697 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -149,11 +149,7 @@ void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
***********************************************************************/
/* Copy a buffer of length buflen to/from the srb's transfer buffer.
- * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
- * points to a list of s-g entries and we ignore srb->request_bufflen.
- * For non-scatter-gather transfers, srb->request_buffer points to the
- * transfer buffer itself and srb->request_bufflen is the buffer's length.)
- * Update the *index and *offset variables so that the next copy will
+ * Update the **sgptr and *offset variables so that the next copy will
* pick up from where this one left off. */
unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
@@ -162,80 +158,64 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
{
unsigned int cnt;
- /* If not using scatter-gather, just transfer the data directly.
- * Make certain it will fit in the available buffer space. */
- if (srb->use_sg == 0) {
- if (*offset >= srb->request_bufflen)
- return 0;
- cnt = min(buflen, srb->request_bufflen - *offset);
- if (dir == TO_XFER_BUF)
- memcpy((unsigned char *) srb->request_buffer + *offset,
- buffer, cnt);
- else
- memcpy(buffer, (unsigned char *) srb->request_buffer +
- *offset, cnt);
- *offset += cnt;
-
- /* Using scatter-gather. We have to go through the list one entry
+ /* We have to go through the list one entry
* at a time. Each s-g entry contains some number of pages, and
* each page has to be kmap()'ed separately. If the page is already
* in kernel-addressable memory then kmap() will return its address.
* If the page is not directly accessible -- such as a user buffer
* located in high memory -- then kmap() will map it to a temporary
* position in the kernel's virtual address space. */
- } else {
- struct scatterlist *sg = *sgptr;
-
- if (!sg)
- sg = (struct scatterlist *) srb->request_buffer;
-
- /* This loop handles a single s-g list entry, which may
- * include multiple pages. Find the initial page structure
- * and the starting offset within the page, and update
- * the *offset and *index values for the next loop. */
- cnt = 0;
- while (cnt < buflen) {
- struct page *page = sg_page(sg) +
- ((sg->offset + *offset) >> PAGE_SHIFT);
- unsigned int poff =
- (sg->offset + *offset) & (PAGE_SIZE-1);
- unsigned int sglen = sg->length - *offset;
-
- if (sglen > buflen - cnt) {
-
- /* Transfer ends within this s-g entry */
- sglen = buflen - cnt;
- *offset += sglen;
- } else {
-
- /* Transfer continues to next s-g entry */
- *offset = 0;
- sg = sg_next(sg);
- }
-
- /* Transfer the data for all the pages in this
- * s-g entry. For each page: call kmap(), do the
- * transfer, and call kunmap() immediately after. */
- while (sglen > 0) {
- unsigned int plen = min(sglen, (unsigned int)
- PAGE_SIZE - poff);
- unsigned char *ptr = kmap(page);
-
- if (dir == TO_XFER_BUF)
- memcpy(ptr + poff, buffer + cnt, plen);
- else
- memcpy(buffer + cnt, ptr + poff, plen);
- kunmap(page);
-
- /* Start at the beginning of the next page */
- poff = 0;
- ++page;
- cnt += plen;
- sglen -= plen;
- }
+ struct scatterlist *sg = *sgptr;
+
+ if (!sg)
+ sg = scsi_sglist(srb);
+
+ /* This loop handles a single s-g list entry, which may
+ * include multiple pages. Find the initial page structure
+ * and the starting offset within the page, and update
+ * the *offset and **sgptr values for the next loop. */
+ cnt = 0;
+ while (cnt < buflen) {
+ struct page *page = sg_page(sg) +
+ ((sg->offset + *offset) >> PAGE_SHIFT);
+ unsigned int poff =
+ (sg->offset + *offset) & (PAGE_SIZE-1);
+ unsigned int sglen = sg->length - *offset;
+
+ if (sglen > buflen - cnt) {
+
+ /* Transfer ends within this s-g entry */
+ sglen = buflen - cnt;
+ *offset += sglen;
+ } else {
+
+ /* Transfer continues to next s-g entry */
+ *offset = 0;
+ sg = sg_next(sg);
+ }
+
+ /* Transfer the data for all the pages in this
+ * s-g entry. For each page: call kmap(), do the
+ * transfer, and call kunmap() immediately after. */
+ while (sglen > 0) {
+ unsigned int plen = min(sglen, (unsigned int)
+ PAGE_SIZE - poff);
+ unsigned char *ptr = kmap(page);
+
+ if (dir == TO_XFER_BUF)
+ memcpy(ptr + poff, buffer + cnt, plen);
+ else
+ memcpy(buffer + cnt, ptr + poff, plen);
+ kunmap(page);
+
+ /* Start at the beginning of the next page */
+ poff = 0;
+ ++page;
+ cnt += plen;
+ sglen -= plen;
}
- *sgptr = sg;
}
+ *sgptr = sg;
/* Return the amount actually transferred */
return cnt;
@@ -251,6 +231,6 @@ void usb_stor_set_xfer_buf(unsigned char *buffer,
usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
TO_XFER_BUF);
- if (buflen < srb->request_bufflen)
- srb->resid = srb->request_bufflen - buflen;
+ if (buflen < scsi_bufflen(srb))
+ scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
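
Caller-side sketch of the simplified interface: with every command now carrying a scatterlist, a protocol routine that wants to hand back a small result (inquiry_data and inquiry_len are hypothetical) just copies into the srb's transfer buffer and records the residue, as usb_stor_set_xfer_buf() does above:

	unsigned int offset = 0;
	struct scatterlist *sg = NULL;

	usb_stor_access_xfer_buf(inquiry_data, inquiry_len, srb,
				 &sg, &offset, TO_XFER_BUF);
	if (inquiry_len < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - inquiry_len);
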
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 7c9593b7b04e..8c1e2954f3b9 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -81,6 +81,16 @@ static int slave_alloc (struct scsi_device *sdev)
*/
sdev->inquiry_len = 36;
+ /* Scatter-gather buffers (all but the last) must have a length
+ * divisible by the bulk maxpacket size. Otherwise a data packet
+ * would end up being short, causing a premature end to the data
+ * transfer. Since high-speed bulk pipes have a maxpacket size
+ * of 512, we'll use that as the scsi device queue's DMA alignment
+ * mask. Guaranteeing proper alignment of the first buffer will
+ * have the desired effect because, except at the beginning and
+ * the end, scatter-gather buffers follow page boundaries. */
+ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+
/*
* The UFI spec treates the Peripheral Qualifier bits in an
* INQUIRY result as reserved and requires devices to set them
@@ -100,16 +110,6 @@ static int slave_configure(struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
- /* Scatter-gather buffers (all but the last) must have a length
- * divisible by the bulk maxpacket size. Otherwise a data packet
- * would end up being short, causing a premature end to the data
- * transfer. Since high-speed bulk pipes have a maxpacket size
- * of 512, we'll use that as the scsi device queue's DMA alignment
- * mask. Guaranteeing proper alignment of the first buffer will
- * have the desired effect because, except at the beginning and
- * the end, scatter-gather buffers follow page boundaries. */
- blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
-
/* Many devices have trouble transfering more than 32KB at a time,
* while others have trouble with more than 64K. At this time we
* are limiting both to 32K (64 sectores).
@@ -187,6 +187,10 @@ static int slave_configure(struct scsi_device *sdev)
* automatically, requiring a START-STOP UNIT command. */
sdev->allow_restart = 1;
+ /* Some USB card readers have trouble reading an SD card's last
+ * sector in a larger-than-1-sector read; since the performance
+ * impact is negligible we set this flag for all USB disks */
+ sdev->last_sector_bug = 1;
} else {
/* Non-disk-type devices don't need to blacklist any pages
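
The move from slave_configure() to slave_alloc() goes together with the new blk_queue_update_dma_alignment() helper (declared in the blkdev.h hunk below): unlike blk_queue_dma_alignment(), it only ever raises the queue's alignment mask, so it cannot loosen a stricter constraint set elsewhere. Sketch of the call made above:

	/* USB high-speed bulk maxpacket is 512, so require 512-byte alignment */
	blk_queue_update_dma_alignment(sdev->request_queue, 512 - 1);
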
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index b12202c5da2d..8972b17da843 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1623,7 +1623,7 @@ int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
- if (srb->request_bufflen == 0)
+ if (scsi_bufflen(srb) == 0)
return USB_STOR_TRANSPORT_GOOD;
if (srb->sc_data_direction == DMA_TO_DEVICE ||
@@ -1634,12 +1634,9 @@ int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
US_DEBUGP("SDDR09: %s %d bytes\n",
(srb->sc_data_direction == DMA_TO_DEVICE) ?
"sending" : "receiving",
- srb->request_bufflen);
+ scsi_bufflen(srb));
- result = usb_stor_bulk_transfer_sg(us, pipe,
- srb->request_buffer,
- srb->request_bufflen,
- srb->use_sg, &srb->resid);
+ result = usb_stor_bulk_srb(us, pipe, srb);
return (result == USB_STOR_XFER_GOOD ?
USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR);
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index cb22a9ad1694..570c1250f6f3 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -130,7 +130,7 @@ static int usbat_write(struct us_data *us,
* Convenience function to perform a bulk read
*/
static int usbat_bulk_read(struct us_data *us,
- unsigned char *data,
+ void* buf,
unsigned int len,
int use_sg)
{
@@ -138,14 +138,14 @@ static int usbat_bulk_read(struct us_data *us,
return USB_STOR_XFER_GOOD;
US_DEBUGP("usbat_bulk_read: len = %d\n", len);
- return usb_stor_bulk_transfer_sg(us, us->recv_bulk_pipe, data, len, use_sg, NULL);
+ return usb_stor_bulk_transfer_sg(us, us->recv_bulk_pipe, buf, len, use_sg, NULL);
}
/*
* Convenience function to perform a bulk write
*/
static int usbat_bulk_write(struct us_data *us,
- unsigned char *data,
+ void* buf,
unsigned int len,
int use_sg)
{
@@ -153,7 +153,7 @@ static int usbat_bulk_write(struct us_data *us,
return USB_STOR_XFER_GOOD;
US_DEBUGP("usbat_bulk_write: len = %d\n", len);
- return usb_stor_bulk_transfer_sg(us, us->send_bulk_pipe, data, len, use_sg, NULL);
+ return usb_stor_bulk_transfer_sg(us, us->send_bulk_pipe, buf, len, use_sg, NULL);
}
/*
@@ -314,7 +314,7 @@ static int usbat_wait_not_busy(struct us_data *us, int minutes)
* Read block data from the data register
*/
static int usbat_read_block(struct us_data *us,
- unsigned char *content,
+ void* buf,
unsigned short len,
int use_sg)
{
@@ -337,7 +337,7 @@ static int usbat_read_block(struct us_data *us,
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- result = usbat_bulk_read(us, content, len, use_sg);
+ result = usbat_bulk_read(us, buf, len, use_sg);
return (result == USB_STOR_XFER_GOOD ?
USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR);
}
@@ -347,7 +347,7 @@ static int usbat_read_block(struct us_data *us,
*/
static int usbat_write_block(struct us_data *us,
unsigned char access,
- unsigned char *content,
+ void* buf,
unsigned short len,
int minutes,
int use_sg)
@@ -372,7 +372,7 @@ static int usbat_write_block(struct us_data *us,
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- result = usbat_bulk_write(us, content, len, use_sg);
+ result = usbat_bulk_write(us, buf, len, use_sg);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -392,7 +392,7 @@ static int usbat_hp8200e_rw_block_test(struct us_data *us,
unsigned char timeout,
unsigned char qualifier,
int direction,
- unsigned char *content,
+ void *buf,
unsigned short len,
int use_sg,
int minutes)
@@ -472,7 +472,7 @@ static int usbat_hp8200e_rw_block_test(struct us_data *us,
}
result = usb_stor_bulk_transfer_sg(us,
- pipe, content, len, use_sg, NULL);
+ pipe, buf, len, use_sg, NULL);
/*
* If we get a stall on the bulk download, we'll retry
@@ -606,7 +606,7 @@ static int usbat_multiple_write(struct us_data *us,
* other related details) are defined beforehand with _set_shuttle_features().
*/
static int usbat_read_blocks(struct us_data *us,
- unsigned char *buffer,
+ void* buffer,
int len,
int use_sg)
{
@@ -648,7 +648,7 @@ static int usbat_read_blocks(struct us_data *us,
* other related details) are defined beforehand with _set_shuttle_features().
*/
static int usbat_write_blocks(struct us_data *us,
- unsigned char *buffer,
+ void* buffer,
int len,
int use_sg)
{
@@ -1170,15 +1170,15 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
US_DEBUGP("handle_read10: transfersize %d\n",
srb->transfersize);
- if (srb->request_bufflen < 0x10000) {
+ if (scsi_bufflen(srb) < 0x10000) {
result = usbat_hp8200e_rw_block_test(us, USBAT_ATA,
registers, data, 19,
USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
(USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
DMA_FROM_DEVICE,
- srb->request_buffer,
- srb->request_bufflen, srb->use_sg, 1);
+ scsi_sglist(srb),
+ scsi_bufflen(srb), scsi_sg_count(srb), 1);
return result;
}
@@ -1196,7 +1196,7 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
len <<= 16;
len |= data[7+7];
US_DEBUGP("handle_read10: GPCMD_READ_CD: len %d\n", len);
- srb->transfersize = srb->request_bufflen/len;
+ srb->transfersize = scsi_bufflen(srb)/len;
}
if (!srb->transfersize) {
@@ -1213,7 +1213,7 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
len = (65535/srb->transfersize) * srb->transfersize;
US_DEBUGP("Max read is %d bytes\n", len);
- len = min(len, srb->request_bufflen);
+ len = min(len, scsi_bufflen(srb));
buffer = kmalloc(len, GFP_NOIO);
if (buffer == NULL) /* bloody hell! */
return USB_STOR_TRANSPORT_FAILED;
@@ -1222,10 +1222,10 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
sector |= short_pack(data[7+5], data[7+4]);
transferred = 0;
- while (transferred != srb->request_bufflen) {
+ while (transferred != scsi_bufflen(srb)) {
- if (len > srb->request_bufflen - transferred)
- len = srb->request_bufflen - transferred;
+ if (len > scsi_bufflen(srb) - transferred)
+ len = scsi_bufflen(srb) - transferred;
data[3] = len&0xFF; /* (cylL) = expected length (L) */
data[4] = (len>>8)&0xFF; /* (cylH) = expected length (H) */
@@ -1261,7 +1261,7 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
transferred += len;
sector += len / srb->transfersize;
- } /* while transferred != srb->request_bufflen */
+ } /* while transferred != scsi_bufflen(srb) */
kfree(buffer);
return result;
@@ -1429,9 +1429,8 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
unsigned char data[32];
unsigned int len;
int i;
- char string[64];
- len = srb->request_bufflen;
+ len = scsi_bufflen(srb);
/* Send A0 (ATA PACKET COMMAND).
Note: I guess we're never going to get any of the ATA
@@ -1472,8 +1471,8 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
(USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
DMA_TO_DEVICE,
- srb->request_buffer,
- len, srb->use_sg, 10);
+ scsi_sglist(srb),
+ len, scsi_sg_count(srb), 10);
if (result == USB_STOR_TRANSPORT_GOOD) {
transferred += len;
@@ -1540,23 +1539,8 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
len = *status;
- result = usbat_read_block(us, srb->request_buffer, len, srb->use_sg);
-
- /* Debug-print the first 32 bytes of the transfer */
-
- if (!srb->use_sg) {
- string[0] = 0;
- for (i=0; i<len && i<32; i++) {
- sprintf(string+strlen(string), "%02X ",
- ((unsigned char *)srb->request_buffer)[i]);
- if ((i%16)==15) {
- US_DEBUGP("%s\n", string);
- string[0] = 0;
- }
- }
- if (string[0]!=0)
- US_DEBUGP("%s\n", string);
- }
+ result = usbat_read_block(us, scsi_sglist(srb), len,
+ scsi_sg_count(srb));
}
return result;
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index c646750ccc30..d9f4912f873d 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -459,6 +459,22 @@ static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe,
}
/*
+ * Commonly used function. Transfer a complete command
+ * via usb_stor_bulk_transfer_sglist() above and set the cmnd resid.
+ */
+int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
+ struct scsi_cmnd* srb)
+{
+ unsigned int partial;
+ int result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
+ scsi_sg_count(srb), scsi_bufflen(srb),
+ &partial);
+
+ scsi_set_resid(srb, scsi_bufflen(srb) - partial);
+ return result;
+}
+
+/*
* Transfer an entire SCSI command's worth of data payload over the bulk
* pipe.
*
@@ -508,7 +524,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int result;
/* send the command to the transport layer */
- srb->resid = 0;
+ scsi_set_resid(srb, 0);
result = us->transport(srb, us);
/* if the command gets aborted by the higher layers, we need to
@@ -568,7 +584,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
* A short transfer on a command where we don't expect it
* is unusual, but it doesn't mean we need to auto-sense.
*/
- if ((srb->resid > 0) &&
+ if ((scsi_get_resid(srb) > 0) &&
!((srb->cmnd[0] == REQUEST_SENSE) ||
(srb->cmnd[0] == INQUIRY) ||
(srb->cmnd[0] == MODE_SENSE) ||
@@ -593,7 +609,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
srb->cmd_len = 12;
/* issue the auto-sense command */
- srb->resid = 0;
+ scsi_set_resid(srb, 0);
temp_result = us->transport(us->srb, us);
/* let's clean up right away */
@@ -649,7 +665,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
/* Did we transfer less than the minimum amount required? */
if (srb->result == SAM_STAT_GOOD &&
- srb->request_bufflen - srb->resid < srb->underflow)
+ scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24);
return;
@@ -708,7 +724,7 @@ void usb_stor_stop_transport(struct us_data *us)
int usb_stor_CBI_transport(struct scsi_cmnd *srb, struct us_data *us)
{
- unsigned int transfer_length = srb->request_bufflen;
+ unsigned int transfer_length = scsi_bufflen(srb);
unsigned int pipe = 0;
int result;
@@ -737,9 +753,7 @@ int usb_stor_CBI_transport(struct scsi_cmnd *srb, struct us_data *us)
if (transfer_length) {
pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
us->recv_bulk_pipe : us->send_bulk_pipe;
- result = usb_stor_bulk_transfer_sg(us, pipe,
- srb->request_buffer, transfer_length,
- srb->use_sg, &srb->resid);
+ result = usb_stor_bulk_srb(us, pipe, srb);
US_DEBUGP("CBI data stage result is 0x%x\n", result);
/* if we stalled the data transfer it means command failed */
@@ -808,7 +822,7 @@ int usb_stor_CBI_transport(struct scsi_cmnd *srb, struct us_data *us)
*/
int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
{
- unsigned int transfer_length = srb->request_bufflen;
+ unsigned int transfer_length = scsi_bufflen(srb);
int result;
/* COMMAND STAGE */
@@ -836,9 +850,7 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
if (transfer_length) {
unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
us->recv_bulk_pipe : us->send_bulk_pipe;
- result = usb_stor_bulk_transfer_sg(us, pipe,
- srb->request_buffer, transfer_length,
- srb->use_sg, &srb->resid);
+ result = usb_stor_bulk_srb(us, pipe, srb);
US_DEBUGP("CB data stage result is 0x%x\n", result);
/* if we stalled the data transfer it means command failed */
@@ -904,7 +916,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
- unsigned int transfer_length = srb->request_bufflen;
+ unsigned int transfer_length = scsi_bufflen(srb);
unsigned int residue;
int result;
int fake_sense = 0;
@@ -955,9 +967,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
if (transfer_length) {
unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
us->recv_bulk_pipe : us->send_bulk_pipe;
- result = usb_stor_bulk_transfer_sg(us, pipe,
- srb->request_buffer, transfer_length,
- srb->use_sg, &srb->resid);
+ result = usb_stor_bulk_srb(us, pipe, srb);
US_DEBUGP("Bulk data transfer result 0x%x\n", result);
if (result == USB_STOR_XFER_ERROR)
return USB_STOR_TRANSPORT_ERROR;
@@ -1036,7 +1046,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
if (residue) {
if (!(us->flags & US_FL_IGNORE_RESIDUE)) {
residue = min(residue, transfer_length);
- srb->resid = max(srb->resid, (int) residue);
+ scsi_set_resid(srb, max(scsi_get_resid(srb),
+ (int) residue));
}
}
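
The new usb_stor_bulk_srb() centralizes what each transport used to open-code: transfer the command's whole scatterlist and set the residue. A caller-side sketch of a data stage, mirroring the CB/CBI/Bulk hunks above:

	if (scsi_bufflen(srb)) {
		unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
				us->recv_bulk_pipe : us->send_bulk_pipe;

		result = usb_stor_bulk_srb(us, pipe, srb);
		if (result == USB_STOR_XFER_ERROR)
			return USB_STOR_TRANSPORT_ERROR;
	}
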
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index 633a715850a4..ada7c2f43f84 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -139,6 +139,8 @@ extern int usb_stor_bulk_transfer_buf(struct us_data *us, unsigned int pipe,
void *buf, unsigned int length, unsigned int *act_len);
extern int usb_stor_bulk_transfer_sg(struct us_data *us, unsigned int pipe,
void *buf, unsigned int length, int use_sg, int *residual);
+extern int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
+ struct scsi_cmnd* srb);
extern int usb_stor_port_reset(struct us_data *us);
#endif
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 8acf82bba44c..a271c87c4472 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -523,7 +523,11 @@ int sysfs_add_file_to_group(struct kobject *kobj,
struct sysfs_dirent *dir_sd;
int error;
- dir_sd = sysfs_get_dirent(kobj->sd, group);
+ if (group)
+ dir_sd = sysfs_get_dirent(kobj->sd, group);
+ else
+ dir_sd = sysfs_get(kobj->sd);
+
if (!dir_sd)
return -ENOENT;
@@ -611,7 +615,10 @@ void sysfs_remove_file_from_group(struct kobject *kobj,
{
struct sysfs_dirent *dir_sd;
- dir_sd = sysfs_get_dirent(kobj->sd, group);
+ if (group)
+ dir_sd = sysfs_get_dirent(kobj->sd, group);
+ else
+ dir_sd = sysfs_get(kobj->sd);
if (dir_sd) {
sysfs_hash_and_remove(dir_sd, attr->name);
sysfs_put(dir_sd);
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index d1972374655a..0871c3dadce1 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -16,25 +16,31 @@
#include "sysfs.h"
-static void remove_files(struct sysfs_dirent *dir_sd,
+static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
const struct attribute_group *grp)
{
struct attribute *const* attr;
+ int i;
- for (attr = grp->attrs; *attr; attr++)
- sysfs_hash_and_remove(dir_sd, (*attr)->name);
+ for (i = 0, attr = grp->attrs; *attr; i++, attr++)
+ if (!grp->is_visible ||
+ grp->is_visible(kobj, *attr, i))
+ sysfs_hash_and_remove(dir_sd, (*attr)->name);
}
-static int create_files(struct sysfs_dirent *dir_sd,
+static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
const struct attribute_group *grp)
{
struct attribute *const* attr;
- int error = 0;
+ int error = 0, i;
- for (attr = grp->attrs; *attr && !error; attr++)
- error = sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR);
+ for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++)
+ if (!grp->is_visible ||
+ grp->is_visible(kobj, *attr, i))
+ error |=
+ sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR);
if (error)
- remove_files(dir_sd, grp);
+ remove_files(dir_sd, kobj, grp);
return error;
}
@@ -54,7 +60,7 @@ int sysfs_create_group(struct kobject * kobj,
} else
sd = kobj->sd;
sysfs_get(sd);
- error = create_files(sd, grp);
+ error = create_files(sd, kobj, grp);
if (error) {
if (grp->name)
sysfs_remove_subdir(sd);
@@ -75,7 +81,7 @@ void sysfs_remove_group(struct kobject * kobj,
} else
sd = sysfs_get(dir_sd);
- remove_files(sd, grp);
+ remove_files(sd, kobj, grp);
if (grp->name)
sysfs_remove_subdir(sd);
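
Illustration of the new is_visible hook (all foo_* and dev_attr_optional names are hypothetical): sysfs calls it once per attribute with the attribute's index in attrs[], and returning 0 suppresses creation of that file while the rest of the group is created normally:

	static int foo_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
	{
		struct device *dev = container_of(kobj, struct device, kobj);

		if (attr == &dev_attr_optional.attr && !foo_has_feature(dev))
			return 0;	/* hide this attribute */
		return 1;
	}

	static struct attribute_group foo_group = {
		.is_visible = foo_attr_is_visible,
		.attrs	    = foo_attrs,
	};
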
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index 8ff274933948..f5582332af04 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -17,6 +17,7 @@ struct attribute_container {
struct list_head node;
struct klist containers;
struct class *class;
+ struct attribute_group *grp;
struct class_device_attribute **attrs;
int (*match)(struct attribute_container *, struct device *);
#define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 40ee1706caa3..bd20a4e8663a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -765,6 +765,7 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 149ab62329e2..802710438a9e 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -32,6 +32,8 @@ struct attribute {
struct attribute_group {
const char *name;
+ int (*is_visible)(struct kobject *,
+ struct attribute *, int);
struct attribute **attrs;
};
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 50e907f42048..e19e58423166 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -49,12 +49,15 @@ enum iscsi_uevent_e {
ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+ ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
/* up events */
ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2,
ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3,
ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
+ ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
+ ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
};
enum iscsi_tgt_dscvr {
@@ -156,6 +159,10 @@ struct iscsi_uevent {
uint32_t sid;
uint32_t cid;
} c_conn_ret;
+ struct msg_unbind_session {
+ uint32_t sid;
+ uint32_t host_no;
+ } unbind_session;
struct msg_recv_req {
uint32_t sid;
uint32_t cid;
@@ -236,6 +243,13 @@ enum iscsi_param {
ISCSI_PARAM_PASSWORD,
ISCSI_PARAM_PASSWORD_IN,
+ ISCSI_PARAM_FAST_ABORT,
+ ISCSI_PARAM_ABORT_TMO,
+ ISCSI_PARAM_LU_RESET_TMO,
+ ISCSI_PARAM_HOST_RESET_TMO,
+
+ ISCSI_PARAM_PING_TMO,
+ ISCSI_PARAM_RECV_TMO,
/* must always be last */
ISCSI_PARAM_MAX,
};
@@ -266,6 +280,12 @@ enum iscsi_param {
#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
+#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT)
+#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO)
+#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO)
+#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO)
+#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO)
+#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO)
/* iSCSI HBA params */
enum iscsi_host_param {
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 8d1e4e8026fe..318a909e7ae1 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -21,13 +21,15 @@
#ifndef ISCSI_PROTO_H
#define ISCSI_PROTO_H
+#include <linux/types.h>
+
#define ISCSI_DRAFT20_VERSION 0x00
/* default iSCSI listen port for incoming connections */
#define ISCSI_LISTEN_PORT 3260
/* Padding word length */
-#define PAD_WORD_LEN 4
+#define ISCSI_PAD_LEN 4
/*
* useful common(control and data pathes) macro
@@ -147,6 +149,14 @@ struct iscsi_rlength_ahdr {
__be32 read_length;
};
+/* Extended CDB AHS */
+struct iscsi_ecdb_ahdr {
+ __be16 ahslength; /* CDB length - 15, including reserved byte */
+ uint8_t ahstype;
+ uint8_t reserved;
+ uint8_t ecdb[260 - 16]; /* 4-byte aligned extended CDB spillover */
+};
+
/* SCSI Response Header */
struct iscsi_cmd_rsp {
uint8_t opcode;
@@ -600,6 +610,8 @@ struct iscsi_reject {
#define ISCSI_MIN_MAX_BURST_LEN 512
#define ISCSI_MAX_MAX_BURST_LEN 16777215
+#define ISCSI_DEF_TIME2WAIT 2
+
/************************* RFC 3720 End *****************************/
#endif /* ISCSI_PROTO_H */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index b4b31132618b..889f51fabab9 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -57,11 +57,14 @@ struct iscsi_nopin;
#define ISCSI_MAX_CMD_PER_LUN 128
/* Task Mgmt states */
-#define TMABORT_INITIAL 0x0
-#define TMABORT_SUCCESS 0x1
-#define TMABORT_FAILED 0x2
-#define TMABORT_TIMEDOUT 0x3
-#define TMABORT_NOT_FOUND 0x4
+enum {
+ TMF_INITIAL,
+ TMF_QUEUED,
+ TMF_SUCCESS,
+ TMF_FAILED,
+ TMF_TIMEDOUT,
+ TMF_NOT_FOUND,
+};
/* Connection suspend "bit" */
#define ISCSI_SUSPEND_BIT 1
@@ -74,6 +77,13 @@ struct iscsi_nopin;
#define ISCSI_ADDRESS_BUF_LEN 64
+enum {
+ /* this is the maximum possible storage for AHSs */
+ ISCSI_MAX_AHS_SIZE = sizeof(struct iscsi_ecdb_ahdr) +
+ sizeof(struct iscsi_rlength_ahdr),
+ ISCSI_DIGEST_SIZE = sizeof(__u32),
+};
+
struct iscsi_mgmt_task {
/*
* Becuae LLDs allocate their hdr differently, this is a pointer to
@@ -91,15 +101,17 @@ enum {
ISCSI_TASK_COMPLETED,
ISCSI_TASK_PENDING,
ISCSI_TASK_RUNNING,
- ISCSI_TASK_ABORTING,
};
struct iscsi_cmd_task {
/*
- * Becuae LLDs allocate their hdr differently, this is a pointer to
- * that storage. It must be setup at session creation time.
+ * Because LLDs allocate their hdr differently, this is a pointer
+ * and length to that storage. It must be setup at session
+ * creation time.
*/
struct iscsi_cmd *hdr;
+ unsigned short hdr_max;
+ unsigned short hdr_len; /* accumulated size of hdr used */
int itt; /* this ITT */
uint32_t unsol_datasn;
@@ -110,7 +122,6 @@ struct iscsi_cmd_task {
unsigned data_count; /* remaining Data-Out */
struct scsi_cmnd *sc; /* associated SCSI cmd*/
struct iscsi_conn *conn; /* used connection */
- struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
/* state set/tested under session->lock */
int state;
@@ -119,6 +130,11 @@ struct iscsi_cmd_task {
void *dd_data; /* driver/transport data */
};
+static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+{
+ return (void*)ctask->hdr + ctask->hdr_len;
+}
+
struct iscsi_conn {
struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
void *dd_data; /* iscsi_transport data */
@@ -132,6 +148,12 @@ struct iscsi_conn {
* conn_stop() flag: stop to recover, stop to terminate
*/
int stop_stage;
+ struct timer_list transport_timer;
+ unsigned long last_recv;
+ unsigned long last_ping;
+ int ping_timeout;
+ int recv_timeout;
+ struct iscsi_mgmt_task *ping_mtask;
/* iSCSI connection-wide sequencing */
uint32_t exp_statsn;
@@ -152,10 +174,11 @@ struct iscsi_conn {
struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
/* xmit */
- struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
struct list_head mgmt_run_list; /* list of control tasks */
struct list_head xmitqueue; /* data-path cmd queue */
struct list_head run_list; /* list of cmds in progress */
+ struct list_head requeue; /* tasks needing another run */
struct work_struct xmitwork; /* per-conn. xmit workqueue */
unsigned long suspend_tx; /* suspend Tx */
unsigned long suspend_rx; /* suspend Rx */
@@ -163,8 +186,8 @@ struct iscsi_conn {
/* abort */
wait_queue_head_t ehwait; /* used in eh_abort() */
struct iscsi_tm tmhdr;
- struct timer_list tmabort_timer;
- int tmabort_state; /* see TMABORT_INITIAL, etc.*/
+ struct timer_list tmf_timer;
+ int tmf_state; /* see TMF_INITIAL, etc.*/
/* negotiated params */
unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
@@ -198,7 +221,7 @@ struct iscsi_conn {
uint32_t eh_abort_cnt;
};
-struct iscsi_queue {
+struct iscsi_pool {
struct kfifo *queue; /* FIFO Queue */
void **pool; /* Pool of elements */
int max; /* Max number of elements */
@@ -221,6 +244,8 @@ struct iscsi_session {
uint32_t queued_cmdsn;
/* configuration */
+ int abort_timeout;
+ int lu_reset_timeout;
int initial_r2t_en;
unsigned max_r2t;
int imm_data_en;
@@ -231,6 +256,7 @@ struct iscsi_session {
int pdu_inorder_en;
int dataseq_inorder_en;
int erl;
+ int fast_abort;
int tpgt;
char *username;
char *username_in;
@@ -256,10 +282,10 @@ struct iscsi_session {
int cmds_max; /* size of cmds array */
struct iscsi_cmd_task **cmds; /* Original Cmds arr */
- struct iscsi_queue cmdpool; /* PDU's pool */
+ struct iscsi_pool cmdpool; /* PDU's pool */
int mgmtpool_max; /* size of mgmt array */
struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
- struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
+ struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */
};
/*
@@ -268,6 +294,7 @@ struct iscsi_session {
extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth);
extern int iscsi_eh_abort(struct scsi_cmnd *sc);
extern int iscsi_eh_host_reset(struct scsi_cmnd *sc);
+extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
extern int iscsi_queuecommand(struct scsi_cmnd *sc,
void (*done)(struct scsi_cmnd *));
@@ -326,11 +353,32 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
char *, int);
extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
uint32_t *);
+extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
+extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
+ struct iscsi_mgmt_task *mtask);
/*
* generic helpers
*/
-extern void iscsi_pool_free(struct iscsi_queue *, void **);
-extern int iscsi_pool_init(struct iscsi_queue *, int, void ***, int);
+extern void iscsi_pool_free(struct iscsi_pool *);
+extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
+
+/*
+ * inline functions to deal with padding.
+ */
+static inline unsigned int
+iscsi_padded(unsigned int len)
+{
+ return (len + ISCSI_PAD_LEN - 1) & ~(ISCSI_PAD_LEN - 1);
+}
+
+static inline unsigned int
+iscsi_padding(unsigned int len)
+{
+ len &= (ISCSI_PAD_LEN - 1);
+ if (len)
+ len = ISCSI_PAD_LEN - len;
+ return len;
+}
#endif
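
A worked example of the two padding helpers added above (ISCSI_PAD_LEN is 4):

	iscsi_padded(13);	/* (13 + 3) & ~3 == 16: segment length rounded up   */
	iscsi_padding(13);	/* 4 - (13 & 3)  ==  3: pad bytes to append         */
	iscsi_padding(12);	/* already aligned      ->  0                       */
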
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index a466c2cb8955..3ffd6b582a97 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -91,8 +91,6 @@ enum discover_event {
/* ---------- Expander Devices ---------- */
-#define ETASK 0xFA
-
#define to_dom_device(_obj) container_of(_obj, struct domain_device, dev_obj)
#define to_dev_attr(_attr) container_of(_attr, struct domain_dev_attribute,\
attr)
@@ -122,8 +120,8 @@ struct ex_phy {
u8 attached_sata_dev:1;
u8 attached_sata_ps:1;
- enum sas_proto attached_tproto;
- enum sas_proto attached_iproto;
+ enum sas_protocol attached_tproto;
+ enum sas_protocol attached_iproto;
u8 attached_sas_addr[SAS_ADDR_SIZE];
u8 attached_phy_id;
@@ -191,8 +189,8 @@ struct domain_device {
struct list_head dev_list_node;
- enum sas_proto iproto;
- enum sas_proto tproto;
+ enum sas_protocol iproto;
+ enum sas_protocol tproto;
struct sas_rphy *rphy;
@@ -245,8 +243,8 @@ struct asd_sas_port {
enum sas_class class;
u8 sas_addr[SAS_ADDR_SIZE];
u8 attached_sas_addr[SAS_ADDR_SIZE];
- enum sas_proto iproto;
- enum sas_proto tproto;
+ enum sas_protocol iproto;
+ enum sas_protocol tproto;
enum sas_oob_mode oob_mode;
@@ -289,8 +287,8 @@ struct asd_sas_phy {
int id; /* must be set */
enum sas_class class;
- enum sas_proto iproto;
- enum sas_proto tproto;
+ enum sas_protocol iproto;
+ enum sas_protocol tproto;
enum sas_phy_type type;
enum sas_phy_role role;
@@ -537,7 +535,7 @@ struct sas_task {
spinlock_t task_state_lock;
unsigned task_state_flags;
- enum sas_proto task_proto;
+ enum sas_protocol task_proto;
/* Used by the discovery code. */
struct timer_list timer;
@@ -563,7 +561,7 @@ struct sas_task {
struct work_struct abort_work;
};
-
+extern struct kmem_cache *sas_task_cache;
#define SAS_TASK_STATE_PENDING 1
#define SAS_TASK_STATE_DONE 2
@@ -573,7 +571,6 @@ struct sas_task {
static inline struct sas_task *sas_alloc_task(gfp_t flags)
{
- extern struct kmem_cache *sas_task_cache;
struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
if (task) {
@@ -590,7 +587,6 @@ static inline struct sas_task *sas_alloc_task(gfp_t flags)
static inline void sas_free_task(struct sas_task *task)
{
if (task) {
- extern struct kmem_cache *sas_task_cache;
BUG_ON(!list_empty(&task->list));
kmem_cache_free(sas_task_cache, task);
}
@@ -676,4 +672,8 @@ extern int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req);
+
+extern void sas_ssp_task_response(struct device *dev, struct sas_task *task,
+ struct ssp_response_iu *iu);
+
#endif /* _SASLIB_H_ */
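
[Editor's illustration, not part of the patch: with sas_task_cache now declared once at file scope instead of inside each inline, the allocation helpers pair up as before. A hedged sketch of the usual allocate/use/free pattern in a driver; the SSP setup step is only indicative:]

	/* Illustration only. */
	struct sas_task *task = sas_alloc_task(GFP_KERNEL);

	if (!task)
		return -ENOMEM;
	task->task_proto = SAS_PROTOCOL_SSP;
	/* ... fill in ssp_task, scatter list and completion callback ... */
	sas_free_task(task);
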
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index 2f4b6afa34fc..e9fd02281381 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -102,13 +102,12 @@ enum sas_dev_type {
SATA_PM_PORT= 8,
};
-/* Partly from IDENTIFY address frame. */
-enum sas_proto {
- SATA_PROTO = 1,
- SAS_PROTO_SMP = 2, /* protocol */
- SAS_PROTO_STP = 4, /* protocol */
- SAS_PROTO_SSP = 8, /* protocol */
- SAS_PROTO_ALL = 0xE,
+enum sas_protocol {
+ SAS_PROTOCOL_SATA = 0x01,
+ SAS_PROTOCOL_SMP = 0x02,
+ SAS_PROTOCOL_STP = 0x04,
+ SAS_PROTOCOL_SSP = 0x08,
+ SAS_PROTOCOL_ALL = 0x0E,
};
/* From the spec; local phys only */
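
[Editor's illustration, not part of the patch: the rename from sas_proto to sas_protocol keeps the same bit-flag values, so protocol masks still compose with bitwise OR. A hedged sketch of the kind of test drivers do on these flags; do_stp_setup() is a hypothetical helper:]

	/* Illustration only: the enumerators are bit flags. */
	enum sas_protocol iproto = SAS_PROTOCOL_SSP | SAS_PROTOCOL_SMP;

	if (iproto & SAS_PROTOCOL_STP)
		do_stp_setup();		/* hypothetical helper */

	/* SAS_PROTOCOL_ALL == 0x0E == SMP | STP | SSP; SATA is excluded. */
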
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 3f47e522a1ec..abd7479ff452 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -88,7 +88,7 @@ struct scsi_cmnd {
working on */
#define SCSI_SENSE_BUFFERSIZE 96
- unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
+ unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
* command (auto-sense) */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6c2d80b36aa1..ab7acbe80960 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -122,9 +122,6 @@ struct scsi_device {
unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
unsigned simple_tags:1; /* simple queue tag messages are enabled */
unsigned ordered_tags:1;/* ordered queue tag messages are enabled */
- unsigned single_lun:1; /* Indicates we should only allow I/O to
- * one of the luns for the device at a
- * time. */
unsigned was_reset:1; /* There was a bus reset on the bus for
* this device */
unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
@@ -142,6 +139,7 @@ struct scsi_device {
unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */
unsigned guess_capacity:1; /* READ_CAPACITY might be too high by 1 */
unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */
+ unsigned last_sector_bug:1; /* Always read last sector in a 1 sector read */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
struct list_head event_list; /* asserted events */
@@ -202,6 +200,9 @@ struct scsi_target {
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
unsigned int create:1; /* signal that it needs to be added */
+ unsigned int single_lun:1; /* Indicates we should only
+ * allow I/O to one of the luns
+ * for the device at a time. */
unsigned int pdt_1f_for_no_lun; /* PDT = 0x1f */
/* means no lun present */
@@ -295,7 +296,7 @@ extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
struct scsi_mode_data *data,
struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
- int retries);
+ int retries, struct scsi_sense_hdr *sshdr);
extern int scsi_device_set_state(struct scsi_device *sdev,
enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
@@ -386,6 +387,10 @@ static inline int scsi_device_qas(struct scsi_device *sdev)
return 0;
return sdev->inquiry[56] & 0x02;
}
+static inline int scsi_device_enclosure(struct scsi_device *sdev)
+{
+ return sdev->inquiry[6] & (1<<6);
+}
#define MODULE_ALIAS_SCSI_DEVICE(type) \
MODULE_ALIAS("scsi:t-" __stringify(type) "*")
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 7ff6199cbd55..404f11d331d6 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -118,7 +118,7 @@ struct iscsi_transport {
char *data, uint32_t data_size);
void (*get_stats) (struct iscsi_cls_conn *conn,
struct iscsi_stats *stats);
- void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
+ int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
void (*init_mgmt_task) (struct iscsi_conn *conn,
struct iscsi_mgmt_task *mtask);
int (*xmit_cmd_task) (struct iscsi_conn *conn,
@@ -176,6 +176,7 @@ struct iscsi_cls_conn {
#define ISCSI_STATE_TERMINATE 4
#define ISCSI_STATE_IN_RECOVERY 5
#define ISCSI_STATE_RECOVERY_FAILED 6
+#define ISCSI_STATE_LOGGING_OUT 7
struct iscsi_cls_session {
struct list_head sess_list; /* item in session_list */
@@ -185,6 +186,7 @@ struct iscsi_cls_session {
/* recovery fields */
int recovery_tmo;
struct delayed_work recovery_work;
+ struct work_struct unbind_work;
int target_id;
@@ -205,6 +207,8 @@ struct iscsi_cls_session {
struct iscsi_host {
struct list_head sessions;
struct mutex mutex;
+ struct workqueue_struct *unbind_workq;
+ char unbind_workq_name[KOBJ_NAME_LEN];
};
/*
@@ -214,8 +218,8 @@ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
struct iscsi_transport *transport);
extern int iscsi_add_session(struct iscsi_cls_session *session,
unsigned int target_id);
-extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
-extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
+extern int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event);
extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
struct iscsi_transport *t,
unsigned int target_id);
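
[Editor's illustration, not part of the patch: init_cmd_task now returns an int, so a transport can fail command setup (for example on a resource shortage) instead of being forced to succeed. A hedged sketch of how a caller might treat the new return value; session and ctask are hypothetical locals, and the transport pointer is assumed reachable as session->tt as in libiscsi:]

	/* Illustration only. */
	if (session->tt->init_cmd_task(ctask)) {
		/* setup failed: do not transmit, leave the command
		 * queued and retry on a later xmitwork run */
	}
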
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index abdfd2e27dd7..09125fa95b93 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -4,23 +4,17 @@
#include <linux/transport_class.h>
#include <linux/types.h>
#include <linux/mutex.h>
+#include <scsi/sas.h>
struct scsi_transport_template;
struct sas_rphy;
struct request;
enum sas_device_type {
- SAS_PHY_UNUSED,
- SAS_END_DEVICE,
- SAS_EDGE_EXPANDER_DEVICE,
- SAS_FANOUT_EXPANDER_DEVICE,
-};
-
-enum sas_protocol {
- SAS_PROTOCOL_SATA = 0x01,
- SAS_PROTOCOL_SMP = 0x02,
- SAS_PROTOCOL_STP = 0x04,
- SAS_PROTOCOL_SSP = 0x08,
+ SAS_PHY_UNUSED = 0,
+ SAS_END_DEVICE = 1,
+ SAS_EDGE_EXPANDER_DEVICE = 2,
+ SAS_FANOUT_EXPANDER_DEVICE = 3,
};
static inline int sas_protocol_ata(enum sas_protocol proto)
diff --git a/include/scsi/sd.h b/include/scsi/sd.h
index f7513313ef0d..8ea9f7358ac1 100644
--- a/include/scsi/sd.h
+++ b/include/scsi/sd.h
@@ -41,6 +41,7 @@ struct scsi_disk {
u32 index;
u8 media_present;
u8 write_prot;
+ unsigned previous_state : 1;
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
diff --git a/kernel/params.c b/kernel/params.c
index b4da9505f4d2..67f65ee7211d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -472,7 +472,7 @@ param_sysfs_setup(struct module_kobject *mk,
sizeof(mp->grp.attrs[0]));
size[1] = (valid_attrs + 1) * sizeof(mp->grp.attrs[0]);
- mp = kmalloc(size[0] + size[1], GFP_KERNEL);
+ mp = kzalloc(size[0] + size[1], GFP_KERNEL);
if (!mp)
return ERR_PTR(-ENOMEM);
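
[Editor's illustration, not part of the patch: the switch to kzalloc() hands back a zero-filled attribute-group buffer, equivalent to the open-coded form below, so later users cannot trip over uninitialised fields:]

	/* Illustration only -- equivalent open-coded form of the kzalloc() call. */
	mp = kmalloc(size[0] + size[1], GFP_KERNEL);
	if (mp)
		memset(mp, 0, size[0] + size[1]);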