From 911ae9434f83e7355d343f6c2be3ef5b00ea7aed Mon Sep 17 00:00:00 2001
From: "nagalakshmi.nandigama@lsi.com"
Date: Thu, 8 Sep 2011 06:18:50 +0530
Subject: [SCSI] mpt2sas: Added NUMA IO support in driver which uses
 multi-reply queue support of the HBA

Support is added for controllers capable of multiple reply queues. The following are the modifications to the driver to support NUMA.

(1) Create the new structure adapter_reply_queue to contain the reply queue info for every MSI-X vector. This object contains a reply_post_host_index, a reply_post_free for each instance, and the msix_index, among other parameters. All the reply queues are tracked on a linked list called ioc->reply_queue_list. Each reply queue is aligned with an IRQ and is passed to the interrupt handler via the bus_id parameter.

(2) The driver figures out the msix_vector_count from the PCIe MSI-X capabilities register instead of IOC Facts->MaxMSIxVectors, because the firmware does not fill in this field until the driver has already registered for MSI-X support.

(3) If ioc_facts reports in its capabilities that the controller is MSI-X compatible, the driver requests multiple IRQs. This count is calculated as the minimum of the available online CPUs and ioc->msix_vector_count, and is reported to the firmware in the ioc_init request.

(4) New routines _base_free_irq and _base_request_irq were added so that registering and freeing MSI-X vectors is done through a simple function API.

(5) The new routine _base_assign_reply_queues was added to align the MSI-X indexes across CPUs. It initializes the array ioc->cpu_msix_table, which is looked up on every MPI request so the MSIxIndex is set appropriately (see the sketch below).

(6) A new shost sysfs attribute was added to report the reply_queue_count.

(7) The user needs to set the affinity CPU mask so that interrupts occur on the same CPU that sent the original request.
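To make the grouping in (5) concrete, here is a minimal user-space sketch of how CPUs can be folded onto a smaller set of reply queues. The CPU_COUNT and MSIX_COUNT values are assumed purely for illustration, and a flat msix_index counter stands in for walking ioc->reply_queue_list; it mirrors the grouping heuristic of _base_assign_reply_queues rather than being the driver code itself.

/*
 * Sketch of cpu -> MSI-X reply queue assignment (illustrative only).
 * CPU_COUNT and MSIX_COUNT are hypothetical values.
 */
#include <stdio.h>

#define CPU_COUNT   12   /* assumed number of online CPUs   */
#define MSIX_COUNT   4   /* assumed number of MSI-X vectors */

int main(void)
{
	int cpu_msix_table[CPU_COUNT];
	int grouping, grouping_mod, cpu_grouping;
	int cpu, loop, msix_index;

	/* When there are more CPUs than vectors, group CPUs on the same IRQ. */
	if (CPU_COUNT > MSIX_COUNT) {
		grouping = CPU_COUNT / MSIX_COUNT;
		grouping_mod = CPU_COUNT % MSIX_COUNT;
		if (grouping < 2 || (grouping == 2 && !grouping_mod))
			cpu_grouping = 2;
		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
			cpu_grouping = 4;
		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
			cpu_grouping = 8;
		else
			cpu_grouping = 16;
	} else
		cpu_grouping = 0;	/* one reply queue per CPU */

	msix_index = 0;
	loop = 0;
	for (cpu = 0; cpu < CPU_COUNT; cpu++) {
		if (!cpu_grouping) {
			/* enough vectors: each CPU gets its own queue */
			cpu_msix_table[cpu] = msix_index;
			msix_index = (msix_index + 1) % MSIX_COUNT;
		} else if (loop < cpu_grouping) {
			/* keep filling the current group */
			cpu_msix_table[cpu] = msix_index;
			loop++;
		} else {
			/* group is full: move to the next reply queue */
			msix_index = (msix_index + 1) % MSIX_COUNT;
			cpu_msix_table[cpu] = msix_index;
			loop = 1;
		}
	}

	for (cpu = 0; cpu < CPU_COUNT; cpu++)
		printf("cpu %2d -> msix %d\n", cpu, cpu_msix_table[cpu]);
	return 0;
}

With the assumed counts above (12 CPUs, 4 vectors), CPUs are grouped four per queue; the resulting table is what _base_get_msix_index consults per request so completions are steered back toward the submitting CPU, provided the user also sets /proc/irq/#/smp_affinity as noted in (7).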
Signed-off-by: Nagalakshmi Nandigama Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 447 +++++++++++++++++++++++++++--------- 1 file changed, 338 insertions(+), 109 deletions(-) (limited to 'drivers/scsi/mpt2sas/mpt2sas_base.c') diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 83035bd1c489..ef323e9a3e19 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -834,25 +834,31 @@ union reply_descriptor { static irqreturn_t _base_interrupt(int irq, void *bus_id) { + struct adapter_reply_queue *reply_q = bus_id; union reply_descriptor rd; u32 completed_cmds; u8 request_desript_type; u16 smid; u8 cb_idx; u32 reply; - u8 msix_index; - struct MPT2SAS_ADAPTER *ioc = bus_id; + u8 msix_index = reply_q->msix_index; + struct MPT2SAS_ADAPTER *ioc = reply_q->ioc; Mpi2ReplyDescriptorsUnion_t *rpf; u8 rc; if (ioc->mask_interrupts) return IRQ_NONE; - rpf = &ioc->reply_post_free[ioc->reply_post_host_index]; + if (!atomic_add_unless(&reply_q->busy, 1, 1)) + return IRQ_NONE; + + rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; request_desript_type = rpf->Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; - if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) + if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { + atomic_dec(&reply_q->busy); return IRQ_NONE; + } completed_cmds = 0; cb_idx = 0xFF; @@ -861,9 +867,7 @@ _base_interrupt(int irq, void *bus_id) if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) goto out; reply = 0; - cb_idx = 0xFF; smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); - msix_index = rpf->Default.MSIxIndex; if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { reply = le32_to_cpu @@ -907,31 +911,85 @@ _base_interrupt(int irq, void *bus_id) next: rpf->Words = cpu_to_le64(ULLONG_MAX); - ioc->reply_post_host_index = (ioc->reply_post_host_index == + reply_q->reply_post_host_index = + (reply_q->reply_post_host_index == (ioc->reply_post_queue_depth - 1)) ? 0 : - ioc->reply_post_host_index + 1; + reply_q->reply_post_host_index + 1; request_desript_type = - ioc->reply_post_free[ioc->reply_post_host_index].Default. - ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + reply_q->reply_post_free[reply_q->reply_post_host_index]. 
+ Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; completed_cmds++; if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) goto out; - if (!ioc->reply_post_host_index) - rpf = ioc->reply_post_free; + if (!reply_q->reply_post_host_index) + rpf = reply_q->reply_post_free; else rpf++; } while (1); out: - if (!completed_cmds) + if (!completed_cmds) { + atomic_dec(&reply_q->busy); return IRQ_NONE; - + } wmb(); - writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex); + if (ioc->is_warpdrive) { + writel(reply_q->reply_post_host_index, + ioc->reply_post_host_index[msix_index]); + atomic_dec(&reply_q->busy); + return IRQ_HANDLED; + } + writel(reply_q->reply_post_host_index | (msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex); + atomic_dec(&reply_q->busy); return IRQ_HANDLED; } +/** + * _base_is_controller_msix_enabled - is controller support muli-reply queues + * @ioc: per adapter object + * + */ +static inline int +_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc) +{ + return (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; +} + +/** + * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues + * @ioc: per adapter object + * Context: ISR conext + * + * Called when a Task Management request has completed. We want + * to flush the other reply queues so all the outstanding IO has been + * completed back to OS before we process the TM completetion. + * + * Return nothing. + */ +void +mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q; + + /* If MSIX capability is turned off + * then multi-queues are not enabled + */ + if (!_base_is_controller_msix_enabled(ioc)) + return; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->shost_recovery) + return; + /* TMs are on msix_index == 0 */ + if (reply_q->msix_index == 0) + continue; + _base_interrupt(reply_q->vector, (void *)reply_q); + } +} + /** * mpt2sas_base_release_callback_handler - clear interrupt callback handler * @cb_idx: callback index @@ -1082,74 +1140,171 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) } /** - * _base_save_msix_table - backup msix vector table + * _base_check_enable_msix - checks MSIX capabable. 
* @ioc: per adapter object * - * This address an errata where diag reset clears out the table + * Check to see if card is capable of MSIX, and set number + * of available msix vectors */ -static void -_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc) +static int +_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) { - int i; + int base; + u16 message_control; - if (!ioc->msix_enable || ioc->msix_table_backup == NULL) - return; - for (i = 0; i < ioc->msix_vector_count; i++) - ioc->msix_table_backup[i] = ioc->msix_table[i]; + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); + if (!base) { + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not " + "supported\n", ioc->name)); + return -EINVAL; + } + + /* get msix vector count */ + /* NUMA_IO not supported for older controllers */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) + ioc->msix_vector_count = 1; + else { + pci_read_config_word(ioc->pdev, base + 2, &message_control); + ioc->msix_vector_count = (message_control & 0x3FF) + 1; + } + dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, " + "vector_count(%d)\n", ioc->name, ioc->msix_vector_count)); + + return 0; } /** - * _base_restore_msix_table - this restores the msix vector table + * _base_free_irq - free irq * @ioc: per adapter object * + * Freeing respective reply_queue from the list. */ static void -_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc) +_base_free_irq(struct MPT2SAS_ADAPTER *ioc) { - int i; + struct adapter_reply_queue *reply_q, *next; - if (!ioc->msix_enable || ioc->msix_table_backup == NULL) + if (list_empty(&ioc->reply_queue_list)) return; - for (i = 0; i < ioc->msix_vector_count; i++) - ioc->msix_table[i] = ioc->msix_table_backup[i]; + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + list_del(&reply_q->list); + synchronize_irq(reply_q->vector); + free_irq(reply_q->vector, reply_q); + kfree(reply_q); + } } /** - * _base_check_enable_msix - checks MSIX capabable. + * _base_request_irq - request irq * @ioc: per adapter object + * @index: msix index into vector table + * @vector: irq vector * - * Check to see if card is capable of MSIX, and set number - * of available msix vectors + * Inserting respective reply_queue into the list. 
*/ static int -_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) +_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector) { - int base; - u16 message_control; - u32 msix_table_offset; + struct adapter_reply_queue *reply_q; + int r; - base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); - if (!base) { - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not " - "supported\n", ioc->name)); - return -EINVAL; + reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); + if (!reply_q) { + printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n", + ioc->name, (int)sizeof(struct adapter_reply_queue)); + return -ENOMEM; + } + reply_q->ioc = ioc; + reply_q->msix_index = index; + reply_q->vector = vector; + atomic_set(&reply_q->busy, 0); + if (ioc->msix_enable) + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", + MPT2SAS_DRIVER_NAME, ioc->id, index); + else + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", + MPT2SAS_DRIVER_NAME, ioc->id); + r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name, + reply_q); + if (r) { + printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n", + reply_q->name, vector); + kfree(reply_q); + return -EBUSY; } - /* get msix vector count */ - pci_read_config_word(ioc->pdev, base + 2, &message_control); - ioc->msix_vector_count = (message_control & 0x3FF) + 1; + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; +} - /* get msix table */ - pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset); - msix_table_offset &= 0xFFFFFFF8; - ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset); +/** + * _base_assign_reply_queues - assigning msix index for each cpu + * @ioc: per adapter object + * + * The enduser would need to set the affinity via /proc/irq/#/smp_affinity + * + * It would nice if we could call irq_set_affinity, however it is not + * an exported symbol + */ +static void +_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q; + int cpu_id; + int cpu_grouping, loop, grouping, grouping_mod; - dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, " - "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name, - ioc->msix_vector_count, msix_table_offset, ioc->msix_table)); - return 0; + if (!_base_is_controller_msix_enabled(ioc)) + return; + + memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); + /* when there are more cpus than available msix vectors, + * then group cpus togeather on same irq + */ + if (ioc->cpu_count > ioc->msix_vector_count) { + grouping = ioc->cpu_count / ioc->msix_vector_count; + grouping_mod = ioc->cpu_count % ioc->msix_vector_count; + if (grouping < 2 || (grouping == 2 && !grouping_mod)) + cpu_grouping = 2; + else if (grouping < 4 || (grouping == 4 && !grouping_mod)) + cpu_grouping = 4; + else if (grouping < 8 || (grouping == 8 && !grouping_mod)) + cpu_grouping = 8; + else + cpu_grouping = 16; + } else + cpu_grouping = 0; + + loop = 0; + reply_q = list_entry(ioc->reply_queue_list.next, + struct adapter_reply_queue, list); + for_each_online_cpu(cpu_id) { + if (!cpu_grouping) { + ioc->cpu_msix_table[cpu_id] = reply_q->msix_index; + reply_q = list_entry(reply_q->list.next, + struct adapter_reply_queue, list); + } else { + if (loop < cpu_grouping) { + ioc->cpu_msix_table[cpu_id] = + reply_q->msix_index; + loop++; + } else { + reply_q = list_entry(reply_q->list.next, + struct adapter_reply_queue, list); + ioc->cpu_msix_table[cpu_id] = + reply_q->msix_index; + loop = 1; + } + } + } 
} /** @@ -1162,8 +1317,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc) { if (ioc->msix_enable) { pci_disable_msix(ioc->pdev); - kfree(ioc->msix_table_backup); - ioc->msix_table_backup = NULL; ioc->msix_enable = 0; } } @@ -1176,10 +1329,13 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc) static int _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) { - struct msix_entry entries; + struct msix_entry *entries, *a; int r; + int i; u8 try_msix = 0; + INIT_LIST_HEAD(&ioc->reply_queue_list); + if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1189,51 +1345,48 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) if (_base_check_enable_msix(ioc) != 0) goto try_ioapic; - ioc->msix_table_backup = kcalloc(ioc->msix_vector_count, - sizeof(u32), GFP_KERNEL); - if (!ioc->msix_table_backup) { - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for " - "msix_table_backup failed!!!\n", ioc->name)); + ioc->reply_queue_count = min_t(u8, ioc->cpu_count, + ioc->msix_vector_count); + + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), + GFP_KERNEL); + if (!entries) { + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc " + "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__, + __LINE__, __func__)); goto try_ioapic; } - memset(&entries, 0, sizeof(struct msix_entry)); - r = pci_enable_msix(ioc->pdev, &entries, 1); + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) + a->entry = i; + + r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); if (r) { dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix " "failed (r=%d) !!!\n", ioc->name, r)); + kfree(entries); goto try_ioapic; } - r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED, - ioc->name, ioc); - if (r) { - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate " - "interrupt %d !!!\n", ioc->name, entries.vector)); - pci_disable_msix(ioc->pdev); - goto try_ioapic; + ioc->msix_enable = 1; + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { + r = _base_request_irq(ioc, i, a->vector); + if (r) { + _base_free_irq(ioc); + _base_disable_msix(ioc); + kfree(entries); + goto try_ioapic; + } } - ioc->pci_irq = entries.vector; - ioc->msix_enable = 1; + kfree(entries); return 0; /* failback to io_apic interrupt routing */ try_ioapic: - r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED, - ioc->name, ioc); - if (r) { - printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n", - ioc->name, ioc->pdev->irq); - r = -EBUSY; - goto out_fail; - } + r = _base_request_irq(ioc, 0, ioc->pdev->irq); - ioc->pci_irq = ioc->pdev->irq; - return 0; - - out_fail: return r; } @@ -1252,6 +1405,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) int i, r = 0; u64 pio_chip = 0; u64 chip_phys = 0; + struct adapter_reply_queue *reply_q; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -1314,9 +1468,11 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) if (r) goto out_fail; - printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", - ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : - "IO-APIC enabled"), ioc->pci_irq); + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) + printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", + reply_q->name, ((ioc->msix_enable) ? 
"PCI-MSI-X enabled" : + "IO-APIC enabled"), reply_q->vector); + printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", @@ -1331,7 +1487,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) if (ioc->chip_phys) iounmap(ioc->chip); ioc->chip_phys = 0; - ioc->pci_irq = -1; pci_release_selected_regions(ioc->pdev, ioc->bars); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@ -1578,6 +1733,12 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr, } #endif +static inline u8 +_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) +{ + return ioc->cpu_msix_table[smp_processor_id()]; +} + /** * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware * @ioc: per adapter object @@ -1594,7 +1755,7 @@ mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle) descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; - descriptor.SCSIIO.MSIxIndex = 0; /* TODO */ + descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); descriptor.SCSIIO.SMID = cpu_to_le16(smid); descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); descriptor.SCSIIO.LMID = 0; @@ -1618,7 +1779,7 @@ mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid) descriptor.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; - descriptor.HighPriority.MSIxIndex = 0; /* TODO */ + descriptor.HighPriority.MSIxIndex = 0; descriptor.HighPriority.SMID = cpu_to_le16(smid); descriptor.HighPriority.LMID = 0; descriptor.HighPriority.Reserved1 = 0; @@ -1640,7 +1801,7 @@ mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid) u64 *request = (u64 *)&descriptor; descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; - descriptor.Default.MSIxIndex = 0; /* TODO */ + descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); descriptor.Default.SMID = cpu_to_le16(smid); descriptor.Default.LMID = 0; descriptor.Default.DescriptorTypeDependent = 0; @@ -1665,7 +1826,7 @@ mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid, descriptor.SCSITarget.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET; - descriptor.SCSITarget.MSIxIndex = 0; /* TODO */ + descriptor.SCSITarget.MSIxIndex = _base_get_msix_index(ioc); descriptor.SCSITarget.SMID = cpu_to_le16(smid); descriptor.SCSITarget.LMID = 0; descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index); @@ -2172,7 +2333,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u16 max_sge_elements; u16 num_of_reply_frames; u16 chains_needed_per_io; - u32 sz, total_sz; + u32 sz, total_sz, reply_post_free_sz; u32 retry_sz; u16 max_request_credit; int i; @@ -2499,7 +2660,12 @@ chain_done: total_sz += sz; /* reply post queue, 16 byte align */ - sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t); + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + if (_base_is_controller_msix_enabled(ioc)) + sz = reply_post_free_sz * ioc->reply_queue_count; + else + sz = reply_post_free_sz; ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", ioc->pdev, sz, 16, 0); if (!ioc->reply_post_free_dma_pool) { @@ -3187,6 +3353,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) facts->MaxChainDepth = mpi_reply.MaxChainDepth; facts->WhoInit = mpi_reply.WhoInit; facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = 
mpi_reply.MaxMSIxVectors; facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); facts->MaxReplyDescriptorPostQueueDepth = le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); @@ -3244,7 +3411,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); - + if (_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); mpi_request.ReplyDescriptorPostQueueDepth = cpu_to_le16(ioc->reply_post_queue_depth); @@ -3513,9 +3681,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u32 hcb_size; printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name); - - _base_save_msix_table(ioc); - drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n", ioc->name)); @@ -3611,7 +3776,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) goto out; } - _base_restore_msix_table(ioc); printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name); return 0; @@ -3692,6 +3856,9 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u16 smid; struct _tr_list *delayed_tr, *delayed_tr_next; u8 hide_flag; + struct adapter_reply_queue *reply_q; + long reply_post_free; + u32 reply_post_free_sz; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -3757,19 +3924,43 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) ioc->reply_sz) ioc->reply_free[i] = cpu_to_le32(reply_address); + /* initialize reply queues */ + _base_assign_reply_queues(ioc); + /* initialize Reply Post Free Queue */ - for (i = 0; i < ioc->reply_post_queue_depth; i++) - ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX); + reply_post_free = (long)ioc->reply_post_free; + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + reply_q->reply_post_host_index = 0; + reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) + reply_post_free; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + reply_post_free += reply_post_free_sz; + } + skip_init_reply_post_free_queue: r = _base_send_ioc_init(ioc, sleep_flag); if (r) return r; - /* initialize the index's */ + /* initialize reply free host index */ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; - ioc->reply_post_host_index = 0; writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); - writel(0, &ioc->chip->ReplyPostHostIndex); + + /* initialize reply post host index */ + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_host_index; + } + + skip_init_reply_post_host_index: _base_unmask_interrupts(ioc); r = _base_event_notification(ioc, sleep_flag); @@ -3820,14 +4011,10 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) ioc->shost_recovery = 1; _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); ioc->shost_recovery = 0; - if (ioc->pci_irq) { - synchronize_irq(pdev->irq); - free_irq(ioc->pci_irq, ioc); - } + _base_free_irq(ioc); _base_disable_msix(ioc); if (ioc->chip_phys) iounmap(ioc->chip); - ioc->pci_irq = -1; 
ioc->chip_phys = 0; pci_release_selected_regions(ioc->pdev, ioc->bars); pci_disable_pcie_error_reporting(pdev); @@ -3845,14 +4032,50 @@ int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) { int r, i; + int cpu_id, last_cpu_id = 0; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); + /* setup cpu_msix_table */ + ioc->cpu_count = num_online_cpus(); + for_each_online_cpu(cpu_id) + last_cpu_id = cpu_id; + ioc->cpu_msix_table_sz = last_cpu_id + 1; + ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); + ioc->reply_queue_count = 1; + if (!ioc->cpu_msix_table) { + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for " + "cpu_msix_table failed!!!\n", ioc->name)); + r = -ENOMEM; + goto out_free_resources; + } + + if (ioc->is_warpdrive) { + ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, + sizeof(resource_size_t *), GFP_KERNEL); + if (!ioc->reply_post_host_index) { + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation " + "for cpu_msix_table failed!!!\n", ioc->name)); + r = -ENOMEM; + goto out_free_resources; + } + } + r = mpt2sas_base_map_resources(ioc); if (r) return r; + if (ioc->is_warpdrive) { + ioc->reply_post_host_index[0] = + (resource_size_t *)&ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = (resource_size_t *) + ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); + } + pci_set_drvdata(ioc->pdev, ioc->shost); r = _base_get_ioc_facts(ioc, CAN_SLEEP); if (r) @@ -3973,6 +4196,9 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) mpt2sas_base_free_resources(ioc); _base_release_memory_pools(ioc); pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->tm_cmds.reply); kfree(ioc->transport_cmds.reply); @@ -4010,6 +4236,9 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) mpt2sas_base_free_resources(ioc); _base_release_memory_pools(ioc); pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->pfacts); kfree(ioc->ctl_cmds.reply); -- cgit v1.2.3 From 921cd8024b908f8f49f772c8d3a02381b4db2ed2 Mon Sep 17 00:00:00 2001 From: "nagalakshmi.nandigama@lsi.com" Date: Wed, 19 Oct 2011 15:36:26 +0530 Subject: [SCSI] mpt2sas: New feature - Fast Load Support New feature Fast Load Support. (1)Asynchronous SCSI scanning: This will allow the drivers to scan for devices in parallel while other device drivers are loading at the same time. This will improve the amount of time it takes for the OS to load. (2) Reporting Devices while port enable is active: This feature will allow devices to be reported to OS immediately while port enable is active. The previous implementation waits for port enable to complete, and then report devices. This feature is only enabled on IT firmware configurations when there are no boot device configured in BIOS Configuration Utility, else the driver will wait till port enable completes reporting devices. For IR firmware, this feature is turned off. This feature is to address large SAS topologies (>100 drives) when the boot OS is using onboard SATA device, in other words, the boot devices is not connected to our controller. (3) Scanning for devices after diagnostic reset completes: A new routine _scsih_scan_start is added. This will scan the expander pages, IR pages, and sas device pages, then reporting new devices to SCSI Mid layer. 
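The scan described in (3) repeatedly asks the firmware for the "next" handle until it reports an invalid page. Below is a hedged user-space sketch of that walk; get_next_device() is a hypothetical stand-in for the real config-page helpers (mpt2sas_config_get_*_pg0), and the handle values are made up.

/*
 * Sketch of the GET_NEXT_HANDLE page-walk pattern (illustrative only).
 * get_next_device() simulates the firmware; it is not a driver API.
 */
#include <stdio.h>
#include <stdint.h>

#define INVALID_PAGE  1   /* stand-in for MPI2_IOCSTATUS_CONFIG_INVALID_PAGE */
#define PAGE_OK       0

/* Pretend firmware: three devices with handles 9, 10 and 11. */
static int get_next_device(uint16_t prev_handle, uint16_t *next_handle)
{
	static const uint16_t handles[] = { 9, 10, 11 };
	size_t i;

	for (i = 0; i < sizeof(handles) / sizeof(handles[0]); i++)
		if (handles[i] > prev_handle) {
			*next_handle = handles[i];
			return PAGE_OK;
		}
	return INVALID_PAGE;
}

int main(void)
{
	uint16_t handle = 0;	/* the real driver seeds with 0xFFFF ("start") */
	uint16_t next;

	/* keep fetching pages until the firmware says there are no more */
	while (get_next_device(handle, &next) == PAGE_OK) {
		handle = next;
		printf("report device handle 0x%04x to the SCSI midlayer\n",
		       (unsigned int)handle);
	}
	return 0;
}

The same loop shape is used in the patch below for expanders, physical disks, volumes and SAS end devices, with only the config page and "form" argument changing.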
It seems the driver is not supporting adding devices while diagnostic reset is active. Apparently this is due to the sanity checks on ioc->shost_recovery flag throughout the context of kernel work thread FIFO, and the mpt2sas_fw_work. Signed-off-by: Nagalakshmi Nandigama Signed-off-by: James Bottomley --- drivers/scsi/mpt2sas/mpt2sas_base.c | 247 ++++++++++++++++---- drivers/scsi/mpt2sas/mpt2sas_base.h | 26 ++- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 9 +- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 423 ++++++++++++++++++++++++++++------- 4 files changed, 580 insertions(+), 125 deletions(-) (limited to 'drivers/scsi/mpt2sas/mpt2sas_base.c') diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 81209ca87274..beda04a8404b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -81,6 +81,15 @@ static int missing_delay[2] = {-1, -1}; module_param_array(missing_delay, int, NULL, 0); MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); +static int mpt2sas_fwfault_debug; +MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " + "and halt firmware - (default=0)"); + +static int disable_discovery = -1; +module_param(disable_discovery, int, 0); +MODULE_PARM_DESC(disable_discovery, " disable discovery "); + + /* diag_buffer_enable is bitwise * bit 0 set = TRACE * bit 1 set = SNAPSHOT @@ -93,14 +102,6 @@ module_param(diag_buffer_enable, int, 0); MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); -static int mpt2sas_fwfault_debug; -MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " - "and halt firmware - (default=0)"); - -static int disable_discovery = -1; -module_param(disable_discovery, int, 0); -MODULE_PARM_DESC(disable_discovery, " disable discovery "); - /** * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. * @@ -691,6 +692,7 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); } ioc->base_cmds.status &= ~MPT2_CMD_PENDING; + complete(&ioc->base_cmds.done); return 1; } @@ -3469,6 +3471,58 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) return 0; } +/** + * mpt2sas_port_enable_done - command completion routine for port enable + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +u8 +mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + + mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) + return 1; + + if (ioc->port_enable_cmds.status == MPT2_CMD_NOT_USED) + return 1; + + ioc->port_enable_cmds.status |= MPT2_CMD_COMPLETE; + if (mpi_reply) { + ioc->port_enable_cmds.status |= MPT2_CMD_REPLY_VALID; + memcpy(ioc->port_enable_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + } + ioc->port_enable_cmds.status &= ~MPT2_CMD_PENDING; + + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + ioc->port_enable_failed = 1; + + if (ioc->is_driver_loading) { + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + mpt2sas_port_enable_complete(ioc); + return 1; + } else { + ioc->start_scan_failed = ioc_status; + ioc->start_scan = 0; + return 1; + } + } + complete(&ioc->port_enable_cmds.done); + return 1; +} + + /** * _base_send_port_enable - send port_enable(discovery stuff) to firmware * @ioc: per adapter object @@ -3480,66 +3534,150 @@ static int _base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) { Mpi2PortEnableRequest_t *mpi_request; - u32 ioc_state; + Mpi2PortEnableReply_t *mpi_reply; unsigned long timeleft; int r = 0; u16 smid; + u16 ioc_status; printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name); - if (ioc->base_cmds.status & MPT2_CMD_PENDING) { + if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) { printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n", ioc->name, __func__); return -EAGAIN; } - smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx); + smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx); if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); return -EAGAIN; } - ioc->base_cmds.status = MPT2_CMD_PENDING; + ioc->port_enable_cmds.status = MPT2_CMD_PENDING; mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); - ioc->base_cmds.smid = smid; + ioc->port_enable_cmds.smid = smid; memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; - mpi_request->VF_ID = 0; /* TODO */ - mpi_request->VP_ID = 0; + init_completion(&ioc->port_enable_cmds.done); mpt2sas_base_put_smid_default(ioc, smid); - init_completion(&ioc->base_cmds.done); - timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, + timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); - if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) { + if (!(ioc->port_enable_cmds.status & MPT2_CMD_COMPLETE)) { printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2PortEnableRequest_t)/4); - if (ioc->base_cmds.status & MPT2_CMD_RESET) + if (ioc->port_enable_cmds.status & MPT2_CMD_RESET) r = -EFAULT; else r = -ETIME; goto out; - } else - dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: complete\n", - ioc->name, __func__)); + } + mpi_reply = ioc->port_enable_cmds.reply; - ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL, - 60, sleep_flag); - if (ioc_state) { - printk(MPT2SAS_ERR_FMT "%s: failed going to operational state " - " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state); + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_ERR_FMT "%s: failed with (ioc_status=0x%08x)\n", + ioc->name, __func__, 
ioc_status); r = -EFAULT; + goto out; } out: - ioc->base_cmds.status = MPT2_CMD_NOT_USED; - printk(MPT2SAS_INFO_FMT "port enable: %s\n", - ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); + ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED; + printk(MPT2SAS_INFO_FMT "port enable: %s\n", ioc->name, ((r == 0) ? + "SUCCESS" : "FAILED")); return r; } +/** + * mpt2sas_port_enable - initiate firmware discovery (don't wait for reply) + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc) +{ + Mpi2PortEnableRequest_t *mpi_request; + u16 smid; + + printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name); + + if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) { + printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n", + ioc->name, __func__); + return -EAGAIN; + } + + smid = mpt2sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + + ioc->port_enable_cmds.status = MPT2_CMD_PENDING; + mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + mpt2sas_base_put_smid_default(ioc, smid); + return 0; +} + +/** + * _base_determine_wait_on_discovery - desposition + * @ioc: per adapter object + * + * Decide whether to wait on discovery to complete. Used to either + * locate boot device, or report volumes ahead of physical devices. + * + * Returns 1 for wait, 0 for don't wait + */ +static int +_base_determine_wait_on_discovery(struct MPT2SAS_ADAPTER *ioc) +{ + /* We wait for discovery to complete if IR firmware is loaded. + * The sas topology events arrive before PD events, so we need time to + * turn on the bit in ioc->pd_handles to indicate PD + * Also, it maybe required to report Volumes ahead of physical + * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. + */ + if (ioc->ir_firmware) + return 1; + + /* if no Bios, then we don't need to wait */ + if (!ioc->bios_pg3.BiosVersion) + return 0; + + /* Bios is present, then we drop down here. + * + * If there any entries in the Bios Page 2, then we wait + * for discovery to complete. 
+ */ + + /* Current Boot Device */ + if ((ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Request Boot Device */ + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Alternate Request Boot Device */ + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) + return 0; + + return 1; +} + + /** * _base_unmask_events - turn on notification for this event * @ioc: per adapter object @@ -3962,6 +4100,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) skip_init_reply_post_host_index: _base_unmask_interrupts(ioc); + r = _base_event_notification(ioc, sleep_flag); if (r) return r; @@ -3969,7 +4108,18 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) if (sleep_flag == CAN_SLEEP) _base_static_config_pages(ioc); - if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) { + + if (ioc->is_driver_loading) { + + + + ioc->wait_for_discovery_to_complete = + _base_determine_wait_on_discovery(ioc); + return r; /* scan_start and scan_finished support */ + } + + + if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) { if (ioc->manu_pg10.OEMIdentifier == 0x80) { hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 & MFG_PAGE10_HIDE_SSDS_MASK); @@ -3978,13 +4128,6 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } } - if (ioc->wait_for_port_enable_to_complete) { - if (diag_buffer_enable != 0) - mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); - if (disable_discovery > 0) - return r; - } - r = _base_send_port_enable(ioc, sleep_flag); if (r) return r; @@ -4121,6 +4264,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); ioc->base_cmds.status = MPT2_CMD_NOT_USED; + /* port_enable command bits */ + ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->port_enable_cmds.status = MPT2_CMD_NOT_USED; + /* transport internal command bits */ ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); ioc->transport_cmds.status = MPT2_CMD_NOT_USED; @@ -4162,8 +4309,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) goto out_free_resources; } - init_completion(&ioc->shost_recovery_done); - for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) ioc->event_masks[i] = -1; @@ -4186,7 +4331,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) _base_update_missing_delay(ioc, missing_delay[0], missing_delay[1]); - mpt2sas_base_start_watchdog(ioc); return 0; out_free_resources: @@ -4204,6 +4348,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) kfree(ioc->scsih_cmds.reply); kfree(ioc->config_cmds.reply); kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); kfree(ioc->ctl_cmds.reply); kfree(ioc->ctl_cmds.sense); kfree(ioc->pfacts); @@ -4243,6 +4388,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) kfree(ioc->ctl_cmds.reply); kfree(ioc->ctl_cmds.sense); kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); kfree(ioc->tm_cmds.reply); kfree(ioc->transport_cmds.reply); kfree(ioc->scsih_cmds.reply); @@ -4284,6 +4430,20 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid); complete(&ioc->base_cmds.done); } + if (ioc->port_enable_cmds.status & MPT2_CMD_PENDING) { + ioc->port_enable_failed = 1; + ioc->port_enable_cmds.status |= MPT2_CMD_RESET; + mpt2sas_base_free_smid(ioc, 
ioc->port_enable_cmds.smid); + if (ioc->is_driver_loading) { + ioc->start_scan_failed = + MPI2_IOCSTATUS_INTERNAL_ERROR; + ioc->start_scan = 0; + ioc->port_enable_cmds.status = + MPT2_CMD_NOT_USED; + } else + complete(&ioc->port_enable_cmds.done); + + } if (ioc->config_cmds.status & MPT2_CMD_PENDING) { ioc->config_cmds.status |= MPT2_CMD_RESET; mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); @@ -4349,7 +4509,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, { int r; unsigned long flags; - u8 pe_complete = ioc->wait_for_port_enable_to_complete; dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); @@ -4396,7 +4555,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, /* If this hard reset is called while port enable is active, then * there is no reason to call make_ioc_operational */ - if (pe_complete) { + if (ioc->is_driver_loading && ioc->port_enable_failed) { + ioc->remove_host = 1; r = -EFAULT; goto out; } @@ -4410,7 +4570,6 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); ioc->ioc_reset_in_progress_status = r; ioc->shost_recovery = 0; - complete(&ioc->shost_recovery_done); spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); mutex_unlock(&ioc->reset_in_progress_mutex); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 59354dba68c0..ce2bd9bf40f7 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -655,7 +655,12 @@ enum mutex_type { * @ignore_loginfos: ignore loginfos during task management * @remove_host: flag for when driver unloads, to avoid sending dev resets * @pci_error_recovery: flag to prevent ioc access until slot reset completes - * @wait_for_port_enable_to_complete: + * @wait_for_discovery_to_complete: flag set at driver load time when + * waiting on reporting devices + * @is_driver_loading: flag set at driver load time + * @port_enable_failed: flag set when port enable has failed + * @start_scan: flag set from scan_start callback, cleared from _mpt2sas_fw_work + * @start_scan_failed: means port enable failed, return's the ioc_status * @msix_enable: flag indicating msix is enabled * @msix_vector_count: number msix vectors * @cpu_msix_table: table for mapping cpus to msix index @@ -790,15 +795,20 @@ struct MPT2SAS_ADAPTER { u8 shost_recovery; struct mutex reset_in_progress_mutex; - struct completion shost_recovery_done; spinlock_t ioc_reset_in_progress_lock; u8 ioc_link_reset_in_progress; - int ioc_reset_in_progress_status; + u8 ioc_reset_in_progress_status; u8 ignore_loginfos; u8 remove_host; u8 pci_error_recovery; - u8 wait_for_port_enable_to_complete; + u8 wait_for_discovery_to_complete; + struct completion port_enable_done; + u8 is_driver_loading; + u8 port_enable_failed; + + u8 start_scan; + u16 start_scan_failed; u8 msix_enable; u16 msix_vector_count; @@ -814,11 +824,13 @@ struct MPT2SAS_ADAPTER { u8 scsih_cb_idx; u8 ctl_cb_idx; u8 base_cb_idx; + u8 port_enable_cb_idx; u8 config_cb_idx; u8 tm_tr_cb_idx; u8 tm_tr_volume_cb_idx; u8 tm_sas_control_cb_idx; struct _internal_cmd base_cmds; + struct _internal_cmd port_enable_cmds; struct _internal_cmd transport_cmds; struct _internal_cmd scsih_cmds; struct _internal_cmd tm_cmds; @@ -1001,6 +1013,8 @@ void mpt2sas_base_release_callback_handler(u8 cb_idx); u8 mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); +u8 
mpt2sas_port_enable_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); void *mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr); u32 mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked); @@ -1015,6 +1029,8 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc); +int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc); + /* scsih shared API */ u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); @@ -1032,6 +1048,8 @@ struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAP struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address( struct MPT2SAS_ADAPTER *ioc, u64 sas_address); +void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc); + void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); /* config shared API */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 9adb0133d6fb..aabcb911706e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -1207,6 +1207,9 @@ _ctl_do_reset(void __user *arg) if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) + return -EAGAIN; dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, __func__)); @@ -2178,7 +2181,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg) !ioc) return -ENODEV; - if (ioc->shost_recovery || ioc->pci_error_recovery) + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) return -EAGAIN; if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { @@ -2297,7 +2301,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg) if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) return -ENODEV; - if (ioc->shost_recovery || ioc->pci_error_recovery) + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) return -EAGAIN; memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 1da1aa1a11e2..7a3865d9c959 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -71,6 +71,9 @@ static void _firmware_event_work(struct work_struct *work); static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid); +static void _scsih_scan_start(struct Scsi_Host *shost); +static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time); + /* global parameters */ LIST_HEAD(mpt2sas_ioc_list); @@ -79,6 +82,7 @@ static u8 scsi_io_cb_idx = -1; static u8 tm_cb_idx = -1; static u8 ctl_cb_idx = -1; static u8 base_cb_idx = -1; +static u8 port_enable_cb_idx = -1; static u8 transport_cb_idx = -1; static u8 scsih_cb_idx = -1; static u8 config_cb_idx = -1; @@ -103,6 +107,18 @@ static int max_lun = MPT2SAS_MAX_LUN; module_param(max_lun, int, 0); MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); +/* diag_buffer_enable is bitwise + * bit 0 set = TRACE + * bit 1 set = SNAPSHOT + * bit 2 set = EXTENDED + * + * Either bit can be set, or both + */ +static int diag_buffer_enable = -1; +module_param(diag_buffer_enable, int, 0); +MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " + "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); + /** * struct sense_info - common structure for 
obtaining sense keys * @skey: sense key @@ -117,8 +133,8 @@ struct sense_info { #define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC) -#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF) - +#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD) +#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) /** * struct fw_event_work - firmware event struct * @list: link list framework @@ -424,7 +440,11 @@ _scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc, u16 slot; /* only process this function when driver loads */ - if (!ioc->wait_for_port_enable_to_complete) + if (!ioc->is_driver_loading) + return; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) return; if (!is_raid) { @@ -2714,22 +2734,38 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work /** - * _scsih_queue_rescan - queue a topology rescan from user context + * _scsih_error_recovery_delete_devices - remove devices not responding * @ioc: per adapter object * * Return nothing. */ static void -_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc) +_scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc) { struct fw_event_work *fw_event; - if (ioc->wait_for_port_enable_to_complete) + if (ioc->is_driver_loading) return; + fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); +} + +/** + * mpt2sas_port_enable_complete - port enable completed (fake event) + * @ioc: per adapter object + * + * Return nothing. + */ +void +mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) return; - fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET; + fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE; fw_event->ioc = ioc; _scsih_fw_event_add(ioc, fw_event); } @@ -5099,7 +5135,7 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) /* get device name */ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); - if (ioc->wait_for_port_enable_to_complete) + if (ioc->wait_for_discovery_to_complete) _scsih_sas_device_init_add(ioc, sas_device); else _scsih_sas_device_add(ioc, sas_device); @@ -5622,7 +5658,7 @@ broadcast_aen_retry: termination_count = 0; query_count = 0; for (smid = 1; smid <= ioc->scsiio_depth; smid++) { - if (ioc->ioc_reset_in_progress_status) + if (ioc->shost_recovery) goto out; scmd = _scsih_scsi_lookup_get(ioc, smid); if (!scmd) @@ -5644,7 +5680,7 @@ broadcast_aen_retry: lun = sas_device_priv_data->lun; query_count++; - if (ioc->ioc_reset_in_progress_status) + if (ioc->shost_recovery) goto out; spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); @@ -5686,7 +5722,7 @@ broadcast_aen_retry: goto broadcast_aen_retry; } - if (ioc->ioc_reset_in_progress_status) + if (ioc->shost_recovery) goto out_no_lock; r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, @@ -5725,7 +5761,7 @@ broadcast_aen_retry: ioc->name, __func__, query_count, termination_count)); ioc->broadcast_aen_busy = 0; - if (!ioc->ioc_reset_in_progress_status) + if (!ioc->shost_recovery) _scsih_ublock_io_all_device(ioc); mutex_unlock(&ioc->tm_cmds.mutex); } @@ -5845,7 +5881,7 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc, raid_device->handle = handle; raid_device->wwid = wwid; _scsih_raid_device_add(ioc, raid_device); - if (!ioc->wait_for_port_enable_to_complete) { + if (!ioc->wait_for_discovery_to_complete) { rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) @@ -6127,6 +6163,10 @@ 
_scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, _scsih_sas_ir_config_change_event_debug(ioc, event_data); #endif + + if (ioc->shost_recovery) + return; + foreign_config = (le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; @@ -6185,6 +6225,9 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, int rc; Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; + if (ioc->shost_recovery) + return; + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) return; @@ -6267,6 +6310,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; u64 sas_address; + if (ioc->shost_recovery) + return; + if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) return; @@ -6510,10 +6556,10 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc) u32 device_info; u16 slot; - printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); + printk(MPT2SAS_INFO_FMT "search for end-devices: start\n", ioc->name); if (list_empty(&ioc->sas_device_list)) - return; + goto out; handle = 0xFFFF; while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, @@ -6532,6 +6578,9 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc) _scsih_mark_responding_sas_device(ioc, sas_address, slot, handle); } +out: + printk(MPT2SAS_INFO_FMT "search for end-devices: complete\n", + ioc->name); } /** @@ -6607,10 +6656,14 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) u16 handle; u8 phys_disk_num; - printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); + if (!ioc->ir_firmware) + return; + + printk(MPT2SAS_INFO_FMT "search for raid volumes: start\n", + ioc->name); if (list_empty(&ioc->raid_device_list)) - return; + goto out; handle = 0xFFFF; while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, @@ -6649,6 +6702,9 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) set_bit(handle, ioc->pd_handles); } } +out: + printk(MPT2SAS_INFO_FMT "search for responding raid volumes: " + "complete\n", ioc->name); } /** @@ -6708,10 +6764,10 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) u64 sas_address; u16 handle; - printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__); + printk(MPT2SAS_INFO_FMT "search for expanders: start\n", ioc->name); if (list_empty(&ioc->sas_expander_list)) - return; + goto out; handle = 0xFFFF; while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, @@ -6730,6 +6786,8 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) _scsih_mark_responding_expander(ioc, sas_address, handle); } + out: + printk(MPT2SAS_INFO_FMT "search for expanders: complete\n", ioc->name); } /** @@ -6745,6 +6803,8 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) struct _sas_node *sas_expander; struct _raid_device *raid_device, *raid_device_next; + printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n", + ioc->name); list_for_each_entry_safe(sas_device, sas_device_next, &ioc->sas_device_list, list) { @@ -6764,6 +6824,9 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) _scsih_remove_device(ioc, sas_device); } + if (!ioc->ir_firmware) + goto retry_expander_search; + list_for_each_entry_safe(raid_device, raid_device_next, &ioc->raid_device_list, list) { if (raid_device->responding) { @@ -6790,52 +6853,170 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) mpt2sas_expander_remove(ioc, sas_expander->sas_address); goto 
retry_expander_search; } + printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n", + ioc->name); + /* unblock devices */ + _scsih_ublock_io_all_device(ioc); +} + +static void +_scsih_refresh_expander_links(struct MPT2SAS_ADAPTER *ioc, + struct _sas_node *sas_expander, u16 handle) +{ + Mpi2ExpanderPage1_t expander_pg1; + Mpi2ConfigReply_t mpi_reply; + int i; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, handle))) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + mpt2sas_transport_update_links(ioc, sas_expander->sas_address, + le16_to_cpu(expander_pg1.AttachedDevHandle), i, + expander_pg1.NegotiatedLinkRate >> 4); + } } /** - * _scsih_hide_unhide_sas_devices - add/remove device to/from OS + * _scsih_scan_for_devices_after_reset - scan for devices after host reset * @ioc: per adapter object * * Return nothing. */ static void -_scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc) +_scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) { - struct _sas_device *sas_device, *sas_device_next; + Mpi2ExpanderPage0_t expander_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2RaidVolPage1_t volume_pg1; + Mpi2RaidVolPage0_t volume_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2EventIrConfigElement_t element; + Mpi2ConfigReply_t mpi_reply; + u8 phys_disk_num; + u16 ioc_status; + u16 handle, parent_handle; + u64 sas_address; + struct _sas_device *sas_device; + struct _sas_node *expander_device; + static struct _raid_device *raid_device; - if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag != - MFG_PAGE10_HIDE_IF_VOL_PRESENT) - return; + printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name); - if (ioc->hide_drives) { - if (_scsih_get_num_volumes(ioc)) - return; - ioc->hide_drives = 0; - list_for_each_entry_safe(sas_device, sas_device_next, - &ioc->sas_device_list, list) { - if (!mpt2sas_transport_port_add(ioc, sas_device->handle, - sas_device->sas_address_parent)) { - _scsih_sas_device_remove(ioc, sas_device); - } else if (!sas_device->starget) { - mpt2sas_transport_port_remove(ioc, - sas_device->sas_address, - sas_device->sas_address_parent); - _scsih_sas_device_remove(ioc, sas_device); - } + _scsih_sas_host_refresh(ioc); + + /* expanders */ + handle = 0xFFFF; + while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + handle = le16_to_cpu(expander_pg0.DevHandle); + expander_device = mpt2sas_scsih_expander_find_by_sas_address( + ioc, le64_to_cpu(expander_pg0.SASAddress)); + if (expander_device) + _scsih_refresh_expander_links(ioc, expander_device, + handle); + else + _scsih_expander_add(ioc, handle); + } + + if (!ioc->ir_firmware) + goto skip_to_sas; + + /* phys disk */ + phys_disk_num = 0xFF; + while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + if (sas_device) + continue; + if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, 
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle) != 0) + continue; + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, + &sas_address)) { + mpt2sas_transport_update_links(ioc, sas_address, + handle, sas_device_pg0.PhyNum, + MPI2_SAS_NEG_LINK_RATE_1_5); + set_bit(handle, ioc->pd_handles); + _scsih_add_device(ioc, handle, 0, 1); } - } else { - if (!_scsih_get_num_volumes(ioc)) - return; - ioc->hide_drives = 1; - list_for_each_entry_safe(sas_device, sas_device_next, - &ioc->sas_device_list, list) { - mpt2sas_transport_port_remove(ioc, - sas_device->sas_address, - sas_device->sas_address_parent); + } + + /* volumes */ + handle = 0xFFFF; + while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + handle = le16_to_cpu(volume_pg1.DevHandle); + raid_device = _scsih_raid_device_find_by_wwid(ioc, + le64_to_cpu(volume_pg1.WWID)); + if (raid_device) + continue; + if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, + &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) + continue; + if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { + memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); + element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; + element.VolDevHandle = volume_pg1.DevHandle; + _scsih_sas_volume_add(ioc, &element); + } + } + + skip_to_sas: + + /* sas devices */ + handle = 0xFFFF; + while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + handle = le16_to_cpu(sas_device_pg0.DevHandle); + if (!(_scsih_is_end_device( + le32_to_cpu(sas_device_pg0.DeviceInfo)))) + continue; + sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, + le64_to_cpu(sas_device_pg0.SASAddress)); + if (sas_device) + continue; + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { + mpt2sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + _scsih_add_device(ioc, handle, 0, 0); } } + + printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name); } + /** * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) * @ioc: per adapter object @@ -6871,7 +7052,6 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) } _scsih_fw_event_cleanup_queue(ioc); _scsih_flush_running_cmds(ioc); - _scsih_queue_rescan(ioc); break; case MPT2_IOC_DONE_RESET: dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " @@ -6881,6 +7061,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) _scsih_search_responding_sas_devices(ioc); _scsih_search_responding_raid_devices(ioc); _scsih_search_responding_expanders(ioc); + _scsih_error_recovery_delete_devices(ioc); break; } } @@ -6898,7 +7079,6 @@ _firmware_event_work(struct work_struct *work) { struct fw_event_work *fw_event = container_of(work, struct fw_event_work, delayed_work.work); - unsigned long flags; struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; /* the queue is being 
@@ -6898,7 +7079,6 @@ _firmware_event_work(struct work_struct *work)
 {
 	struct fw_event_work *fw_event = container_of(work,
 	    struct fw_event_work, delayed_work.work);
-	unsigned long flags;
 	struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
 
 	/* the queue is being flushed so ignore this event */
@@ -6908,23 +7088,31 @@ _firmware_event_work(struct work_struct *work)
 		return;
 	}
 
-	if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
-		_scsih_fw_event_free(ioc, fw_event);
-		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-		if (ioc->shost_recovery) {
-			init_completion(&ioc->shost_recovery_done);
-			spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-			    flags);
-			wait_for_completion(&ioc->shost_recovery_done);
-		} else
-			spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-			    flags);
+	switch (fw_event->event) {
+	case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
+		while (scsi_host_in_recovery(ioc->shost))
+			ssleep(1);
 		_scsih_remove_unresponding_sas_devices(ioc);
-		_scsih_hide_unhide_sas_devices(ioc);
-		return;
-	}
+		_scsih_scan_for_devices_after_reset(ioc);
+		break;
+	case MPT2SAS_PORT_ENABLE_COMPLETE:
+		if (!ioc->is_driver_loading && ioc->shost_recovery) {
+			_scsih_prep_device_scan(ioc);
+			_scsih_search_responding_sas_devices(ioc);
+			_scsih_search_responding_raid_devices(ioc);
+			_scsih_search_responding_expanders(ioc);
+		}
-	switch (fw_event->event) {
+		if (ioc->start_scan)
+			ioc->start_scan = 0;
+		else
+			complete(&ioc->port_enable_done);
+
+
+
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+		    "from worker thread\n", ioc->name));
+		break;
 	case MPT2SAS_TURN_ON_FAULT_LED:
 		_scsih_turn_on_fault_led(ioc, fw_event->device_handle);
 		break;
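One detail of the MPT2SAS_PORT_ENABLE_COMPLETE case just above is that the same firmware reply serves two different callers: during initial load the asynchronous scan merely polls ioc->start_scan, while a port enable issued later (for example after a reset) sleeps on ioc->port_enable_done. Below is a deliberately simplified, single-threaded sketch of that branch; struct ioc_mock and the helper name are invented for the illustration, and the real driver of course wakes its waiter with complete() rather than by setting a flag.

/* Toy illustration of "flag for the async scan, completion for a
 * sleeping caller".  All names here are made up for the sketch. */
#include <stdio.h>
#include <stdbool.h>

struct ioc_mock {
	bool start_scan;	/* set by scan_start, polled by scan_finished */
	bool port_enable_done;	/* stands in for complete(&ioc->port_enable_done) */
};

static void port_enable_complete(struct ioc_mock *ioc)
{
	if (ioc->start_scan)
		ioc->start_scan = false;	/* async scan path: scan_finished sees it */
	else
		ioc->port_enable_done = true;	/* blocking path: wake the waiter */
}

int main(void)
{
	/* initial load: scan_start set start_scan before issuing port enable */
	struct ioc_mock load = { .start_scan = true };
	port_enable_complete(&load);
	printf("load path: start_scan=%d (scan_finished may now return 1)\n",
	    load.start_scan);

	/* later re-enable: a sleeping caller waits on the completion instead */
	struct ioc_mock reset = { 0 };
	port_enable_complete(&reset);
	printf("reset path: port_enable_done=%d (waiter is woken)\n",
	    reset.port_enable_done);
	return 0;
}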
@@ -7121,6 +7309,8 @@ static struct scsi_host_template scsih_driver_template = {
 	.slave_configure		= _scsih_slave_configure,
 	.target_destroy			= _scsih_target_destroy,
 	.slave_destroy			= _scsih_slave_destroy,
+	.scan_finished			= _scsih_scan_finished,
+	.scan_start			= _scsih_scan_start,
 	.change_queue_depth		= _scsih_change_queue_depth,
 	.change_queue_type		= _scsih_change_queue_type,
 	.eh_abort_handler		= _scsih_abort,
@@ -7381,7 +7571,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
 	unsigned long flags;
 	int rc;
 
+	/* no Bios, return immediately */
+	if (!ioc->bios_pg3.BiosVersion)
+		return;
+
 	device = NULL;
+	is_raid = 0;
 	if (ioc->req_boot_device.device) {
 		device = ioc->req_boot_device.device;
 		is_raid = ioc->req_boot_device.is_raid;
@@ -7490,9 +7685,7 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
 static void
 _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
 {
-	u16 volume_mapping_flags =
-	    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
-	    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+	u16 volume_mapping_flags;
 
 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
 		return;  /* return when IOC doesn't support initiator mode */
@@ -7500,18 +7693,93 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
 	_scsih_probe_boot_devices(ioc);
 
 	if (ioc->ir_firmware) {
-		if ((volume_mapping_flags &
-		    MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) {
-			_scsih_probe_sas(ioc);
+		volume_mapping_flags =
+		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+		if (volume_mapping_flags ==
+		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
 			_scsih_probe_raid(ioc);
+			_scsih_probe_sas(ioc);
 		} else {
-			_scsih_probe_raid(ioc);
 			_scsih_probe_sas(ioc);
+			_scsih_probe_raid(ioc);
 		}
 	} else
 		_scsih_probe_sas(ioc);
 }
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus.  In our implementation, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	int rc;
+
+	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+		mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+	ioc->start_scan = 1;
+	rc = mpt2sas_port_enable(ioc);
+
+	if (rc != 0)
+		printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function is called periodically, with the scsi_host and the elapsed
+ * scan time in jiffies, until it returns 1.  In our implementation, we wait
+ * for firmware discovery to complete and then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+	if (time >= (300 * HZ)) {
+		ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+		printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
+		    "(timeout=300s)\n", ioc->name);
+		ioc->is_driver_loading = 0;
+		return 1;
+	}
+
+	if (ioc->start_scan)
+		return 0;
+
+	if (ioc->start_scan_failed) {
+		printk(MPT2SAS_INFO_FMT "port enable: FAILED with "
+		    "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed);
+		ioc->is_driver_loading = 0;
+		ioc->wait_for_discovery_to_complete = 0;
+		ioc->remove_host = 1;
+		return 1;
+	}
+
+	printk(MPT2SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name);
+	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
+
+	if (ioc->wait_for_discovery_to_complete) {
+		ioc->wait_for_discovery_to_complete = 0;
+		_scsih_probe_devices(ioc);
+	}
+	mpt2sas_base_start_watchdog(ioc);
+	ioc->is_driver_loading = 0;
+	return 1;
+}
+
+
 /**
  * _scsih_probe - attach and add scsi host
  * @pdev: PCI device struct
@@ -7548,6 +7816,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	ioc->tm_cb_idx = tm_cb_idx;
 	ioc->ctl_cb_idx = ctl_cb_idx;
 	ioc->base_cb_idx = base_cb_idx;
+	ioc->port_enable_cb_idx = port_enable_cb_idx;
 	ioc->transport_cb_idx = transport_cb_idx;
 	ioc->scsih_cb_idx = scsih_cb_idx;
 	ioc->config_cb_idx = config_cb_idx;
@@ -7620,14 +7889,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_thread_fail;
 	}
 
-	ioc->wait_for_port_enable_to_complete = 1;
+	ioc->is_driver_loading = 1;
 	if ((mpt2sas_base_attach(ioc))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
 		goto out_attach_fail;
 	}
-	ioc->wait_for_port_enable_to_complete = 0;
+	scsi_scan_host(shost);
 	if (ioc->is_warpdrive) {
 		if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
 			ioc->hide_drives = 0;
@@ -7650,6 +7919,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  out_thread_fail:
 	list_del(&ioc->list);
 	scsi_remove_host(shost);
+	scsi_host_put(shost);
  out_add_shost_fail:
 	return -ENODEV;
 }
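The probe changes above tie the flow together: _scsih_probe() now only marks is_driver_loading and calls scsi_scan_host(), and the midlayer then drives discovery through the .scan_start/.scan_finished hooks added to scsih_driver_template. The standalone sketch below mimics that contract under invented names (my_host, my_scan_start, my_scan_finished, a hand-rolled polling loop standing in for scsi_scan_host()); it illustrates the callback semantics and is not mpt2sas code.

/* Simulation of the async-scan contract: scan_start kicks discovery off
 * once, then scan_finished is polled until it reports completion or the
 * 300-second budget used by the driver runs out. */
#include <stdio.h>

#define HZ		100		/* pretend jiffies per second */
#define SCAN_TIMEOUT	(300 * HZ)	/* same budget as the driver */

struct my_host {
	int start_scan;			/* cleared when "firmware discovery" ends */
	unsigned long fw_done_at;	/* jiffy at which the fake firmware finishes */
};

static void my_scan_start(struct my_host *h)
{
	h->start_scan = 1;		/* mirrors ioc->start_scan = 1 + port enable */
	printf("scan_start: discovery kicked off\n");
}

static int my_scan_finished(struct my_host *h, unsigned long time)
{
	if (time >= SCAN_TIMEOUT) {
		printf("scan_finished: giving up after timeout\n");
		return 1;		/* returning 1 ends the scan even on failure */
	}
	if (h->start_scan)
		return 0;		/* still discovering: midlayer polls again */
	printf("scan_finished: complete at %lu jiffies\n", time);
	return 1;
}

int main(void)
{
	struct my_host h = { .start_scan = 0, .fw_done_at = 5 * HZ };

	my_scan_start(&h);
	/* crude stand-in for the polling done on our behalf by scsi_scan_host() */
	for (unsigned long jiffies = 0; ; jiffies++) {
		if (jiffies >= h.fw_done_at)
			h.start_scan = 0;	/* fake "port enable complete" event */
		if (my_scan_finished(&h, jiffies))
			break;
	}
	return 0;
}

The key property being modeled is that scan_finished must eventually return 1 on every path, success or failure, because the midlayer keeps polling until it does.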
@@ -7896,6 +8166,8 @@ _scsih_init(void)
 
 	/* base internal commands callback handler */
 	base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
+	port_enable_cb_idx = mpt2sas_base_register_callback_handler(
+	    mpt2sas_port_enable_done);
 
 	/* transport internal commands callback handler */
 	transport_cb_idx = mpt2sas_base_register_callback_handler(
@@ -7950,6 +8222,7 @@ _scsih_exit(void)
 	mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
 	mpt2sas_base_release_callback_handler(tm_cb_idx);
 	mpt2sas_base_release_callback_handler(base_cb_idx);
+	mpt2sas_base_release_callback_handler(port_enable_cb_idx);
 	mpt2sas_base_release_callback_handler(transport_cb_idx);
 	mpt2sas_base_release_callback_handler(scsih_cb_idx);
 	mpt2sas_base_release_callback_handler(config_cb_idx);
--
cgit v1.2.3