Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvfc.c')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  157
1 file changed, 113 insertions(+), 44 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 1b68734940b5..6540d48eb0e8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -326,6 +326,7 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
 /**
  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vhost:	ibmvfc host struct
  * @vfc_cmd:	ibmvfc command struct
  *
  * Return value:
@@ -603,8 +604,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
 			vhost->action = action;
 		break;
+	case IBMVFC_HOST_ACTION_REENABLE:
+	case IBMVFC_HOST_ACTION_RESET:
+		vhost->action = action;
+		break;
 	case IBMVFC_HOST_ACTION_INIT:
 	case IBMVFC_HOST_ACTION_TGT_DEL:
+	case IBMVFC_HOST_ACTION_LOGO:
+	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+	case IBMVFC_HOST_ACTION_NONE:
+	default:
 		switch (vhost->action) {
 		case IBMVFC_HOST_ACTION_RESET:
 		case IBMVFC_HOST_ACTION_REENABLE:
@@ -614,15 +624,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 			break;
 		}
 		break;
-	case IBMVFC_HOST_ACTION_LOGO:
-	case IBMVFC_HOST_ACTION_QUERY_TGTS:
-	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
-	case IBMVFC_HOST_ACTION_NONE:
-	case IBMVFC_HOST_ACTION_RESET:
-	case IBMVFC_HOST_ACTION_REENABLE:
-	default:
-		vhost->action = action;
-		break;
 	}
 }
@@ -650,8 +651,6 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
 /**
  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
  * @tgt:		ibmvfc target struct
- * @job_step:	job step to perform
- *
 **/
 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
 {
@@ -768,6 +767,8 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
 /**
  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
  * @vhost:	ibmvfc host who owns the event pool
+ * @queue:	ibmvfc queue struct
+ * @size:	pool size
  *
  * Returns zero on success.
 **/
@@ -820,6 +821,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
 /**
  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
  * @vhost:	ibmvfc host who owns the event pool
+ * @queue:	ibmvfc queue struct
  *
 **/
 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
@@ -1414,6 +1416,7 @@ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
 /**
  * ibmvfc_gather_partition_info - Gather info about the LPAR
+ * @vhost:	ibmvfc host struct
  *
  * Return value:
  *	none
@@ -1484,7 +1487,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 /**
  * ibmvfc_get_event - Gets the next free event in pool
- * @vhost:	ibmvfc host struct
+ * @queue:	ibmvfc queue struct
  *
  * Returns a free event from the pool.
 **/
@@ -1631,7 +1634,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
 /**
  * ibmvfc_timeout - Internal command timeout handler
- * @evt:	struct ibmvfc_event that timed out
+ * @t:	struct ibmvfc_event that timed out
  *
  * Called when an internally generated command times out
 **/
@@ -1892,8 +1895,8 @@ static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct s
 /**
  * ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @shost:	scsi host struct
  * @cmnd:	struct scsi_cmnd to be executed
- * @done:	Callback function to be called when cmnd is completed
  *
  * Returns:
  *	0 on success / other on failure
@@ -2324,7 +2327,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 /**
  * ibmvfc_match_rport - Match function for specified remote port
  * @evt:	ibmvfc event struct
- * @device:	device to match (rport)
+ * @rport:	device to match
  *
  * Returns:
  *	1 if event matches rport / 0 if event does not match rport
@@ -2372,6 +2375,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
 }
 
 /**
+ * ibmvfc_event_is_free - Check if event is free or not
+ * @evt:	ibmvfc event struct
+ *
+ * Returns:
+ *	true / false
+ **/
+static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_event *loop_evt;
+
+	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
+		if (loop_evt == evt)
+			return true;
+
+	return false;
+}
+
+/**
  * ibmvfc_wait_for_ops - Wait for ops to complete
  * @vhost:	ibmvfc host struct
  * @device:	device to match (starget or sdev)
@@ -2385,35 +2406,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
 {
 	struct ibmvfc_event *evt;
 	DECLARE_COMPLETION_ONSTACK(comp);
-	int wait;
+	int wait, i, q_index, q_size;
 	unsigned long flags;
 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+	struct ibmvfc_queue *queues;
 
 	ENTER;
+	if (vhost->mq_enabled && vhost->using_channels) {
+		queues = vhost->scsi_scrqs.scrqs;
+		q_size = vhost->scsi_scrqs.active_queues;
+	} else {
+		queues = &vhost->crq;
+		q_size = 1;
+	}
+
 	do {
 		wait = 0;
-		spin_lock_irqsave(&vhost->crq.l_lock, flags);
-		list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-			if (match(evt, device)) {
-				evt->eh_comp = &comp;
-				wait++;
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		for (q_index = 0; q_index < q_size; q_index++) {
+			spin_lock(&queues[q_index].l_lock);
+			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+				evt = &queues[q_index].evt_pool.events[i];
+				if (!ibmvfc_event_is_free(evt)) {
+					if (match(evt, device)) {
+						evt->eh_comp = &comp;
+						wait++;
+					}
+				}
 			}
+			spin_unlock(&queues[q_index].l_lock);
 		}
-		spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
 		if (wait) {
 			timeout = wait_for_completion_timeout(&comp, timeout);
 
 			if (!timeout) {
 				wait = 0;
-				spin_lock_irqsave(&vhost->crq.l_lock, flags);
-				list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-					if (match(evt, device)) {
-						evt->eh_comp = NULL;
-						wait++;
+				spin_lock_irqsave(vhost->host->host_lock, flags);
+				for (q_index = 0; q_index < q_size; q_index++) {
+					spin_lock(&queues[q_index].l_lock);
+					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+						evt = &queues[q_index].evt_pool.events[i];
+						if (!ibmvfc_event_is_free(evt)) {
+							if (match(evt, device)) {
+								evt->eh_comp = NULL;
+								wait++;
							}
+						}
 					}
+					spin_unlock(&queues[q_index].l_lock);
 				}
-				spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
 				if (wait)
 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
 				LEAVE;
@@ -3135,8 +3179,9 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
  * @crq:	Command/Response queue
  * @vhost:	ibmvfc host struct
+ * @evt_doneq:	Event done queue
  *
- **/
+**/
 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
 			      struct list_head *evt_doneq)
 {
@@ -3317,7 +3362,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
  * ibmvfc_change_queue_depth - Change the device's queue depth
  * @sdev:	scsi device struct
  * @qdepth:	depth to set
- * @reason:	calling context
  *
  * Return value:
  *	actual depth set
@@ -3389,6 +3433,7 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
 /**
  * ibmvfc_show_log_level - Show the adapter's error logging level
  * @dev:	class device struct
+ * @attr:	unused
  * @buf:	buffer
  *
  * Return value:
@@ -3411,7 +3456,9 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
 /**
  * ibmvfc_store_log_level - Change the adapter's error logging level
  * @dev:	class device struct
+ * @attr:	unused
  * @buf:	buffer
+ * @count:	buffer size
  *
  * Return value:
  *	number of bytes printed to buffer
@@ -3489,7 +3536,7 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
 				 struct bin_attribute *bin_attr,
 				 char *buf, loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct ibmvfc_host *vhost = shost_priv(shost);
 	unsigned long flags = 0;
@@ -4121,6 +4168,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
 /**
  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
  * @tgt:		ibmvfc target struct
+ * @done:		Routine to call when the event is responded to
  *
  * Returns:
  *	Allocated and initialized ibmvfc_event struct
@@ -4437,7 +4485,7 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
 /**
  * ibmvfc_adisc_timeout - Handle an ADISC timeout
- * @tgt:		ibmvfc target struct
+ * @t:		ibmvfc target struct
  *
  * If an ADISC times out, send a cancel. If the cancel times
  * out, reset the CRQ. When the ADISC comes back as cancelled,
@@ -4640,7 +4688,7 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
 /**
  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
  * @vhost:		ibmvfc host struct
- * @scsi_id:	SCSI ID to allocate target for
+ * @target:		Holds SCSI ID to allocate target forand the WWPN
  *
  * Returns:
  *	0 on success / other on failure
@@ -5070,7 +5118,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
 /**
  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
- * @vhost:		ibmvfc host struct
+ * @evt:		ibmvfc event struct
  *
 **/
 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
@@ -5332,30 +5380,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 	case IBMVFC_HOST_ACTION_INIT_WAIT:
 		break;
 	case IBMVFC_HOST_ACTION_RESET:
-		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
 		list_splice_init(&vhost->purge, &purge);
 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		ibmvfc_complete_purge(&purge);
 		rc = ibmvfc_reset_crq(vhost);
+
 		spin_lock_irqsave(vhost->host->host_lock, flags);
-		if (rc == H_CLOSED)
+		if (!rc || rc == H_CLOSED)
 			vio_enable_interrupts(to_vio_dev(vhost->dev));
-		if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
-		    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
-			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
-			dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
+			/*
+			 * The only action we could have changed to would have
+			 * been reenable, in which case, we skip the rest of
+			 * this path and wait until we've done the re-enable
+			 * before sending the crq init.
+			 */
+			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+
+			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+			}
 		}
 		break;
 	case IBMVFC_HOST_ACTION_REENABLE:
-		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
 		list_splice_init(&vhost->purge, &purge);
 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		ibmvfc_complete_purge(&purge);
 		rc = ibmvfc_reenable_crq_queue(vhost);
+
 		spin_lock_irqsave(vhost->host->host_lock, flags);
-		if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
-			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
-			dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
+			/*
+			 * The only action we could have changed to would have
+			 * been reset, in which case, we skip the rest of this
+			 * path and wait until we've done the reset before
+			 * sending the crq init.
+			 */
+			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+			}
 		}
 		break;
 	case IBMVFC_HOST_ACTION_LOGO:
@@ -5784,6 +5851,8 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
 			  vhost->disc_buf_dma);
 	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
 			  vhost->login_buf, vhost->login_buf_dma);
+	dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
+			  vhost->channel_setup_buf, vhost->channel_setup_dma);
 	dma_pool_destroy(vhost->sg_pool);
 	ibmvfc_free_queue(vhost, async_q);
 	LEAVE;
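Editor's note on the ibmvfc_wait_for_ops() hunk above: the patch switches from walking a single queue's sent list to scanning every slot of each queue's event pool and skipping the slots that are currently on the free list (ibmvfc_event_is_free()). The following stand-alone user-space C program is a minimal sketch of that pool-scan pattern only; struct demo_queue, struct demo_event, demo_event_is_free() and the rest are illustrative names invented for this sketch, not driver code, and the host/queue locking and completion wait from the real function are deliberately omitted.

/* Toy pool-scan sketch; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

struct demo_event {
	int tag;                       /* stands in for the event payload */
	struct demo_event *next_free;  /* simple singly linked free list */
};

struct demo_queue {
	struct demo_event pool[POOL_SIZE];
	struct demo_event *free_head;
};

/* Analogue of ibmvfc_event_is_free(): free means "sits on the free list". */
static bool demo_event_is_free(const struct demo_queue *q,
			       const struct demo_event *evt)
{
	for (const struct demo_event *e = q->free_head; e; e = e->next_free)
		if (e == evt)
			return true;
	return false;
}

/* Count in-flight (non-free) events across all queues that match @tag,
 * mirroring the per-queue pool walk the patch adds to ibmvfc_wait_for_ops(). */
static int demo_count_matches(struct demo_queue *queues, int q_size, int tag)
{
	int wait = 0;

	for (int q = 0; q < q_size; q++)
		for (int i = 0; i < POOL_SIZE; i++) {
			struct demo_event *evt = &queues[q].pool[i];

			if (!demo_event_is_free(&queues[q], evt) && evt->tag == tag)
				wait++;
		}
	return wait;
}

int main(void)
{
	static struct demo_queue queues[2];

	/* Queue 0: slots 0 and 1 free, slots 2 and 3 in flight. */
	queues[0].pool[0].next_free = &queues[0].pool[1];
	queues[0].free_head = &queues[0].pool[0];
	queues[0].pool[2].tag = 7;
	queues[0].pool[3].tag = 5;

	/* Queue 1: only slot 0 in flight, and it also matches tag 7. */
	queues[1].pool[0].tag = 7;
	queues[1].pool[1].next_free = &queues[1].pool[2];
	queues[1].pool[2].next_free = &queues[1].pool[3];
	queues[1].free_head = &queues[1].pool[1];

	printf("outstanding events matching tag 7: %d\n",
	       demo_count_matches(queues, 2, 7));  /* prints 2 */
	return 0;
}

Built with any C99 compiler (cc demo.c && ./a.out) this prints 2: one in-flight match per queue, which is the count the real function uses to decide whether it still has to wait for outstanding commands.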