Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c | 228
1 file changed, 155 insertions(+), 73 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f6097b89d5d3..1cbc497e00bd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -44,6 +44,8 @@ static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);
+static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
+
static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
@@ -140,6 +142,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
+ if (cmd->request->rq_flags & RQF_DONTPREP) {
+ cmd->request->rq_flags &= ~RQF_DONTPREP;
+ scsi_mq_uninit_cmd(cmd);
+ } else {
+ WARN_ON_ONCE(true);
+ }
blk_mq_requeue_request(cmd->request, true);
put_device(&sdev->sdev_gendev);
}
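
With this change the requeue path owns the unprepare step: a command is only handed back to the block layer after RQF_DONTPREP has been cleared and scsi_mq_uninit_cmd() has run, so preparation happens again on the next dispatch. A minimal sketch of the cycle this enforces (simplified pseudocode, not part of the patch):

	/* Sketch (assumption): the prepare/unprepare pairing around a requeue. */
	if (!(req->rq_flags & RQF_DONTPREP)) {
		scsi_mq_prep_fn(req);             /* build the scsi_cmnd      */
		req->rq_flags |= RQF_DONTPREP;    /* mark request as prepared */
	}
	/* ... dispatch fails, e.g. with BLK_STS_RESOURCE ... */
	scsi_mq_requeue_cmd(cmd);                 /* unprepare, then requeue  */
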
@@ -244,9 +252,9 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue,
+ req = blk_get_request_flags(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -260,7 +268,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
rq->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
- req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
+ req->rq_flags |= rq_flags | RQF_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
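
The RQF_PREEMPT marking moves from after allocation to allocation time: passing BLK_MQ_REQ_PREEMPT to blk_get_request_flags() lets the block layer admit the request even while the queue is in the preempt-only state used by scsi_device_quiesce() (see below). A hedged sketch of the allocation call:

	/* Sketch: allocate a pass-through request that must still be
	 * admitted on a quiesced queue (error handling trimmed).
	 */
	struct request *req;

	req = blk_get_request_flags(sdev->request_queue, REQ_OP_SCSI_IN,
				    BLK_MQ_REQ_PREEMPT);
	if (IS_ERR(req))
		return PTR_ERR(req);
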
@@ -627,7 +635,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes, unsigned int bidi_bytes)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;
@@ -642,6 +650,11 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->rq_disk);
+ if (!blk_rq_is_scsi(req)) {
+ WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
+ cmd->flags &= ~SCMD_INITIALIZED;
+ }
+
if (req->mq_ctx) {
/*
* In the MQ case the command gets freed by __blk_mq_end_request,
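
req->special is on its way out here: the scsi_cmnd is the blk-mq payload (PDU) allocated immediately behind the request, so converting between the two is plain pointer arithmetic. For reference, the helpers of this era are essentially (from include/linux/blk-mq.h):

	static inline void *blk_mq_rq_to_pdu(struct request *rq)
	{
		return rq + 1;
	}

	static inline struct request *blk_mq_rq_from_pdu(void *pdu)
	{
		return pdu - sizeof(struct request);
	}
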
@@ -977,8 +990,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* A new command will be prepared and issued.
*/
if (q->mq_ops) {
- cmd->request->rq_flags &= ~RQF_DONTPREP;
- scsi_mq_uninit_cmd(cmd);
scsi_mq_requeue_cmd(cmd);
} else {
scsi_release_buffers(cmd);
@@ -1107,15 +1118,23 @@ err_exit:
EXPORT_SYMBOL(scsi_init_io);
/**
- * scsi_initialize_rq - initialize struct scsi_cmnd.req
+ * scsi_initialize_rq - initialize struct scsi_cmnd partially
+ * @rq: Request associated with the SCSI command to be initialized.
*
- * Called from inside blk_get_request().
+ * This function initializes the members of struct scsi_cmnd that must be
+ * initialized before request processing starts and that won't be
+ * reinitialized if a SCSI command is requeued.
+ *
+ * Called from inside blk_get_request() for pass-through requests and from
+ * inside scsi_init_command() for filesystem requests.
*/
void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
scsi_req_init(&cmd->req);
+ cmd->jiffies_at_alloc = jiffies;
+ cmd->retries = 0;
}
EXPORT_SYMBOL(scsi_initialize_rq);
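
For pass-through requests the block layer invokes this hook at allocation time, roughly as follows (simplified sketch of blk_get_request() in block/blk-core.c, hedged):

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);	/* -> scsi_initialize_rq() */

Filesystem requests skip that path, which is why scsi_init_command() below now calls scsi_initialize_rq() itself the first time it sees a command.
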
@@ -1153,8 +1172,18 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
void *buf = cmd->sense_buffer;
void *prot = cmd->prot_sdb;
- unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
+ unsigned long jiffies_at_alloc;
+ int retries;
+ if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
+ flags |= SCMD_INITIALIZED;
+ scsi_initialize_rq(rq);
+ }
+
+ jiffies_at_alloc = cmd->jiffies_at_alloc;
+ retries = cmd->retries;
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
@@ -1162,16 +1191,17 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
cmd->device = dev;
cmd->sense_buffer = buf;
cmd->prot_sdb = prot;
- cmd->flags = unchecked_isa_dma;
+ cmd->flags = flags;
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- cmd->jiffies_at_alloc = jiffies;
+ cmd->jiffies_at_alloc = jiffies_at_alloc;
+ cmd->retries = retries;
scsi_add_cmd_to_list(cmd);
}
static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
/*
* Passthrough requests may transfer data, in which case they must
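
The save/memset/restore pattern in scsi_init_command() above keeps the fields that must survive a requeue: jiffies_at_alloc and retries now persist for the lifetime of the request instead of being reset on every (re)preparation, and only the preserved flag bits survive the memset. The mask referenced above is, per this series (assumed from include/scsi/scsi_cmnd.h):

	#define SCMD_PRESERVED_FLAGS	(SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
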
@@ -1202,7 +1232,7 @@ static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
*/
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
int ret = sdev->handler->prep_fn(sdev, req);
@@ -1217,7 +1247,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
@@ -1271,7 +1301,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
/*
* If the device is blocked we defer normal commands.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_DEFER;
break;
default:
@@ -1280,7 +1310,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* special commands. In particular any user initiated
* command is not allowed.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_KILL;
break;
}
@@ -1354,7 +1384,7 @@ out:
static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
- scsi_uninit_cmd(req->special);
+ scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
}
/*
@@ -1545,7 +1575,7 @@ static int scsi_lld_busy(struct request_queue *q)
*/
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev;
struct scsi_target *starget;
struct Scsi_Host *shost;
@@ -1576,7 +1606,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
static void scsi_softirq_done(struct request *rq)
{
- struct scsi_cmnd *cmd = rq->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;
@@ -1720,7 +1750,10 @@ static void scsi_done(struct scsi_cmnd *cmd)
*
* Returns: Nothing
*
- * Lock status: IO request lock assumed to be held when called.
+ * Lock status: request queue lock assumed to be held when called.
+ *
+ * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
+ * protection for ZBC disks.
*/
static void scsi_request_fn(struct request_queue *q)
__releases(q->queue_lock)
@@ -1764,8 +1797,8 @@ static void scsi_request_fn(struct request_queue *q)
blk_start_request(req);
spin_unlock_irq(q->queue_lock);
- cmd = req->special;
- if (unlikely(cmd == NULL)) {
+ cmd = blk_mq_rq_to_pdu(req);
+ if (cmd != req->special) {
printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to "
"linux-scsi@vger.kernel.org\n",
@@ -1910,6 +1943,33 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
+static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ atomic_dec(&sdev->device_busy);
+ put_device(&sdev->sdev_gendev);
+}
+
+static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ if (!get_device(&sdev->sdev_gendev))
+ goto out;
+ if (!scsi_dev_queue_ready(q, sdev))
+ goto out_put_device;
+
+ return true;
+
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+out:
+ return false;
+}
+
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1923,16 +1983,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
if (ret != BLK_STS_OK)
- goto out;
+ goto out_put_budget;
ret = BLK_STS_RESOURCE;
- if (!get_device(&sdev->sdev_gendev))
- goto out;
-
- if (!scsi_dev_queue_ready(q, sdev))
- goto out_put_device;
if (!scsi_target_queue_ready(shost, sdev))
- goto out_dec_device_busy;
+ goto out_put_budget;
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
@@ -1963,15 +2018,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
atomic_dec(&shost->host_busy);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
-out_dec_device_busy:
- atomic_dec(&sdev->device_busy);
-out_put_device:
- put_device(&sdev->sdev_gendev);
-out:
+out_put_budget:
+ scsi_mq_put_budget(hctx);
switch (ret) {
case BLK_STS_OK:
break;
@@ -2001,8 +2053,8 @@ static enum blk_eh_timer_return scsi_timeout(struct request *req,
return scsi_times_out(req);
}
-static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
+static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct Scsi_Host *shost = set->driver_data;
const bool unchecked_isa_dma = shost->unchecked_isa_dma;
@@ -2026,8 +2078,8 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
return 0;
}
-static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx)
+static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -2104,7 +2156,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
-static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
+ gfp_t gfp)
{
struct Scsi_Host *shost = q->rq_alloc_data;
const bool unchecked_isa_dma = shost->unchecked_isa_dma;
@@ -2134,7 +2187,7 @@ fail:
return -ENOMEM;
}
-static void scsi_exit_rq(struct request_queue *q, struct request *rq)
+static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -2144,7 +2197,7 @@ static void scsi_exit_rq(struct request_queue *q, struct request *rq)
cmd->sense_buffer);
}
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
@@ -2155,8 +2208,8 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
q->rq_alloc_data = shost;
q->request_fn = scsi_request_fn;
- q->init_rq_fn = scsi_init_rq;
- q->exit_rq_fn = scsi_exit_rq;
+ q->init_rq_fn = scsi_old_init_rq;
+ q->exit_rq_fn = scsi_old_exit_rq;
q->initialize_rq_fn = scsi_initialize_rq;
if (blk_init_allocated_queue(q) < 0) {
@@ -2174,14 +2227,16 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
}
static const struct blk_mq_ops scsi_mq_ops = {
+ .get_budget = scsi_mq_get_budget,
+ .put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
.show_rq = scsi_show_rq,
#endif
- .init_request = scsi_init_request,
- .exit_request = scsi_exit_request,
+ .init_request = scsi_mq_init_request,
+ .exit_request = scsi_mq_exit_request,
.initialize_rq_fn = scsi_initialize_rq,
.map_queues = scsi_map_queues,
};
@@ -2547,7 +2602,7 @@ EXPORT_SYMBOL(scsi_test_unit_ready);
* @sdev: scsi device to change the state of.
* @state: state to change to.
*
- * Returns zero if unsuccessful or an error if the requested
+ * Returns zero if successful or an error if the requested
* transition is illegal.
*/
int
@@ -2702,6 +2757,9 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
break;
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
+ envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
+ break;
default:
/* do nothing */
break;
@@ -2806,6 +2864,7 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
case SDEV_EVT_LUN_CHANGE_REPORTED:
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
default:
/* do nothing */
break;
@@ -2888,21 +2947,37 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
int
scsi_device_quiesce(struct scsi_device *sdev)
{
+ struct request_queue *q = sdev->request_queue;
int err;
+ /*
+ * It is allowed to call scsi_device_quiesce() multiple times from
+ * the same context but concurrent scsi_device_quiesce() calls are
+ * not allowed.
+ */
+ WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
+
+ blk_set_preempt_only(q);
+
+ blk_mq_freeze_queue(q);
+ /*
+ * Ensure that the effect of blk_set_preempt_only() will be visible
+ * for percpu_ref_tryget() callers that occur after the queue
+ * unfreeze even if the queue was already frozen before this function
+ * was called. See also https://lwn.net/Articles/573497/.
+ */
+ synchronize_rcu();
+ blk_mq_unfreeze_queue(q);
+
mutex_lock(&sdev->state_mutex);
err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ if (err == 0)
+ sdev->quiesced_by = current;
+ else
+ blk_clear_preempt_only(q);
mutex_unlock(&sdev->state_mutex);
- if (err)
- return err;
-
- scsi_run_queue(sdev->request_queue);
- while (atomic_read(&sdev->device_busy)) {
- msleep_interruptible(200);
- scsi_run_queue(sdev->request_queue);
- }
- return 0;
+ return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);
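
The rewritten scsi_device_quiesce() replaces the old poll loop (run the queue, sleep while device_busy is nonzero) with a queue-level gate: the queue is marked preempt-only, and a freeze/unfreeze cycle plus synchronize_rcu() guarantees every later blk_queue_enter() caller observes the flag. Only BLK_MQ_REQ_PREEMPT allocations (e.g. from scsi_execute()) get through. The admission check this pairs with looks roughly like (sketch of the block-layer side, hedged):

	/* Sketch of blk_queue_enter() admission with preempt-only support. */
	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;

	for (;;) {
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			if (preempt || !blk_queue_preempt_only(q))
				return 0;	/* admitted */
			percpu_ref_put(&q->q_usage_counter);
		}
		/* otherwise wait for unfreeze / preempt-only to clear */
	}
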
@@ -2922,9 +2997,11 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- if (sdev->sdev_state == SDEV_QUIESCE &&
- scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
- scsi_run_queue(sdev->request_queue);
+ WARN_ON_ONCE(!sdev->quiesced_by);
+ sdev->quiesced_by = NULL;
+ blk_clear_preempt_only(sdev->request_queue);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
@@ -3073,19 +3150,24 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
* Try to transition the scsi device to SDEV_RUNNING or one of the
* offlined states and goose the device queue if successful.
*/
- if ((sdev->sdev_state == SDEV_BLOCK) ||
- (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
+ switch (sdev->sdev_state) {
+ case SDEV_BLOCK:
+ case SDEV_TRANSPORT_OFFLINE:
sdev->sdev_state = new_state;
- else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+ break;
+ case SDEV_CREATED_BLOCK:
if (new_state == SDEV_TRANSPORT_OFFLINE ||
new_state == SDEV_OFFLINE)
sdev->sdev_state = new_state;
else
sdev->sdev_state = SDEV_CREATED;
- } else if (sdev->sdev_state != SDEV_CANCEL &&
- sdev->sdev_state != SDEV_OFFLINE)
+ break;
+ case SDEV_CANCEL:
+ case SDEV_OFFLINE:
+ break;
+ default:
return -EINVAL;
-
+ }
scsi_start_queue(sdev);
return 0;
@@ -3262,8 +3344,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
u8 cur_id_type = 0xff;
u8 cur_id_size = 0;
- unsigned char *d, *cur_id_str;
- unsigned char __rcu *vpd_pg83;
+ const unsigned char *d, *cur_id_str;
+ const struct scsi_vpd *vpd_pg83;
int id_size = -EINVAL;
rcu_read_lock();
@@ -3294,8 +3376,8 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
}
memset(id, 0, id_len);
- d = vpd_pg83 + 4;
- while (d < vpd_pg83 + sdev->vpd_pg83_len) {
+ d = vpd_pg83->data + 4;
+ while (d < vpd_pg83->data + vpd_pg83->len) {
/* Skip designators not referring to the LUN */
if ((d[1] & 0x30) != 0x00)
goto next_desig;
@@ -3411,8 +3493,8 @@ EXPORT_SYMBOL(scsi_vpd_lun_id);
*/
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
- unsigned char *d;
- unsigned char __rcu *vpd_pg83;
+ const unsigned char *d;
+ const struct scsi_vpd *vpd_pg83;
int group_id = -EAGAIN, rel_port = -1;
rcu_read_lock();
@@ -3422,8 +3504,8 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
return -ENXIO;
}
- d = sdev->vpd_pg83 + 4;
- while (d < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
+ d = vpd_pg83->data + 4;
+ while (d < vpd_pg83->data + vpd_pg83->len) {
switch (d[1] & 0xf) {
case 0x4:
/* Relative target port */
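
The VPD accessors now read a reference-counted object under RCU instead of a bare buffer paired with a separate length field, which closes the race with concurrent VPD page updates. The structure introduced by this series is (from include/scsi/scsi_device.h, as assumed here):

	struct scsi_vpd {
		struct rcu_head	rcu;
		int		len;
		unsigned char	data[];
	};

Readers take rcu_read_lock(), rcu_dereference() sdev->vpd_pg83 and use len/data as a unit, which is exactly what the rewritten loops above do.
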