Diffstat (limited to 'drivers/mmc/core')
-rw-r--r-- | drivers/mmc/core/block.c    | 125
-rw-r--r-- | drivers/mmc/core/core.c     |  54
-rw-r--r-- | drivers/mmc/core/mmc.c      |  56
-rw-r--r-- | drivers/mmc/core/mmc_ops.c  | 194
-rw-r--r-- | drivers/mmc/core/mmc_ops.h  |  15
-rw-r--r-- | drivers/mmc/core/mmc_test.c |  52
-rw-r--r-- | drivers/mmc/core/queue.c    |  22
-rw-r--r-- | drivers/mmc/core/sd.c       |  10
-rw-r--r-- | drivers/mmc/core/sdio_irq.c |  15
9 files changed, 315 insertions(+), 228 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 663d87924e5e..8499b56a15a8 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -70,7 +70,6 @@ MODULE_ALIAS("mmc:block");
  * ample.
  */
 #define MMC_BLK_TIMEOUT_MS  (10 * 1000)
-#define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
 
@@ -168,6 +167,11 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
 static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq);
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
 
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
@@ -408,44 +412,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
	return 0;
 }
 
-static int ioctl_do_sanitize(struct mmc_card *card)
-{
-	int err;
-
-	if (!mmc_can_sanitize(card)) {
-		pr_warn("%s: %s - SANITIZE is not supported\n",
-			mmc_hostname(card->host), __func__);
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
-		mmc_hostname(card->host), __func__);
-
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			 EXT_CSD_SANITIZE_START, 1,
-			 MMC_SANITIZE_REQ_TIMEOUT);
-
-	if (err)
-		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
-		       mmc_hostname(card->host), __func__, err);
-
-	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
-		 __func__);
-out:
-	return err;
-}
-
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
-	/*
-	 * Some cards mishandle the status bits, so make sure to check both the
-	 * busy indication and the card state.
-	 */
-	return status & R1_READY_FOR_DATA &&
-	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
			    u32 *resp_errs)
 {
@@ -477,13 +443,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
				__func__, status);
			return -ETIMEDOUT;
		}
-
-		/*
-		 * Some cards mishandle the status bits,
-		 * so make sure to check both the busy
-		 * indication and the card state.
-		 */
-	} while (!mmc_blk_in_tran_state(status));
+	} while (!mmc_ready_for_data(status));
 
	return err;
 }
@@ -580,15 +540,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
	}
 
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
-	    (cmd.opcode == MMC_SWITCH)) {
-		err = ioctl_do_sanitize(card);
-
-		if (err)
-			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
-			       __func__, err);
-
-		return err;
-	}
+	    (cmd.opcode == MMC_SWITCH))
+		return mmc_sanitize(card);
 
	mmc_wait_for_req(card->host, &mrq);
 
@@ -1532,9 +1485,30 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
	return mmc_blk_cqe_start_req(mq->card->host, mrq);
 }
 
+static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_host *host = mq->card->host;
+	int err;
+
+	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
+	mmc_pre_req(host, &mqrq->brq.mrq);
+
+	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
+	if (err)
+		mmc_post_req(host, &mqrq->brq.mrq, err);
+
+	return err;
+}
+
 static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_host *host = mq->card->host;
+
+	if (host->hsq_enabled)
+		return mmc_blk_hsq_issue_rw_rq(mq, req);
 
	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
 
@@ -1666,7 +1640,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
			goto error_exit;
 
		if (!mmc_host_is_spi(host) &&
-		    !mmc_blk_in_tran_state(status)) {
+		    !mmc_ready_for_data(status)) {
			err = mmc_blk_fix_state(card, req);
			if (err)
				goto error_exit;
@@ -1726,7 +1700,7 @@ static bool mmc_blk_status_error(struct request *req, u32 status)
	return brq->cmd.resp[0]  & CMD_ERRORS    ||
	       brq->stop.resp[0] & stop_err_bits ||
	       status            & stop_err_bits ||
-	       (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
 }
 
 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
@@ -1788,7 +1762,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
 
	/* Try to get back to "tran" state */
	if (!mmc_host_is_spi(mq->card->host) &&
-	    (err || !mmc_blk_in_tran_state(status)))
+	    (err || !mmc_ready_for_data(status)))
		err = mmc_blk_fix_state(mq->card, req);
 
	/*
@@ -1920,6 +1894,41 @@ static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
		mmc_run_bkops(mq->card);
 }
 
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
+{
+	struct mmc_queue_req *mqrq =
+		container_of(mrq, struct mmc_queue_req, brq.mrq);
+	struct request *req = mmc_queue_req_to_req(mqrq);
+	struct request_queue *q = req->q;
+	struct mmc_queue *mq = q->queuedata;
+	struct mmc_host *host = mq->card->host;
+	unsigned long flags;
+
+	if (mmc_blk_rq_error(&mqrq->brq) ||
+	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+		spin_lock_irqsave(&mq->lock, flags);
+		mq->recovery_needed = true;
+		mq->recovery_req = req;
+		spin_unlock_irqrestore(&mq->lock, flags);
+
+		host->cqe_ops->cqe_recovery_start(host);
+
+		schedule_work(&mq->recovery_work);
+		return;
+	}
+
+	mmc_blk_rw_reset_success(mq, req);
+
+	/*
+	 * Block layer timeouts race with completions which means the normal
+	 * completion path cannot be used during recovery.
+	 */
+	if (mq->in_recovery)
+		mmc_blk_cqe_complete_rq(mq, req);
+	else
+		blk_mq_complete_request(req);
+}
+
 void mmc_blk_mq_complete(struct request *req)
 {
	struct mmc_queue *mq = req->q->queuedata;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a971c4bcc442..4c5de6d37ac7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -403,23 +403,6 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 
		cmd = mrq->cmd;
 
-		/*
-		 * If host has timed out waiting for the sanitize
-		 * to complete, card might be still in programming state
-		 * so let's try to bring the card out of programming
-		 * state.
-		 */
-		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
-			if (!mmc_interrupt_hpi(host->card)) {
-				pr_warn("%s: %s: Interrupted sanitize\n",
-					mmc_hostname(host), __func__);
-				cmd->error = 0;
-				break;
-			} else {
-				pr_err("%s: %s: Failed to interrupt sanitize\n",
-				       mmc_hostname(host), __func__);
-			}
-		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;
@@ -1658,8 +1641,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
	struct mmc_command cmd = {};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
-	unsigned long timeout;
-	int loop_udelay=64, udelay_max=32768;
	int err;
 
	mmc_retune_hold(card->host);
@@ -1763,38 +1744,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;
 
-	timeout = jiffies + msecs_to_jiffies(busy_timeout);
-	do {
-		memset(&cmd, 0, sizeof(struct mmc_command));
-		cmd.opcode = MMC_SEND_STATUS;
-		cmd.arg = card->rca << 16;
-		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-		/* Do not retry else we can't see errors */
-		err = mmc_wait_for_cmd(card->host, &cmd, 0);
-		if (err || R1_STATUS(cmd.resp[0])) {
-			pr_err("error %d requesting status %#x\n",
-				err, cmd.resp[0]);
-			err = -EIO;
-			goto out;
-		}
-
-		/* Timeout if the device never becomes ready for data and
-		 * never leaves the program state.
-		 */
-		if (time_after(jiffies, timeout)) {
-			pr_err("%s: Card stuck in programming state! %s\n",
-				mmc_hostname(card->host), __func__);
-			err = -EIO;
-			goto out;
-		}
-		if ((cmd.resp[0] & R1_READY_FOR_DATA) &&
-		    R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG)
-			break;
-
-		usleep_range(loop_udelay, loop_udelay*2);
-		if (loop_udelay < udelay_max)
-			loop_udelay *= 2;
-	} while (1);
+	/* Let's poll to find out when the erase operation completes. */
+	err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);
 
 out:
	mmc_retune_release(card->host);
@@ -1957,7 +1908,6 @@ int mmc_can_sanitize(struct mmc_card *card)
		return 1;
	return 0;
 }
-EXPORT_SYMBOL(mmc_can_sanitize);
 
 int mmc_can_secure_erase_trim(struct mmc_card *card)
 {
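The open-coded erase polling loop deleted above survives in spirit in mmc_poll_for_busy() (see the mmc_ops.c hunks below): poll CMD13, back off exponentially between polls, and give up once the busy timeout expires. A self-contained userspace sketch of that pattern, with illustrative names only:

	#include <stdbool.h>
	#include <time.h>
	#include <unistd.h>

	/*
	 * Toy model of the capped exponential backoff in
	 * __mmc_poll_for_busy(): sleep 32us, 64us, ... up to 32768us
	 * between polls, and fail once the deadline passes while the
	 * device still reports busy.
	 */
	static int poll_for_busy(bool (*is_busy)(void), unsigned int timeout_ms)
	{
		unsigned int udelay = 32, udelay_max = 32768;
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			long elapsed_ms;

			if (!is_busy())
				return 0;

			clock_gettime(CLOCK_MONOTONIC, &now);
			elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				     (now.tv_nsec - start.tv_nsec) / 1000000;
			if (elapsed_ms > (long)timeout_ms)
				return -1; /* -ETIMEDOUT in the kernel */

			usleep(udelay);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	}

	static unsigned int polls;

	static bool fake_busy(void)
	{
		return ++polls < 5; /* busy for the first few polls */
	}

	int main(void)
	{
		return poll_for_busy(fake_busy, 1000);
	}
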
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index de14b5845f52..de94fbe629bd 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1055,7 +1055,7 @@ static int mmc_select_hs(struct mmc_card *card)
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
-			   true, true, true);
+			   true, true);
	if (err)
		pr_warn("%s: switch to high-speed failed, err:%d\n",
			mmc_hostname(card->host), err);
@@ -1087,7 +1087,7 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
			   ext_csd_bits,
			   card->ext_csd.generic_cmd6_time,
			   MMC_TIMING_MMC_DDR52,
-			   true, true, true);
+			   true, true);
	if (err) {
		pr_err("%s: switch to bus width %d ddr failed\n",
			mmc_hostname(host), 1 << bus_width);
@@ -1155,7 +1155,7 @@ static int mmc_select_hs400(struct mmc_card *card)
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
-			   true, false, true);
+			   false, true);
	if (err) {
		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
			mmc_hostname(host), err);
@@ -1173,7 +1173,7 @@ static int mmc_select_hs400(struct mmc_card *card)
	max_dtr = card->ext_csd.hs_max_dtr;
	mmc_set_clock(host, max_dtr);
 
-	err = mmc_switch_status(card);
+	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;
 
@@ -1197,7 +1197,7 @@ static int mmc_select_hs400(struct mmc_card *card)
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
-			   true, false, true);
+			   false, true);
	if (err) {
		pr_err("%s: switch to hs400 failed, err:%d\n",
			mmc_hostname(host), err);
@@ -1211,7 +1211,7 @@ static int mmc_select_hs400(struct mmc_card *card)
	if (host->ops->hs400_complete)
		host->ops->hs400_complete(host);
 
-	err = mmc_switch_status(card);
+	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;
 
@@ -1243,20 +1243,20 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
	val = EXT_CSD_TIMING_HS;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
-			   true, false, true);
+			   false, true);
	if (err)
		goto out_err;
 
	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
 
-	err = mmc_switch_status(card);
+	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;
 
	/* Switch HS DDR to HS */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			   EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
-			   0, true, false, true);
+			   0, false, true);
	if (err)
		goto out_err;
 
@@ -1265,7 +1265,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
	if (host->ops->hs400_downgrade)
		host->ops->hs400_downgrade(host);
 
-	err = mmc_switch_status(card);
+	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;
 
@@ -1274,7 +1274,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time, 0,
-			   true, false, true);
+			   false, true);
	if (err)
		goto out_err;
 
@@ -1285,7 +1285,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
	 * failed. If there really is a problem, we would expect tuning will
	 * fail and the result ends up the same.
	 */
-	err = __mmc_switch_status(card, false);
+	err = mmc_switch_status(card, false);
	if (err)
		goto out_err;
 
@@ -1358,7 +1358,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, 0,
-			   true, false, true);
+			   false, true);
	if (err) {
		pr_err("%s: switch to hs for hs400es failed, err:%d\n",
			mmc_hostname(host), err);
@@ -1366,7 +1366,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
	}
 
	mmc_set_timing(host, MMC_TIMING_MMC_HS);
-	err = mmc_switch_status(card);
+	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;
 
@@ -1392,7 +1392,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, val,
			   card->ext_csd.generic_cmd6_time, 0,
-			   true, false, true);
+			   false, true);
	if (err) {
		pr_err("%s: switch to hs400es failed, err:%d\n",
			mmc_hostname(host), err);
@@ -1407,7 +1407,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
 
	if (host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);
 
-	err = mmc_switch_status(card);
+	err = mmc_switch_status(card, true);
	if (err)
		goto out_err;
 
@@ -1457,7 +1457,7 @@ static int mmc_select_hs200(struct mmc_card *card)
		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_HS_TIMING, val,
				   card->ext_csd.generic_cmd6_time, 0,
-				   true, false, true);
+				   false, true);
		if (err)
			goto err;
		old_timing = host->ios.timing;
@@ -1468,7 +1468,7 @@ static int mmc_select_hs200(struct mmc_card *card)
		 * switch failed. If there really is a problem, we would expect
		 * tuning will fail and the result ends up the same.
		 */
-		err = __mmc_switch_status(card, false);
+		err = mmc_switch_status(card, false);
 
		/*
		 * mmc_select_timing() assumes timing has not changed if
@@ -1851,15 +1851,19 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
	 */
	card->reenable_cmdq = card->ext_csd.cmdq_en;
 
-	if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+	if (host->cqe_ops && !host->cqe_enabled) {
		err = host->cqe_ops->cqe_enable(host, card);
-		if (err) {
-			pr_err("%s: Failed to enable CQE, error %d\n",
-				mmc_hostname(host), err);
-		} else {
+		if (!err) {
			host->cqe_enabled = true;
-			pr_info("%s: Command Queue Engine enabled\n",
-				mmc_hostname(host));
+
+			if (card->ext_csd.cmdq_en) {
+				pr_info("%s: Command Queue Engine enabled\n",
+					mmc_hostname(host));
+			} else {
+				host->hsq_enabled = true;
+				pr_info("%s: Host Software Queue enabled\n",
+					mmc_hostname(host));
+			}
		}
	}
 
@@ -1958,7 +1962,7 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
 
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_POWER_OFF_NOTIFICATION,
-			notify_type, timeout, 0, true, false, false);
+			notify_type, timeout, 0, false, false);
	if (err)
		pr_err("%s: Power Off Notification timed out, %u\n",
		       mmc_hostname(card->host), timeout);
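All of the mmc.c hunks above are one mechanical edit: __mmc_switch() drops its use_busy_signal argument (the mmc_ops.c change below makes it implicitly true), and the old mmc_switch_status()/__mmc_switch_status() pair collapses into a single function taking an explicit crc_err_fatal flag. Taking the first hunk as the template, the pattern at every call site is:

	/* before: ..., use_busy_signal, send_status, retry_crc_err */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
			   true, true, true);

	/* after: ..., send_status, retry_crc_err */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
			   true, true);
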
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index e025604e17d4..5bd0ab8b236a 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -19,9 +19,9 @@
 #include "host.h"
 #include "mmc_ops.h"
 
-#define MMC_OPS_TIMEOUT_MS		(10 * 60 * 1000) /* 10min*/
 #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
 #define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
+#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
 
 static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -431,7 +431,7 @@ static int mmc_switch_status_error(struct mmc_host *host, u32 status)
 }
 
 /* Caller must hold re-tuning */
-int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
+int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
 {
	u32 status;
	int err;
@@ -445,18 +445,54 @@ int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
	return mmc_switch_status_error(card->host, status);
 }
 
-int mmc_switch_status(struct mmc_card *card)
+static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
+			   enum mmc_busy_cmd busy_cmd, bool *busy)
 {
-	return __mmc_switch_status(card, true);
+	struct mmc_host *host = card->host;
+	u32 status = 0;
+	int err;
+
+	if (host->ops->card_busy) {
+		*busy = host->ops->card_busy(host);
+		return 0;
+	}
+
+	err = mmc_send_status(card, &status);
+	if (retry_crc_err && err == -EILSEQ) {
+		*busy = true;
+		return 0;
+	}
+	if (err)
+		return err;
+
+	switch (busy_cmd) {
+	case MMC_BUSY_CMD6:
+		err = mmc_switch_status_error(card->host, status);
+		break;
+	case MMC_BUSY_ERASE:
+		err = R1_STATUS(status) ? -EIO : 0;
+		break;
+	case MMC_BUSY_HPI:
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	if (err)
+		return err;
+
+	*busy = !mmc_ready_for_data(status);
+	return 0;
 }
 
-static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
-			     bool send_status, bool retry_crc_err)
+static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+			       bool send_status, bool retry_crc_err,
+			       enum mmc_busy_cmd busy_cmd)
 {
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
-	u32 status = 0;
+	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;
 
@@ -478,21 +514,9 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		 */
		expired = time_after(jiffies, timeout);
 
-		if (host->ops->card_busy) {
-			busy = host->ops->card_busy(host);
-		} else {
-			err = mmc_send_status(card, &status);
-			if (retry_crc_err && err == -EILSEQ) {
-				busy = true;
-			} else if (err) {
-				return err;
-			} else {
-				err = mmc_switch_status_error(host, status);
-				if (err)
-					return err;
-				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
-			}
-		}
+		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
+		if (err)
+			return err;
 
		/* Timeout if the device still remains busy. */
		if (expired && busy) {
@@ -500,11 +524,24 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
				 mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
+
+		/* Throttle the polling rate to avoid hogging the CPU. */
+		if (busy) {
+			usleep_range(udelay, udelay * 2);
+			if (udelay < udelay_max)
+				udelay *= 2;
+		}
	} while (busy);
 
	return 0;
 }
 
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+		      enum mmc_busy_cmd busy_cmd)
+{
+	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
+}
+
 /**
  * __mmc_switch - modify EXT_CSD register
  * @card: the MMC card associated with the data transfer
@@ -514,7 +551,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
  * @timeout_ms: timeout (ms) for operation performed by register write,
  *                   timeout of zero implies maximum possible timeout
  * @timing: new timing to change to
- * @use_busy_signal: use the busy signal as response type
  * @send_status: send status cmd to poll for busy
  * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
  *
@@ -522,12 +558,12 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
  */
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
-		bool use_busy_signal, bool send_status, bool retry_crc_err)
+		bool send_status, bool retry_crc_err)
 {
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
-	bool use_r1b_resp = use_busy_signal;
+	bool use_r1b_resp = true;
	unsigned char old_timing = host->ios.timing;
 
	mmc_retune_hold(host);
@@ -562,24 +598,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}
 
-	if (index == EXT_CSD_SANITIZE_START)
-		cmd.sanitize_busy = true;
-
	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;
 
-	/* No need to check card status in case of unblocking command */
-	if (!use_busy_signal)
-		goto out;
-
	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;
 
	/* Let's try to poll to find out when the command is completed. */
-	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
+				  MMC_BUSY_CMD6);
	if (err)
		goto out;
 
@@ -589,7 +619,7 @@ out_tim:
		mmc_set_timing(host, timing);
 
	if (send_status) {
-		err = mmc_switch_status(card);
+		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
@@ -603,7 +633,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
 {
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
-			true, true, false);
+			true, false);
 }
 EXPORT_SYMBOL_GPL(mmc_switch);
 
@@ -799,32 +829,46 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
 }
 
-static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+static int mmc_send_hpi_cmd(struct mmc_card *card)
 {
+	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
+	struct mmc_host *host = card->host;
+	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
-	unsigned int opcode;
	int err;
 
-	opcode = card->ext_csd.hpi_cmd;
-	if (opcode == MMC_STOP_TRANSMISSION)
+	cmd.opcode = card->ext_csd.hpi_cmd;
+	cmd.arg = card->rca << 16 | 1;
+
+	/*
+	 * Make sure the host's max_busy_timeout fit the needed timeout for HPI.
+	 * In case it doesn't, let's instruct the host to avoid HW busy
+	 * detection, by using a R1 response instead of R1B.
+	 */
+	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
+		use_r1b_resp = false;
+
+	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
-	else if (opcode == MMC_SEND_STATUS)
+		cmd.busy_timeout = busy_timeout_ms;
+	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		use_r1b_resp = false;
+	}
 
-	cmd.opcode = opcode;
-	cmd.arg = card->rca << 16 | 1;
-
-	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
-		pr_warn("%s: error %d interrupting operation. "
-			"HPI command response %#x\n", mmc_hostname(card->host),
-			err, cmd.resp[0]);
+		pr_warn("%s: HPI error %d. Command response %#x\n",
+			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}
-	if (status)
-		*status = cmd.resp[0];
 
-	return 0;
+	/* No need to poll when using HW busy detection. */
+	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
+		return 0;
+
+	/* Let's poll to find out when the HPI request completes. */
+	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
 }
 
 /**
@@ -838,7 +882,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
 {
	int err;
	u32 status;
-	unsigned long prg_wait;
 
	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
@@ -871,20 +914,7 @@ int mmc_interrupt_hpi(struct mmc_card *card)
		goto out;
	}
 
-	err = mmc_send_hpi_cmd(card, &status);
-	if (err)
-		goto out;
-
-	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
-	do {
-		err = mmc_send_status(card, &status);
-
-		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
-			break;
-		if (time_after(jiffies, prg_wait))
-			err = -ETIMEDOUT;
-	} while (!err);
-
+	err = mmc_send_hpi_cmd(card);
 out:
	return err;
 }
@@ -1000,3 +1030,37 @@ int mmc_cmdq_disable(struct mmc_card *card)
	return mmc_cmdq_switch(card, false);
 }
 EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
+
+int mmc_sanitize(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	int err;
+
+	if (!mmc_can_sanitize(card)) {
+		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
+		return -EOPNOTSUPP;
+	}
+
+	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
+
+	mmc_retune_hold(host);
+
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
+			 1, MMC_SANITIZE_TIMEOUT_MS);
+	if (err)
+		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
+
+	/*
+	 * If the sanitize operation timed out, the card is probably still busy
+	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
+	 * it with a HPI command to get back into R1_STATE_TRAN.
+	 */
+	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
+		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
+
+	mmc_retune_release(host);
+
+	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
+	return err;
+}
+EXPORT_SYMBOL_GPL(mmc_sanitize);
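The HPI rework above chooses between hardware and software busy detection: an R1B response only when the host controller can time the card's whole out-of-interrupt window, otherwise a plain R1 response followed by CMD13 polling. A toy model of that decision, with illustrative numbers rather than values from any real host or card:

	#include <stdbool.h>
	#include <stdio.h>

	/* max_busy_timeout == 0 means the host does not limit HW busy
	 * detection; names and values here are illustrative only. */
	struct toy_host {
		unsigned int max_busy_timeout;	/* ms */
	};

	static bool can_use_r1b(const struct toy_host *host,
				unsigned int busy_timeout_ms)
	{
		return !host->max_busy_timeout ||
		       busy_timeout_ms <= host->max_busy_timeout;
	}

	int main(void)
	{
		struct toy_host host = { .max_busy_timeout = 100 };

		/* A 300 ms out-of-interrupt time exceeds what the host can
		 * track, so HPI falls back to R1 plus CMD13 polling; a
		 * 50 ms one keeps R1B and HW busy detection. */
		printf("300 ms -> %s\n", can_use_r1b(&host, 300) ? "R1B" : "R1");
		printf(" 50 ms -> %s\n", can_use_r1b(&host, 50) ? "R1B" : "R1");
		return 0;
	}
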
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 8f2f9475716d..632009260e51 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -10,6 +10,12 @@
 
 #include <linux/types.h>
 
+enum mmc_busy_cmd {
+	MMC_BUSY_CMD6,
+	MMC_BUSY_ERASE,
+	MMC_BUSY_HPI,
+};
+
 struct mmc_host;
 struct mmc_card;
 
@@ -26,20 +32,21 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
 int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
 int mmc_bus_test(struct mmc_card *card, u8 bus_width);
-int mmc_interrupt_hpi(struct mmc_card *card);
 int mmc_can_ext_csd(struct mmc_card *card);
 int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
-int mmc_switch_status(struct mmc_card *card);
-int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+		      enum mmc_busy_cmd busy_cmd);
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
-		bool use_busy_signal, bool send_status, bool retry_crc_err);
+		bool send_status, bool retry_crc_err);
 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms);
 void mmc_run_bkops(struct mmc_card *card);
 int mmc_flush_cache(struct mmc_card *card);
 int mmc_cmdq_enable(struct mmc_card *card);
 int mmc_cmdq_disable(struct mmc_card *card);
+int mmc_sanitize(struct mmc_card *card);
 
 #endif
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 492dd4596314..c21b3cb71775 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -71,6 +71,7 @@ struct mmc_test_mem {
  * @sg_len: length of currently mapped scatterlist @sg
  * @mem: allocated memory
  * @sg: scatterlist
+ * @sg_areq: scatterlist for non-blocking request
  */
 struct mmc_test_area {
	unsigned long max_sz;
@@ -82,6 +83,7 @@ struct mmc_test_area {
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
+	struct scatterlist *sg_areq;
 };
 
 /**
@@ -836,14 +838,16 @@ static int mmc_test_start_areq(struct mmc_test_card *test,
 }
 
 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
-				      struct scatterlist *sg, unsigned sg_len,
-				      unsigned dev_addr, unsigned blocks,
-				      unsigned blksz, int write, int count)
+				      unsigned int dev_addr, int write,
+				      int count)
 {
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;
+	struct mmc_test_area *t = &test->area;
+	struct scatterlist *sg = t->sg;
+	struct scatterlist *sg_areq = t->sg_areq;
 
	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
@@ -857,8 +861,8 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
-		mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
-				     blksz, write);
+		mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
+				     t->blocks, 512, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;
@@ -867,7 +871,8 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
			prev_mrq = &rq2->mrq;
 
		swap(mrq, prev_mrq);
-		dev_addr += blocks;
+		swap(sg, sg_areq);
+		dev_addr += t->blocks;
	}
 
	ret = mmc_test_start_areq(test, NULL, prev_mrq);
@@ -1396,10 +1401,11 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
  * Map sz bytes so that it can be transferred.
  */
 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
-			     int max_scatter, int min_sg_len)
+			     int max_scatter, int min_sg_len, bool nonblock)
 {
	struct mmc_test_area *t = &test->area;
	int err;
+	unsigned int sg_len = 0;
 
	t->blocks = sz >> 9;
 
@@ -1411,6 +1417,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
+
+	if (err || !nonblock)
+		goto err;
+
+	if (max_scatter) {
+		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
+						  t->max_segs, t->max_seg_sz,
+						  &sg_len);
+	} else {
+		err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
+				      t->max_seg_sz, &sg_len, min_sg_len);
+	}
+	if (!err && sg_len != t->sg_len)
+		err = -EINVAL;
+
+err:
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
@@ -1440,7 +1462,6 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;
-	struct mmc_test_area *t = &test->area;
 
	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
@@ -1458,15 +1479,14 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
		sz = max_tfr;
	}
 
-	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
+	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
	if (ret)
		return ret;
 
	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
-		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
-				 dev_addr, t->blocks, 512, write, count);
+		ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
@@ -1525,6 +1545,7 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
	struct mmc_test_area *t = &test->area;
 
	kfree(t->sg);
+	kfree(t->sg_areq);
	mmc_test_free_mem(t->mem);
 
	return 0;
@@ -1584,6 +1605,13 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
		goto out_free;
	}
 
+	t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
+				   GFP_KERNEL);
+	if (!t->sg_areq) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
 
@@ -2468,7 +2496,7 @@ static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
		return RESULT_UNSUP_HOST;
 
-	ret = mmc_test_area_map(test, sz, 0, 0);
+	ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
	if (ret)
		return ret;
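The mmc_test.c change above gives the non-blocking test path a second, independently mapped scatterlist and alternates between the two with swap(sg, sg_areq), so the mapping of the request still in flight is never reused while the next request is being prepared. A toy model of that ping-pong, with illustrative names:

	#include <stdio.h>

	int main(void)
	{
		const char *cur = "t->sg", *next = "t->sg_areq", *tmp;
		int i;

		for (i = 0; i < 4; i++) {
			printf("request %d prepared on %s\n", i, cur);
			tmp = cur;	/* mirrors swap(sg, sg_areq) */
			cur = next;
			next = tmp;
		}
		return 0;
	}
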
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 9edc08685e86..25bee3daf9e2 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -62,7 +62,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
 {
	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe)
+	if (mq->use_cqe && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);
 
	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
@@ -124,12 +124,14 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
 {
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
	unsigned long flags;
	int ret;
 
	spin_lock_irqsave(&mq->lock, flags);
 
-	if (mq->recovery_needed || !mq->use_cqe)
+	if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled)
		ret = BLK_EH_RESET_TIMER;
	else
		ret = mmc_cqe_timed_out(req);
@@ -144,12 +146,13 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
+	struct mmc_host *host = mq->card->host;
 
	mmc_get_card(mq->card, &mq->ctx);
 
	mq->in_recovery = true;
 
-	if (mq->use_cqe)
+	if (mq->use_cqe && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);
@@ -160,6 +163,9 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);
 
+	if (host->hsq_enabled)
+		host->cqe_ops->cqe_recovery_finish(host);
+
	mmc_put_card(mq->card, &mq->ctx);
 
	blk_mq_run_hw_queues(q, true);
@@ -279,6 +285,14 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
		}
		break;
	case MMC_ISSUE_ASYNC:
+		/*
+		 * For MMC host software queue, we only allow 2 requests in
+		 * flight to avoid a long latency.
+		 */
+		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
+			spin_unlock_irq(&mq->lock);
+			return BLK_STS_RESOURCE;
+		}
		break;
	default:
		/*
@@ -430,7 +444,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
-	if (mq->use_cqe)
+	if (mq->use_cqe && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index fe914ff5f5d6..76c7add367d5 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1082,6 +1082,16 @@ retry:
		}
	}
 
+	if (host->cqe_ops && !host->cqe_enabled) {
+		err = host->cqe_ops->cqe_enable(host, card);
+		if (!err) {
+			host->cqe_enabled = true;
+			host->hsq_enabled = true;
+			pr_info("%s: Host Software Queue enabled\n",
+				mmc_hostname(host));
+		}
+	}
+
	if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
	    host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		pr_err("%s: Host failed to negotiate down from 3.3V\n",
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 900871073bd7..3ffe4ff49aa7 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -276,14 +276,15 @@ static void sdio_single_irq_set(struct mmc_card *card)
 
	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
-	    card->host->sdio_irqs == 1)
+	    card->host->sdio_irqs == 1) {
		for (i = 0; i < card->sdio_funcs; i++) {
-			func = card->sdio_func[i];
-			if (func && func->irq_handler) {
-				card->sdio_single_irq = func;
-				break;
-			}
-		}
+			func = card->sdio_func[i];
+			if (func && func->irq_handler) {
+				card->sdio_single_irq = func;
+				break;
+			}
+		}
+	}
 }
 
 /**
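Taken together, the queue.c, sd.c and mmc.c hunks gate every CQE-specific path with "!host->hsq_enabled": the host software queue reuses the cqe_ops interface and mmc_cqe_start_req(), but keeps the legacy blk-mq issue path, the plain timeout handling, and a shallow two-requests-in-flight pipeline. A condensed sketch of the resulting dispatch, assembled from the hunks above rather than taken from any literal kernel function:

	enum issue_path { PATH_CQE, PATH_HSQ, PATH_LEGACY };

	/* use_cqe is set whenever cqe_enable() succeeded; hsq_enabled marks
	 * the software-queue flavour of it (no command-queue hardware). */
	static enum issue_path pick_issue_path(bool use_cqe, bool hsq_enabled)
	{
		if (use_cqe && !hsq_enabled)
			return PATH_CQE;	/* HW queue, tag-indexed, deep */
		if (use_cqe && hsq_enabled)
			return PATH_HSQ;	/* cqe_ops API, 2 in flight max */
		return PATH_LEGACY;
	}
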