author    | James Morris <james.l.morris@oracle.com> | 2017-11-29 12:47:41 +1100
committer | James Morris <james.l.morris@oracle.com> | 2017-11-29 12:47:41 +1100
commit    | cf40a76e7d5874bb25f4404eecc58a2e033af885 (patch)
tree      | 8fd81cbea03c87b3d41d7ae5b1d11eadd35d6ef5 /drivers/mmc
parent    | ab5348c9c23cd253f5902980d2d8fe067dc24c82 (diff)
parent    | 4fbd8d194f06c8a3fd2af1ce560ddb31f7ec8323 (diff)
Merge tag 'v4.15-rc1' into next-seccomp
Linux 4.15-rc1
Diffstat (limited to 'drivers/mmc')
94 files changed, 5549 insertions, 1710 deletions
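The bulk of the block-layer changes below introduce a dedicated character device for RPMB partitions, with MMC_IOC_CMD/MMC_IOC_MULTI_CMD ioctl()s redirected to the parent block device and an explicit partition switch on the kernel side. A minimal userspace sketch of talking to that chardev follows; it is not part of the patch, and the device node name, the RCA value and the choice of CMD13 are illustrative assumptions (real RPMB traffic is built from authenticated 512-byte frames sent with CMD23 + CMD25/CMD18).

```c
/*
 * Hypothetical sketch: issue a raw command to the new RPMB character
 * device via MMC_IOC_CMD.  CAP_SYS_RAWIO is required, as for the
 * block-device ioctl() path.  Node name, RCA and opcode are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

/* Response/command type flags, mirroring include/linux/mmc/core.h */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_CMD_AC	(0 << 5)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)

int main(void)
{
	struct mmc_ioc_cmd ic;
	int fd = open("/dev/mmcblk0rpmb", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ic, 0, sizeof(ic));
	ic.opcode = 13;			/* CMD13: SEND_STATUS */
	ic.arg = 1 << 16;		/* RCA 1 in bits [31:16], assumed */
	ic.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
		perror("MMC_IOC_CMD");
	else
		printf("card status: 0x%08x\n", ic.response[0]);

	close(fd);
	return 0;
}
```

The ioctl numbers and struct mmc_ioc_cmd layout are shared between the regular block device and the RPMB chardev; only the kernel-side target partition differs, as the diff to __mmc_blk_ioctl_cmd() shows.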
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig index 7e803fc454d1..ec21388311db 100644 --- a/drivers/mmc/Kconfig +++ b/drivers/mmc/Kconfig @@ -12,13 +12,6 @@ menuconfig MMC If you want MMC/SD/SDIO support, you should say Y here and also to your specific host controller driver. -config MMC_DEBUG - bool "MMC debugging" - depends on MMC != n - help - This is an option for use by developers; most people should - say N here. This enables MMC core and driver debugging. - if MMC source "drivers/mmc/core/Kconfig" diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile index 416b6d1c9ec6..26ab7af4e0f9 100644 --- a/drivers/mmc/Makefile +++ b/drivers/mmc/Makefile @@ -2,7 +2,5 @@ # Makefile for the kernel mmc device drivers. # -subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG - obj-$(CONFIG_MMC) += core/ obj-$(subst m,y,$(CONFIG_MMC)) += host/ diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index 7e3ed1aeada2..abba078f7f49 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Makefile for the kernel mmc core. # diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8ac59dc80f23..ea80ff4cd7f9 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -28,6 +28,7 @@ #include <linux/hdreg.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> +#include <linux/cdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> @@ -36,6 +37,7 @@ #include <linux/compat.h> #include <linux/pm_runtime.h> #include <linux/idr.h> +#include <linux/debugfs.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> @@ -85,6 +87,7 @@ static int max_devices; #define MAX_DEVICES 256 static DEFINE_IDA(mmc_blk_ida); +static DEFINE_IDA(mmc_rpmb_ida); /* * There is one mmc_blk_data per slot. 
@@ -95,6 +98,7 @@ struct mmc_blk_data { struct gendisk *disk; struct mmc_queue queue; struct list_head part; + struct list_head rpmbs; unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ @@ -120,13 +124,39 @@ struct mmc_blk_data { int area_type; }; +/* Device type for RPMB character devices */ +static dev_t mmc_rpmb_devt; + +/* Bus type for RPMB character devices */ +static struct bus_type mmc_rpmb_bus_type = { + .name = "mmc_rpmb", +}; + +/** + * struct mmc_rpmb_data - special RPMB device type for these areas + * @dev: the device for the RPMB area + * @chrdev: character device for the RPMB area + * @id: unique device ID number + * @part_index: partition index (0 on first) + * @md: parent MMC block device + * @node: list item, so we can put this device on a list + */ +struct mmc_rpmb_data { + struct device dev; + struct cdev chrdev; + int id; + unsigned int part_index; + struct mmc_blk_data *md; + struct list_head node; +}; + static DEFINE_MUTEX(open_lock); module_param(perdev_minors, int, 0444); MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); static inline int mmc_blk_part_switch(struct mmc_card *card, - struct mmc_blk_data *md); + unsigned int part_type); static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { @@ -188,7 +218,6 @@ static ssize_t power_ro_lock_store(struct device *dev, { int ret; struct mmc_blk_data *md, *part_md; - struct mmc_card *card; struct mmc_queue *mq; struct request *req; unsigned long set; @@ -201,7 +230,6 @@ static ssize_t power_ro_lock_store(struct device *dev, md = mmc_blk_get(dev_to_disk(dev)); mq = &md->queue; - card = md->queue.card; /* Dispatch locking to the block layer */ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM); @@ -300,6 +328,7 @@ struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; + struct mmc_rpmb_data *rpmb; }; static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( @@ -438,14 +467,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct mmc_request mrq = {}; struct scatterlist sg; int err; - bool is_rpmb = false; + unsigned int target_part; u32 status = 0; if (!card || !md || !idata) return -EINVAL; - if (md->area_type & MMC_BLK_DATA_AREA_RPMB) - is_rpmb = true; + /* + * The RPMB accesses comes in from the character device, so we + * need to target these explicitly. Else we just target the + * partition type for the block device the ioctl() was issued + * on. + */ + if (idata->rpmb) { + /* Support multiple RPMB partitions */ + target_part = idata->rpmb->part_index; + target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB; + } else { + target_part = md->part_type; + } cmd.opcode = idata->ic.opcode; cmd.arg = idata->ic.arg; @@ -489,7 +529,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, mrq.cmd = &cmd; - err = mmc_blk_part_switch(card, md); + err = mmc_blk_part_switch(card, target_part); if (err) return err; @@ -499,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return err; } - if (is_rpmb) { + if (idata->rpmb) { err = mmc_set_blockcount(card, data.blocks, idata->ic.write_flag & (1 << 31)); if (err) @@ -539,7 +579,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (is_rpmb) { + if (idata->rpmb) { /* * Ensure RPMB command has completed by polling CMD13 * "Send Status". 
@@ -554,34 +594,22 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return err; } -static int mmc_blk_ioctl_cmd(struct block_device *bdev, - struct mmc_ioc_cmd __user *ic_ptr) +static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, + struct mmc_ioc_cmd __user *ic_ptr, + struct mmc_rpmb_data *rpmb) { struct mmc_blk_ioc_data *idata; struct mmc_blk_ioc_data *idatas[1]; - struct mmc_blk_data *md; struct mmc_queue *mq; struct mmc_card *card; int err = 0, ioc_err = 0; struct request *req; - /* - * The caller must have CAP_SYS_RAWIO, and must be calling this on the - * whole block device, not on a partition. This prevents overspray - * between sibling partitions. - */ - if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) - return -EPERM; - idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); - - md = mmc_blk_get(bdev->bd_disk); - if (!md) { - err = -EINVAL; - goto cmd_err; - } + /* This will be NULL on non-RPMB ioctl():s */ + idata->rpmb = rpmb; card = md->queue.card; if (IS_ERR(card)) { @@ -597,8 +625,9 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); idatas[0] = idata; - req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; - req_to_mmc_queue_req(req)->idata = idatas; + req_to_mmc_queue_req(req)->drv_op = + rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op_data = idatas; req_to_mmc_queue_req(req)->ioc_count = 1; blk_execute_rq(mq->queue, NULL, req, 0); ioc_err = req_to_mmc_queue_req(req)->drv_op_result; @@ -606,33 +635,23 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, blk_put_request(req); cmd_done: - mmc_blk_put(md); -cmd_err: kfree(idata->buf); kfree(idata); return ioc_err ? ioc_err : err; } -static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, - struct mmc_ioc_multi_cmd __user *user) +static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, + struct mmc_ioc_multi_cmd __user *user, + struct mmc_rpmb_data *rpmb) { struct mmc_blk_ioc_data **idata = NULL; struct mmc_ioc_cmd __user *cmds = user->cmds; struct mmc_card *card; - struct mmc_blk_data *md; struct mmc_queue *mq; int i, err = 0, ioc_err = 0; __u64 num_of_cmds; struct request *req; - /* - * The caller must have CAP_SYS_RAWIO, and must be calling this on the - * whole block device, not on a partition. This prevents overspray - * between sibling partitions. - */ - if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) - return -EPERM; - if (copy_from_user(&num_of_cmds, &user->num_of_cmds, sizeof(num_of_cmds))) return -EFAULT; @@ -654,18 +673,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, num_of_cmds = i; goto cmd_err; } - } - - md = mmc_blk_get(bdev->bd_disk); - if (!md) { - err = -EINVAL; - goto cmd_err; + /* This will be NULL on non-RPMB ioctl():s */ + idata[i]->rpmb = rpmb; } card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); - goto cmd_done; + goto cmd_err; } @@ -676,8 +691,9 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, req = blk_get_request(mq->queue, idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); - req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; - req_to_mmc_queue_req(req)->idata = idata; + req_to_mmc_queue_req(req)->drv_op = + rpmb ? 
MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; blk_execute_rq(mq->queue, NULL, req, 0); ioc_err = req_to_mmc_queue_req(req)->drv_op_result; @@ -688,8 +704,6 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, blk_put_request(req); -cmd_done: - mmc_blk_put(md); cmd_err: for (i = 0; i < num_of_cmds; i++) { kfree(idata[i]->buf); @@ -699,16 +713,49 @@ cmd_err: return ioc_err ? ioc_err : err; } +static int mmc_blk_check_blkdev(struct block_device *bdev) +{ + /* + * The caller must have CAP_SYS_RAWIO, and must be calling this on the + * whole block device, not on a partition. This prevents overspray + * between sibling partitions. + */ + if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) + return -EPERM; + return 0; +} + static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { + struct mmc_blk_data *md; + int ret; + switch (cmd) { case MMC_IOC_CMD: - return mmc_blk_ioctl_cmd(bdev, - (struct mmc_ioc_cmd __user *)arg); + ret = mmc_blk_check_blkdev(bdev); + if (ret) + return ret; + md = mmc_blk_get(bdev->bd_disk); + if (!md) + return -EINVAL; + ret = mmc_blk_ioctl_cmd(md, + (struct mmc_ioc_cmd __user *)arg, + NULL); + mmc_blk_put(md); + return ret; case MMC_IOC_MULTI_CMD: - return mmc_blk_ioctl_multi_cmd(bdev, - (struct mmc_ioc_multi_cmd __user *)arg); + ret = mmc_blk_check_blkdev(bdev); + if (ret) + return ret; + md = mmc_blk_get(bdev->bd_disk); + if (!md) + return -EINVAL; + ret = mmc_blk_ioctl_multi_cmd(md, + (struct mmc_ioc_multi_cmd __user *)arg, + NULL); + mmc_blk_put(md); + return ret; default: return -EINVAL; } @@ -765,29 +812,29 @@ static int mmc_blk_part_switch_post(struct mmc_card *card, } static inline int mmc_blk_part_switch(struct mmc_card *card, - struct mmc_blk_data *md) + unsigned int part_type) { int ret = 0; struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); - if (main_md->part_curr == md->part_type) + if (main_md->part_curr == part_type) return 0; if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; - ret = mmc_blk_part_switch_pre(card, md->part_type); + ret = mmc_blk_part_switch_pre(card, part_type); if (ret) return ret; part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; - part_config |= md->part_type; + part_config |= part_type; ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); if (ret) { - mmc_blk_part_switch_post(card, md->part_type); + mmc_blk_part_switch_post(card, part_type); return ret; } @@ -796,7 +843,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, ret = mmc_blk_part_switch_post(card, main_md->part_curr); } - main_md->part_curr = md->part_type; + main_md->part_curr = part_type; return ret; } @@ -1139,7 +1186,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int part_err; main_md->part_curr = main_md->part_type; - part_err = mmc_blk_part_switch(host->card, md); + part_err = mmc_blk_part_switch(host->card, md->part_type); if (part_err) { /* * We have failed to get back into the correct @@ -1156,18 +1203,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } -int mmc_access_rpmb(struct mmc_queue *mq) -{ - struct mmc_blk_data *md = mq->blkdata; - /* - * If this is a RPMB partition access, return ture - */ - if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) - return true; - - return false; -} - /* * The non-block commands come 
back from the block layer after it queued it and * processed it with all other requests and then they get issued in this @@ -1178,21 +1213,28 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) struct mmc_queue_req *mq_rq; struct mmc_card *card = mq->card; struct mmc_blk_data *md = mq->blkdata; + struct mmc_blk_ioc_data **idata; + bool rpmb_ioctl; + u8 **ext_csd; + u32 status; int ret; int i; mq_rq = req_to_mmc_queue_req(req); + rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); switch (mq_rq->drv_op) { case MMC_DRV_OP_IOCTL: + case MMC_DRV_OP_IOCTL_RPMB: + idata = mq_rq->drv_op_data; for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { - ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]); + ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); if (ret) break; } /* Always switch back to main area after RPMB access */ - if (md->area_type & MMC_BLK_DATA_AREA_RPMB) - mmc_blk_part_switch(card, dev_get_drvdata(&card->dev)); + if (rpmb_ioctl) + mmc_blk_part_switch(card, 0); break; case MMC_DRV_OP_BOOT_WP: ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, @@ -1206,6 +1248,15 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; break; + case MMC_DRV_OP_GET_CARD_STATUS: + ret = mmc_send_status(card, &status); + if (!ret) + ret = status; + break; + case MMC_DRV_OP_GET_EXT_CSD: + ext_csd = mq_rq->drv_op_data; + ret = mmc_get_ext_csd(card, ext_csd); + break; default: pr_err("%s: unknown driver specific operation\n", md->disk->disk_name); @@ -1213,7 +1264,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) break; } mq_rq->drv_op_result = ret; - blk_end_request_all(req, ret); + blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) @@ -1371,12 +1422,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, R1_CC_ERROR | /* Card controller error */ \ R1_ERROR) /* General/unknown error */ -static bool mmc_blk_has_cmd_err(struct mmc_command *cmd) +static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) { - if (!cmd->error && cmd->resp[0] & CMD_ERRORS) - cmd->error = -EIO; + u32 val; + + /* + * Per the SD specification(physical layer version 4.10)[1], + * section 4.3.3, it explicitly states that "When the last + * block of user area is read using CMD18, the host should + * ignore OUT_OF_RANGE error that may occur even the sequence + * is correct". And JESD84-B51 for eMMC also has a similar + * statement on section 6.8.3. + * + * Multiple block read/write could be done by either predefined + * method, namely CMD23, or open-ending mode. For open-ending mode, + * we should ignore the OUT_OF_RANGE error as it's normal behaviour. + * + * However the spec[1] doesn't tell us whether we should also + * ignore that for predefined method. But per the spec[1], section + * 4.15 Set Block Count Command, it says"If illegal block count + * is set, out of range error will be indicated during read/write + * operation (For example, data transfer is stopped at user area + * boundary)." In another word, we could expect a out of range error + * in the response for the following CMD18/25. 
And if argument of + * CMD23 + the argument of CMD18/25 exceed the max number of blocks, + * we could also expect to get a -ETIMEDOUT or any error number from + * the host drivers due to missing data response(for write)/data(for + * read), as the cards will stop the data transfer by itself per the + * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode. + */ - return cmd->error; + if (!brq->stop.error) { + bool oor_with_open_end; + /* If there is no error yet, check R1 response */ + + val = brq->stop.resp[0] & CMD_ERRORS; + oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; + + if (val && !oor_with_open_end) + brq->stop.error = -EIO; + } } static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, @@ -1400,8 +1485,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, * stop.error indicates a problem with the stop command. Data * may have been transferred, or may still be transferring. */ - if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) || - brq->data.error) { + + mmc_blk_eval_resp_error(brq); + + if (brq->sbc.error || brq->cmd.error || + brq->stop.error || brq->data.error) { switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; @@ -1487,25 +1575,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, } static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, - int disable_multi, bool *do_rel_wr, - bool *do_data_tag) + int disable_multi, bool *do_rel_wr_p, + bool *do_data_tag_p) { struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mmc_queue_req_to_req(mqrq); + bool do_rel_wr, do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and * are supported only on MMCs. */ - *do_rel_wr = (req->cmd_flags & REQ_FUA) && - rq_data_dir(req) == WRITE && - (md->flags & MMC_BLK_REL_WR); + do_rel_wr = (req->cmd_flags & REQ_FUA) && + rq_data_dir(req) == WRITE && + (md->flags & MMC_BLK_REL_WR); memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.data = &brq->data; + brq->mrq.tag = req->tag; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; @@ -1520,6 +1610,14 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.blksz = 512; brq->data.blocks = blk_rq_sectors(req); + brq->data.blk_addr = blk_rq_pos(req); + + /* + * The command queue supports 2 priorities: "high" (1) and "simple" (0). + * The eMMC will give "high" priority tasks priority over "simple" + * priority tasks. Here we always set "simple" priority by not setting + * MMC_DATA_PRIO. 
+ */ /* * The block layer doesn't support all sector count @@ -1549,18 +1647,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.blocks); } - if (*do_rel_wr) + if (do_rel_wr) { mmc_apply_rel_rw(brq, card, req); + brq->data.flags |= MMC_DATA_REL_WR; + } /* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data */ - *do_data_tag = card->ext_csd.data_tag_unit_size && - (req->cmd_flags & REQ_META) && - (rq_data_dir(req) == WRITE) && - ((brq->data.blocks * brq->data.blksz) >= - card->ext_csd.data_tag_unit_size); + do_data_tag = card->ext_csd.data_tag_unit_size && + (req->cmd_flags & REQ_META) && + (rq_data_dir(req) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + + if (do_data_tag) + brq->data.flags |= MMC_DATA_DAT_TAG; mmc_set_data_timeout(&brq->data, card); @@ -1588,7 +1691,11 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, mqrq->areq.mrq = &brq->mrq; - mmc_queue_bounce_pre(mqrq); + if (do_rel_wr_p) + *do_rel_wr_p = do_rel_wr; + + if (do_data_tag_p) + *do_data_tag_p = do_data_tag; } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, @@ -1681,9 +1788,9 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, if (err) req_pending = old_req_pending; else - req_pending = blk_end_request(req, 0, blocks << 9); + req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9); } else { - req_pending = blk_end_request(req, 0, brq->data.bytes_xfered); + req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered); } return req_pending; } @@ -1782,7 +1889,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) brq = &mq_rq->brq; old_req = mmc_queue_req_to_req(mq_rq); type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; - mmc_queue_bounce_post(mq_rq); switch (status) { case MMC_BLK_SUCCESS: @@ -1904,9 +2010,9 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) if (req && !mq->qcnt) /* claim host only for the first request */ - mmc_get_card(card); + mmc_get_card(card, NULL); - ret = mmc_blk_part_switch(card, md); + ret = mmc_blk_part_switch(card, md->part_type); if (ret) { if (req) { blk_end_request_all(req, BLK_STS_IOERR); @@ -1967,7 +2073,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) out: if (!mq->qcnt) - mmc_put_card(card); + mmc_put_card(card, NULL); } static inline int mmc_blk_readonly(struct mmc_card *card) @@ -1987,8 +2093,20 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, int devidx, ret; devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); - if (devidx < 0) + if (devidx < 0) { + /* + * We get -ENOSPC because there are no more any available + * devidx. The reason may be that, either userspace haven't yet + * unmounted the partitions, which postpones mmc_blk_release() + * from being called, or the device has more partitions than + * what we support. 
+ */ + if (devidx == -ENOSPC) + dev_err(mmc_dev(card->host), + "no more device IDs available\n"); + return ERR_PTR(devidx); + } md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); if (!md) { @@ -2012,6 +2130,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, spin_lock_init(&md->lock); INIT_LIST_HEAD(&md->part); + INIT_LIST_HEAD(&md->rpmbs); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock, subname); @@ -2130,6 +2249,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card, return 0; } +/** + * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev + * @filp: the character device file + * @cmd: the ioctl() command + * @arg: the argument from userspace + * + * This will essentially just redirect the ioctl()s coming in over to + * the main block device spawning the RPMB character device. + */ +static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct mmc_rpmb_data *rpmb = filp->private_data; + int ret; + + switch (cmd) { + case MMC_IOC_CMD: + ret = mmc_blk_ioctl_cmd(rpmb->md, + (struct mmc_ioc_cmd __user *)arg, + rpmb); + break; + case MMC_IOC_MULTI_CMD: + ret = mmc_blk_ioctl_multi_cmd(rpmb->md, + (struct mmc_ioc_multi_cmd __user *)arg, + rpmb); + break; + default: + ret = -EINVAL; + break; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) +{ + struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, + struct mmc_rpmb_data, chrdev); + + get_device(&rpmb->dev); + filp->private_data = rpmb; + mmc_blk_get(rpmb->md->disk); + + return nonseekable_open(inode, filp); +} + +static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) +{ + struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, + struct mmc_rpmb_data, chrdev); + + put_device(&rpmb->dev); + mmc_blk_put(rpmb->md); + + return 0; +} + +static const struct file_operations mmc_rpmb_fileops = { + .release = mmc_rpmb_chrdev_release, + .open = mmc_rpmb_chrdev_open, + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = mmc_rpmb_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mmc_rpmb_ioctl_compat, +#endif +}; + +static void mmc_blk_rpmb_device_release(struct device *dev) +{ + struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + + ida_simple_remove(&mmc_rpmb_ida, rpmb->id); + kfree(rpmb); +} + +static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, + struct mmc_blk_data *md, + unsigned int part_index, + sector_t size, + const char *subname) +{ + int devidx, ret; + char rpmb_name[DISK_NAME_LEN]; + char cap_str[10]; + struct mmc_rpmb_data *rpmb; + + /* This creates the minor number for the RPMB char device */ + devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); + if (devidx < 0) + return devidx; + + rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); + if (!rpmb) { + ida_simple_remove(&mmc_rpmb_ida, devidx); + return -ENOMEM; + } + + snprintf(rpmb_name, sizeof(rpmb_name), + "mmcblk%u%s", card->host->index, subname ? 
subname : ""); + + rpmb->id = devidx; + rpmb->part_index = part_index; + rpmb->dev.init_name = rpmb_name; + rpmb->dev.bus = &mmc_rpmb_bus_type; + rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); + rpmb->dev.parent = &card->dev; + rpmb->dev.release = mmc_blk_rpmb_device_release; + device_initialize(&rpmb->dev); + dev_set_drvdata(&rpmb->dev, rpmb); + rpmb->md = md; + + cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); + rpmb->chrdev.owner = THIS_MODULE; + ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); + if (ret) { + pr_err("%s: could not add character device\n", rpmb_name); + goto out_put_device; + } + + list_add(&rpmb->node, &md->rpmbs); + + string_get_size((u64)size, 512, STRING_UNITS_2, + cap_str, sizeof(cap_str)); + + pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n", + rpmb_name, mmc_card_id(card), + mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str, + MAJOR(mmc_rpmb_devt), rpmb->id); + + return 0; + +out_put_device: + put_device(&rpmb->dev); + return ret; +} + +static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) + +{ + cdev_device_del(&rpmb->chrdev, &rpmb->dev); + put_device(&rpmb->dev); +} + /* MMC Physical partitions consist of two boot partitions and * up to four general purpose partitions. * For each partition enabled in EXT_CSD a block device will be allocatedi @@ -2138,13 +2409,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card, static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) { - int idx, ret = 0; + int idx, ret; if (!mmc_card_mmc(card)) return 0; for (idx = 0; idx < card->nr_parts; idx++) { - if (card->part[idx].size) { + if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { + /* + * RPMB partitions does not provide block access, they + * are only accessed using ioctl():s. Thus create + * special RPMB block devices that do not have a + * backing block queue for these. + */ + ret = mmc_blk_alloc_rpmb_part(card, md, + card->part[idx].part_cfg, + card->part[idx].size >> 9, + card->part[idx].name); + if (ret) + return ret; + } else if (card->part[idx].size) { ret = mmc_blk_alloc_part(card, md, card->part[idx].part_cfg, card->part[idx].size >> 9, @@ -2156,7 +2440,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) } } - return ret; + return 0; } static void mmc_blk_remove_req(struct mmc_blk_data *md) @@ -2170,6 +2454,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) * from being accepted. 
*/ card = md->queue.card; + spin_lock_irq(md->queue.queue->queue_lock); + queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); + spin_unlock_irq(md->queue.queue->queue_lock); blk_set_queue_dying(md->queue.queue); mmc_cleanup_queue(&md->queue); if (md->disk->flags & GENHD_FL_UP) { @@ -2190,7 +2477,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card, { struct list_head *pos, *q; struct mmc_blk_data *part_md; + struct mmc_rpmb_data *rpmb; + /* Remove RPMB partitions */ + list_for_each_safe(pos, q, &md->rpmbs) { + rpmb = list_entry(pos, struct mmc_rpmb_data, node); + list_del(pos); + mmc_blk_remove_rpmb_part(rpmb); + } + /* Remove block partitions */ list_for_each_safe(pos, q, &md->part) { part_md = list_entry(pos, struct mmc_blk_data, part); list_del(pos); @@ -2243,6 +2538,134 @@ force_ro_fail: return ret; } +#ifdef CONFIG_DEBUG_FS + +static int mmc_dbg_card_status_get(void *data, u64 *val) +{ + struct mmc_card *card = data; + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_queue *mq = &md->queue; + struct request *req; + int ret; + + /* Ask the block layer about the card status */ + req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; + blk_execute_rq(mq->queue, NULL, req, 0); + ret = req_to_mmc_queue_req(req)->drv_op_result; + if (ret >= 0) { + *val = ret; + ret = 0; + } + + return ret; +} +DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, + NULL, "%08llx\n"); + +/* That is two digits * 512 + 1 for newline */ +#define EXT_CSD_STR_LEN 1025 + +static int mmc_ext_csd_open(struct inode *inode, struct file *filp) +{ + struct mmc_card *card = inode->i_private; + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_queue *mq = &md->queue; + struct request *req; + char *buf; + ssize_t n = 0; + u8 *ext_csd; + int err, i; + + buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Ask the block layer for the EXT CSD */ + req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; + req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; + blk_execute_rq(mq->queue, NULL, req, 0); + err = req_to_mmc_queue_req(req)->drv_op_result; + if (err) { + pr_err("FAILED %d\n", err); + goto out_free; + } + + for (i = 0; i < 512; i++) + n += sprintf(buf + n, "%02x", ext_csd[i]); + n += sprintf(buf + n, "\n"); + + if (n != EXT_CSD_STR_LEN) { + err = -EINVAL; + goto out_free; + } + + filp->private_data = buf; + kfree(ext_csd); + return 0; + +out_free: + kfree(buf); + return err; +} + +static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char *buf = filp->private_data; + + return simple_read_from_buffer(ubuf, cnt, ppos, + buf, EXT_CSD_STR_LEN); +} + +static int mmc_ext_csd_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static const struct file_operations mmc_dbg_ext_csd_fops = { + .open = mmc_ext_csd_open, + .read = mmc_ext_csd_read, + .release = mmc_ext_csd_release, + .llseek = default_llseek, +}; + +static int mmc_blk_add_debugfs(struct mmc_card *card) +{ + struct dentry *root; + + if (!card->debugfs_root) + return 0; + + root = card->debugfs_root; + + if (mmc_card_mmc(card) || mmc_card_sd(card)) { + if (!debugfs_create_file("status", S_IRUSR, root, card, + &mmc_dbg_card_status_fops)) + return -EIO; + } + + if (mmc_card_mmc(card)) { + if (!debugfs_create_file("ext_csd", S_IRUSR, root, 
card, + &mmc_dbg_ext_csd_fops)) + return -EIO; + } + + return 0; +} + + +#else + +static int mmc_blk_add_debugfs(struct mmc_card *card) +{ + return 0; +} + +#endif /* CONFIG_DEBUG_FS */ + static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; @@ -2279,6 +2702,9 @@ static int mmc_blk_probe(struct mmc_card *card) goto out; } + /* Add two debugfs entries */ + mmc_blk_add_debugfs(card); + pm_runtime_set_autosuspend_delay(&card->dev, 3000); pm_runtime_use_autosuspend(&card->dev); @@ -2306,7 +2732,7 @@ static void mmc_blk_remove(struct mmc_card *card) mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); - mmc_blk_part_switch(card, md); + mmc_blk_part_switch(card, md->part_type); mmc_release_host(card->host); if (card->type != MMC_TYPE_SD_COMBO) pm_runtime_disable(&card->dev); @@ -2378,6 +2804,17 @@ static int __init mmc_blk_init(void) { int res; + res = bus_register(&mmc_rpmb_bus_type); + if (res < 0) { + pr_err("mmcblk: could not register RPMB bus type\n"); + return res; + } + res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); + if (res < 0) { + pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); + goto out_bus_unreg; + } + if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) pr_info("mmcblk: using %d minors per device\n", perdev_minors); @@ -2385,16 +2822,20 @@ static int __init mmc_blk_init(void) res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); if (res) - goto out; + goto out_chrdev_unreg; res = mmc_register_driver(&mmc_driver); if (res) - goto out2; + goto out_blkdev_unreg; return 0; - out2: + +out_blkdev_unreg: unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); - out: +out_chrdev_unreg: + unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); +out_bus_unreg: + bus_unregister(&mmc_rpmb_bus_type); return res; } @@ -2402,6 +2843,7 @@ static void __exit mmc_blk_exit(void) { mmc_unregister_driver(&mmc_driver); unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); + unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); } module_init(mmc_blk_init); diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h index 860ca7c8df86..5946636101ef 100644 --- a/drivers/mmc/core/block.h +++ b/drivers/mmc/core/block.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MMC_CORE_BLOCK_H #define _MMC_CORE_BLOCK_H diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 301246513a37..a4b49e25fe96 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -369,10 +369,17 @@ int mmc_add_card(struct mmc_card *card) */ void mmc_remove_card(struct mmc_card *card) { + struct mmc_host *host = card->host; + #ifdef CONFIG_DEBUG_FS mmc_remove_card_debugfs(card); #endif + if (host->cqe_enabled) { + host->cqe_ops->cqe_disable(host); + host->cqe_enabled = false; + } + if (mmc_card_present(card)) { if (mmc_host_is_spi(card->host)) { pr_info("%s: SPI card removed\n", diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 26431267a3e2..1f0f44f4dd5f 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -260,10 +260,14 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) trace_mmc_request_start(host, mrq); + if (host->cqe_on) + host->cqe_ops->cqe_off(host); + host->ops->request(host, mrq); } -static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq) +static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq, + bool cqe) { if (mrq->sbc) { pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", @@ -272,9 +276,12 @@ static void 
mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq) } if (mrq->cmd) { - pr_debug("%s: starting CMD%u arg %08x flags %08x\n", - mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg, - mrq->cmd->flags); + pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n", + mmc_hostname(host), cqe ? "CQE direct " : "", + mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); + } else if (cqe) { + pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n", + mmc_hostname(host), mrq->tag, mrq->data->blk_addr); } if (mrq->data) { @@ -295,10 +302,8 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq) static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq) { -#ifdef CONFIG_MMC_DEBUG - unsigned int i, sz; + unsigned int i, sz = 0; struct scatterlist *sg; -#endif if (mrq->cmd) { mrq->cmd->error = 0; @@ -314,13 +319,12 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq) mrq->data->blocks > host->max_blk_count || mrq->data->blocks * mrq->data->blksz > host->max_req_size) return -EINVAL; -#ifdef CONFIG_MMC_DEBUG - sz = 0; + for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) sz += sg->length; if (sz != mrq->data->blocks * mrq->data->blksz) return -EINVAL; -#endif + mrq->data->error = 0; mrq->data->mrq = mrq; if (mrq->stop) { @@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq) return 0; } -static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) +int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) { int err; @@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) if (mmc_card_removed(host->card)) return -ENOMEDIUM; - mmc_mrq_pr_debug(host, mrq); + mmc_mrq_pr_debug(host, mrq, false); WARN_ON(!host->claimed); @@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) return 0; } +EXPORT_SYMBOL(mmc_start_request); /* * mmc_wait_data_done() - done callback for data request @@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq) } EXPORT_SYMBOL(mmc_wait_for_req_done); +/* + * mmc_cqe_start_req - Start a CQE request. + * @host: MMC host to start the request + * @mrq: request to start + * + * Start the request, re-tuning if needed and it is possible. Returns an error + * code if the request fails to start or -EBUSY if CQE is busy. + */ +int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) +{ + int err; + + /* + * CQE cannot process re-tuning commands. Caller must hold retuning + * while CQE is in use. Re-tuning can happen here only when CQE has no + * active requests i.e. this is the first. Note, re-tuning will call + * ->cqe_off(). 
+ */ + err = mmc_retune(host); + if (err) + goto out_err; + + mrq->host = host; + + mmc_mrq_pr_debug(host, mrq, true); + + err = mmc_mrq_prep(host, mrq); + if (err) + goto out_err; + + err = host->cqe_ops->cqe_request(host, mrq); + if (err) + goto out_err; + + trace_mmc_request_start(host, mrq); + + return 0; + +out_err: + if (mrq->cmd) { + pr_debug("%s: failed to start CQE direct CMD%u, error %d\n", + mmc_hostname(host), mrq->cmd->opcode, err); + } else { + pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n", + mmc_hostname(host), mrq->tag, err); + } + return err; +} +EXPORT_SYMBOL(mmc_cqe_start_req); + +/** + * mmc_cqe_request_done - CQE has finished processing an MMC request + * @host: MMC host which completed request + * @mrq: MMC request which completed + * + * CQE drivers should call this function when they have completed + * their processing of a request. + */ +void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq) +{ + mmc_should_fail_request(host, mrq); + + /* Flag re-tuning needed on CRC errors */ + if ((mrq->cmd && mrq->cmd->error == -EILSEQ) || + (mrq->data && mrq->data->error == -EILSEQ)) + mmc_retune_needed(host); + + trace_mmc_request_done(host, mrq); + + if (mrq->cmd) { + pr_debug("%s: CQE req done (direct CMD%u): %d\n", + mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error); + } else { + pr_debug("%s: CQE transfer done tag %d\n", + mmc_hostname(host), mrq->tag); + } + + if (mrq->data) { + pr_debug("%s: %d bytes transferred: %d\n", + mmc_hostname(host), + mrq->data->bytes_xfered, mrq->data->error); + } + + mrq->done(mrq); +} +EXPORT_SYMBOL(mmc_cqe_request_done); + +/** + * mmc_cqe_post_req - CQE post process of a completed MMC request + * @host: MMC host + * @mrq: MMC request to be processed + */ +void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq) +{ + if (host->cqe_ops->cqe_post_req) + host->cqe_ops->cqe_post_req(host, mrq); +} +EXPORT_SYMBOL(mmc_cqe_post_req); + +/* Arbitrary 1 second timeout */ +#define MMC_CQE_RECOVERY_TIMEOUT 1000 + +/* + * mmc_cqe_recovery - Recover from CQE errors. + * @host: MMC host to recover + * + * Recovery consists of stopping CQE, stopping eMMC, discarding the queue in + * in eMMC, and discarding the queue in CQE. CQE must call + * mmc_cqe_request_done() on all requests. An error is returned if the eMMC + * fails to discard its queue. + */ +int mmc_cqe_recovery(struct mmc_host *host) +{ + struct mmc_command cmd; + int err; + + mmc_retune_hold_now(host); + + /* + * Recovery is expected seldom, if at all, but it reduces performance, + * so make sure it is not completely silent. 
+ */ + pr_warn("%s: running CQE recovery\n", mmc_hostname(host)); + + host->cqe_ops->cqe_recovery_start(host); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = MMC_STOP_TRANSMISSION, + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC, + cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ + cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT, + mmc_wait_for_cmd(host, &cmd, 0); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = MMC_CMDQ_TASK_MGMT; + cmd.arg = 1; /* Discard entire queue */ + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; + cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ + cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT, + err = mmc_wait_for_cmd(host, &cmd, 0); + + host->cqe_ops->cqe_recovery_finish(host); + + mmc_retune_release(host); + + return err; +} +EXPORT_SYMBOL(mmc_cqe_recovery); + /** * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done * @host: MMC host @@ -736,8 +890,8 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) if (data->flags & MMC_DATA_WRITE) mult <<= card->csd.r2w_factor; - data->timeout_ns = card->csd.tacc_ns * mult; - data->timeout_clks = card->csd.tacc_clks * mult; + data->timeout_ns = card->csd.taac_ns * mult; + data->timeout_clks = card->csd.taac_clks * mult; /* * SD cards also have an upper limit on the timeout. @@ -766,7 +920,7 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) /* * SDHC cards always use these fixed values. */ - if (timeout_us > limit_us || mmc_card_blockaddr(card)) { + if (timeout_us > limit_us) { data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } @@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) } EXPORT_SYMBOL(mmc_align_data_size); +/* + * Allow claiming an already claimed host if the context is the same or there is + * no context but the task is the same. + */ +static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx, + struct task_struct *task) +{ + return host->claimer == ctx || + (!ctx && task && host->claimer->task == task); +} + +static inline void mmc_ctx_set_claimer(struct mmc_host *host, + struct mmc_ctx *ctx, + struct task_struct *task) +{ + if (!host->claimer) { + if (ctx) + host->claimer = ctx; + else + host->claimer = &host->default_ctx; + } + if (task) + host->claimer->task = task; +} + /** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim + * @ctx: context that claims the host or NULL in which case the default + * context will be used * @abort: whether or not the operation should be aborted * * Claim a host for a set of operations. If @abort is non null and @@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size); * that non-zero value without acquiring the lock. Returns zero * with the lock held otherwise. */ -int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) +int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx, + atomic_t *abort) { + struct task_struct *task = ctx ? NULL : current; DECLARE_WAITQUEUE(wait, current); unsigned long flags; int stop; @@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) while (1) { set_current_state(TASK_UNINTERRUPTIBLE); stop = abort ? 
atomic_read(abort) : 0; - if (stop || !host->claimed || host->claimer == current) + if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task)) break; spin_unlock_irqrestore(&host->lock, flags); schedule(); @@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) set_current_state(TASK_RUNNING); if (!stop) { host->claimed = 1; - host->claimer = current; + mmc_ctx_set_claimer(host, ctx, task); host->claim_cnt += 1; if (host->claim_cnt == 1) pm = true; @@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host) spin_unlock_irqrestore(&host->lock, flags); } else { host->claimed = 0; + host->claimer->task = NULL; host->claimer = NULL; spin_unlock_irqrestore(&host->lock, flags); wake_up(&host->wq); @@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host); * This is a helper function, which fetches a runtime pm reference for the * card device and also claims the host. */ -void mmc_get_card(struct mmc_card *card) +void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx) { pm_runtime_get_sync(&card->dev); - mmc_claim_host(card->host); + __mmc_claim_host(card->host, ctx, NULL); } EXPORT_SYMBOL(mmc_get_card); @@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card); * This is a helper function, which releases the host and drops the runtime * pm reference for the card device. */ -void mmc_put_card(struct mmc_card *card) +void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx) { - mmc_release_host(card->host); + struct mmc_host *host = card->host; + + WARN_ON(ctx && host->claimer != ctx); + + mmc_release_host(host); pm_runtime_mark_last_busy(&card->dev); pm_runtime_put_autosuspend(&card->dev); } @@ -982,6 +1170,9 @@ int mmc_execute_tuning(struct mmc_card *card) if (!host->ops->execute_tuning) return 0; + if (host->cqe_on) + host->cqe_ops->cqe_off(host); + if (mmc_card_mmc(card)) opcode = MMC_SEND_TUNING_BLOCK_HS200; else @@ -1021,6 +1212,9 @@ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) */ void mmc_set_initial_state(struct mmc_host *host) { + if (host->cqe_on) + host->cqe_ops->cqe_off(host); + mmc_retune_disable(host); if (mmc_host_is_spi(host)) @@ -1137,11 +1331,11 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask) voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; if (!voltage_ranges) { - pr_debug("%s: voltage-ranges unspecified\n", np->full_name); + pr_debug("%pOF: voltage-ranges unspecified\n", np); return 0; } if (!num_ranges) { - pr_err("%s: voltage-ranges empty\n", np->full_name); + pr_err("%pOF: voltage-ranges empty\n", np); return -EINVAL; } @@ -1153,8 +1347,8 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask) be32_to_cpu(voltage_ranges[j]), be32_to_cpu(voltage_ranges[j + 1])); if (!ocr_mask) { - pr_err("%s: voltage-range #%d is invalid\n", - np->full_name, i); + pr_err("%pOF: voltage-range #%d is invalid\n", + np, i); return -EINVAL; } *mask |= ocr_mask; @@ -1394,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc); #endif /* CONFIG_REGULATOR */ +/** + * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host + * @mmc: the host to regulate + * + * Returns 0 or errno. errno should be handled, it is either a critical error + * or -EPROBE_DEFER. 0 means no critical error but it does not mean all + * regulators have been found because they all are optional. If you require + * certain regulators, you need to check separately in your driver if they got + * populated after calling this function. 
+ */ int mmc_regulator_get_supply(struct mmc_host *mmc) { struct device *dev = mmc_dev(mmc); @@ -1478,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) } +int mmc_host_set_uhs_voltage(struct mmc_host *host) +{ + u32 clock; + + /* + * During a signal voltage level switch, the clock must be gated + * for 5 ms according to the SD spec + */ + clock = host->ios.clock; + host->ios.clock = 0; + mmc_set_ios(host); + + if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) + return -EAGAIN; + + /* Keep clock gated for at least 10 ms, though spec only says 5 ms */ + mmc_delay(10); + host->ios.clock = clock; + mmc_set_ios(host); + + return 0; +} + int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr) { struct mmc_command cmd = {}; int err = 0; - u32 clock; /* * If we cannot switch voltages, return failure so the caller @@ -1514,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr) err = -EAGAIN; goto power_cycle; } - /* - * During a signal voltage level switch, the clock must be gated - * for 5 ms according to the SD spec - */ - clock = host->ios.clock; - host->ios.clock = 0; - mmc_set_ios(host); - if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) { + if (mmc_host_set_uhs_voltage(host)) { /* * Voltages may not have been switched, but we've already * sent CMD11, so a power cycle is required anyway @@ -1531,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr) goto power_cycle; } - /* Keep clock gated for at least 10 ms, though spec only says 5 ms */ - mmc_delay(10); - host->ios.clock = clock; - mmc_set_ios(host); - /* Wait for at least 1 ms according to spec */ mmc_delay(1); @@ -1769,13 +1983,6 @@ void mmc_detach_bus(struct mmc_host *host) static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq) { -#ifdef CONFIG_MMC_DEBUG - unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - WARN_ON(host->removed); - spin_unlock_irqrestore(&host->lock, flags); -#endif - /* * If the device is configured as wakeup, we prevent a new sleep for * 5 s to give provision for user space to consume the event. @@ -1869,14 +2076,14 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, } else { /* CSD Erase Group Size uses write timeout */ unsigned int mult = (10 << card->csd.r2w_factor); - unsigned int timeout_clks = card->csd.tacc_clks * mult; + unsigned int timeout_clks = card->csd.taac_clks * mult; unsigned int timeout_us; - /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */ - if (card->csd.tacc_ns < 1000000) - timeout_us = (card->csd.tacc_ns * mult) / 1000; + /* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */ + if (card->csd.taac_ns < 1000000) + timeout_us = (card->csd.taac_ns * mult) / 1000; else - timeout_us = (card->csd.tacc_ns / 1000) * mult; + timeout_us = (card->csd.taac_ns / 1000) * mult; /* * ios.clock is only a target. 
The real clock rate might be @@ -2446,10 +2653,9 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) { host->f_init = freq; -#ifdef CONFIG_MMC_DEBUG - pr_info("%s: %s: trying to init card at %u Hz\n", + pr_debug("%s: %s: trying to init card at %u Hz\n", mmc_hostname(host), __func__, host->f_init); -#endif + mmc_power_up(host, host->ocr_avail); /* @@ -2646,12 +2852,6 @@ void mmc_start_host(struct mmc_host *host) void mmc_stop_host(struct mmc_host *host) { -#ifdef CONFIG_MMC_DEBUG - unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - host->removed = 1; - spin_unlock_irqrestore(&host->lock, flags); -#endif if (host->slot.cd_irq >= 0) { if (host->slot.cd_wake_enabled) disable_irq_wake(host->slot.cd_irq); @@ -2686,9 +2886,7 @@ int mmc_power_save_host(struct mmc_host *host) { int ret = 0; -#ifdef CONFIG_MMC_DEBUG - pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); -#endif + pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__); mmc_bus_get(host); @@ -2712,9 +2910,7 @@ int mmc_power_restore_host(struct mmc_host *host) { int ret; -#ifdef CONFIG_MMC_DEBUG - pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); -#endif + pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__); mmc_bus_get(host); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 55f543fd37c4..71e6c6d7ceb7 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -49,6 +49,7 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); void mmc_set_bus_width(struct mmc_host *host, unsigned int width); u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr); +int mmc_host_set_uhs_voltage(struct mmc_host *host); int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); void mmc_set_timing(struct mmc_host *host, unsigned int timing); void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); @@ -107,6 +108,14 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { } void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq); bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq); +int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq); + +struct mmc_async_req; + +struct mmc_async_req *mmc_start_areq(struct mmc_host *host, + struct mmc_async_req *areq, + enum mmc_blk_status *ret_stat); + int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, unsigned int arg); int mmc_can_erase(struct mmc_card *card); @@ -122,10 +131,11 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen); int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, bool is_rel_write); -int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); +int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx, + atomic_t *abort); void mmc_release_host(struct mmc_host *host); -void mmc_get_card(struct mmc_card *card); -void mmc_put_card(struct mmc_card *card); +void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx); +void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx); /** * mmc_claim_host - exclusively claim a host @@ -135,7 +145,11 @@ void mmc_put_card(struct mmc_card *card); */ static inline void mmc_claim_host(struct mmc_host *host) { - __mmc_claim_host(host, NULL); + __mmc_claim_host(host, NULL, NULL); } +int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq); +void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq); +int 
mmc_cqe_recovery(struct mmc_host *host); + #endif diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index a1fba5732d66..01e459a34f33 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -281,85 +281,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host) debugfs_remove_recursive(host->debugfs_root); } -static int mmc_dbg_card_status_get(void *data, u64 *val) -{ - struct mmc_card *card = data; - u32 status; - int ret; - - mmc_get_card(card); - - ret = mmc_send_status(data, &status); - if (!ret) - *val = status; - - mmc_put_card(card); - - return ret; -} -DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, - NULL, "%08llx\n"); - -#define EXT_CSD_STR_LEN 1025 - -static int mmc_ext_csd_open(struct inode *inode, struct file *filp) -{ - struct mmc_card *card = inode->i_private; - char *buf; - ssize_t n = 0; - u8 *ext_csd; - int err, i; - - buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mmc_get_card(card); - err = mmc_get_ext_csd(card, &ext_csd); - mmc_put_card(card); - if (err) - goto out_free; - - for (i = 0; i < 512; i++) - n += sprintf(buf + n, "%02x", ext_csd[i]); - n += sprintf(buf + n, "\n"); - - if (n != EXT_CSD_STR_LEN) { - err = -EINVAL; - goto out_free; - } - - filp->private_data = buf; - kfree(ext_csd); - return 0; - -out_free: - kfree(buf); - return err; -} - -static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char *buf = filp->private_data; - - return simple_read_from_buffer(ubuf, cnt, ppos, - buf, EXT_CSD_STR_LEN); -} - -static int mmc_ext_csd_release(struct inode *inode, struct file *file) -{ - kfree(file->private_data); - return 0; -} - -static const struct file_operations mmc_dbg_ext_csd_fops = { - .open = mmc_ext_csd_open, - .read = mmc_ext_csd_read, - .release = mmc_ext_csd_release, - .llseek = default_llseek, -}; - void mmc_add_card_debugfs(struct mmc_card *card) { struct mmc_host *host = card->host; @@ -382,16 +303,6 @@ void mmc_add_card_debugfs(struct mmc_card *card) if (!debugfs_create_x32("state", S_IRUSR, root, &card->state)) goto err; - if (mmc_card_mmc(card) || mmc_card_sd(card)) - if (!debugfs_create_file("status", S_IRUSR, root, card, - &mmc_dbg_card_status_fops)) - goto err; - - if (mmc_card_mmc(card)) - if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, - &mmc_dbg_ext_csd_fops)) - goto err; - return; err: diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 1503412f826c..64b03d6eaf18 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -118,6 +118,7 @@ void mmc_retune_release(struct mmc_host *host) else WARN_ON(1); } +EXPORT_SYMBOL(mmc_retune_release); int mmc_retune(struct mmc_host *host) { @@ -159,9 +160,9 @@ out: return err; } -static void mmc_retune_timer(unsigned long data) +static void mmc_retune_timer(struct timer_list *t) { - struct mmc_host *host = (struct mmc_host *)data; + struct mmc_host *host = from_timer(host, t, retune_timer); mmc_retune_needed(host); } @@ -178,7 +179,7 @@ static void mmc_retune_timer(unsigned long data) int mmc_of_parse(struct mmc_host *host) { struct device *dev = host->parent; - u32 bus_width; + u32 bus_width, drv_type; int ret; bool cd_cap_invert, cd_gpio_invert = false; bool ro_cap_invert, ro_gpio_invert = false; @@ -320,6 +321,15 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "no-mmc")) host->caps2 |= MMC_CAP2_NO_MMC; + /* Must be after "non-removable" check */ + if (device_property_read_u32(dev, 
"fixed-emmc-driver-type", &drv_type) == 0) { + if (host->caps & MMC_CAP_NONREMOVABLE) + host->fixed_drv_type = drv_type; + else + dev_err(host->parent, + "can't use fixed driver type, media is removable\n"); + } + host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr); if (host->dsr_req && (host->dsr & ~0xffff)) { dev_err(host->parent, @@ -379,7 +389,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work); - setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host); + timer_setup(&host->retune_timer, mmc_retune_timer, 0); /* * By default, hosts do not support SGIO or large requests. @@ -392,6 +402,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) host->max_blk_size = 512; host->max_blk_count = PAGE_SIZE / 512; + host->fixed_drv_type = -EINVAL; + return host; } diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index fb6a76a03833..fb689a1065ed 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -24,6 +24,12 @@ int mmc_retune(struct mmc_host *host); void mmc_retune_pause(struct mmc_host *host); void mmc_retune_unpause(struct mmc_host *host); +static inline void mmc_retune_hold_now(struct mmc_host *host) +{ + host->retune_now = 0; + host->hold_retune += 1; +} + static inline void mmc_retune_recheck(struct mmc_host *host) { if (host->hold_retune <= 1) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 4ffea14b7eb6..a552f61060d2 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -41,11 +41,11 @@ static const unsigned char tran_mant[] = { 35, 40, 45, 50, 55, 60, 70, 80, }; -static const unsigned int tacc_exp[] = { +static const unsigned int taac_exp[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, }; -static const unsigned int tacc_mant[] = { +static const unsigned int taac_mant[] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80, }; @@ -153,8 +153,8 @@ static int mmc_decode_csd(struct mmc_card *card) csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); m = UNSTUFF_BITS(resp, 115, 4); e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10; + csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100; m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); @@ -780,6 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv); +MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev); MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info); MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n", card->ext_csd.device_life_time_est_typ_a, @@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_name.attr, &dev_attr_oemid.attr, &dev_attr_prv.attr, + &dev_attr_rev.attr, &dev_attr_pre_eol_info.attr, &dev_attr_life_time.attr, &dev_attr_serial.attr, @@ -1286,10 +1288,32 @@ out_err: return err; } +static void mmc_select_driver_type(struct mmc_card *card) +{ + int card_drv_type, drive_strength, drv_type; + int fixed_drv_type = card->host->fixed_drv_type; + + card_drv_type = card->ext_csd.raw_driver_strength | + mmc_driver_type_mask(0); + + if (fixed_drv_type >= 0) + drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type) + ? 
fixed_drv_type : 0; + else + drive_strength = mmc_select_drive_strength(card, + card->ext_csd.hs200_max_dtr, + card_drv_type, &drv_type); + + card->drive_strength = drive_strength; + + if (drv_type) + mmc_set_driver_type(card->host, drv_type); +} + static int mmc_select_hs400es(struct mmc_card *card) { struct mmc_host *host = card->host; - int err = 0; + int err = -EINVAL; u8 val; if (!(host->caps & MMC_CAP_8_BIT_DATA)) { @@ -1341,6 +1365,8 @@ static int mmc_select_hs400es(struct mmc_card *card) goto out_err; } + mmc_select_driver_type(card); + /* Switch card to HS400 */ val = EXT_CSD_TIMING_HS400 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; @@ -1374,23 +1400,6 @@ out_err: return err; } -static void mmc_select_driver_type(struct mmc_card *card) -{ - int card_drv_type, drive_strength, drv_type; - - card_drv_type = card->ext_csd.raw_driver_strength | - mmc_driver_type_mask(0); - - drive_strength = mmc_select_drive_strength(card, - card->ext_csd.hs200_max_dtr, - card_drv_type, &drv_type); - - card->drive_strength = drive_strength; - - if (drv_type) - mmc_set_driver_type(card->host, drv_type); -} - /* * For device supporting HS200 mode, the following sequence * should be done before executing the tuning process. @@ -1784,32 +1793,38 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } /* + * Enable Command Queue if supported. Note that Packed Commands cannot + * be used with Command Queue. + */ + card->ext_csd.cmdq_en = false; + if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) { + err = mmc_cmdq_enable(card); + if (err && err != -EBADMSG) + goto free_card; + if (err) { + pr_warn("%s: Enabling CMDQ failed\n", + mmc_hostname(card->host)); + card->ext_csd.cmdq_support = false; + card->ext_csd.cmdq_depth = 0; + err = 0; + } + } + /* * In some cases (e.g. RPMB or mmc_test), the Command Queue must be * disabled for a time, so a flag is needed to indicate to re-enable the * Command Queue. */ card->reenable_cmdq = card->ext_csd.cmdq_en; - /* - * The mandatory minimum values are defined for packed command. - * read: 5, write: 3 - */ - if (card->ext_csd.max_packed_writes >= 3 && - card->ext_csd.max_packed_reads >= 5 && - host->caps2 & MMC_CAP2_PACKED_CMD) { - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_EXP_EVENTS_CTRL, - EXT_CSD_PACKED_EVENT_EN, - card->ext_csd.generic_cmd6_time); - if (err && err != -EBADMSG) - goto free_card; + if (card->ext_csd.cmdq_en && !host->cqe_enabled) { + err = host->cqe_ops->cqe_enable(host, card); if (err) { - pr_warn("%s: Enabling packed event failed\n", - mmc_hostname(card->host)); - card->ext_csd.packed_event_en = 0; - err = 0; + pr_err("%s: Failed to enable CQE, error %d\n", + mmc_hostname(host), err); } else { - card->ext_csd.packed_event_en = 1; + host->cqe_enabled = true; + pr_info("%s: Command Queue Engine enabled\n", + mmc_hostname(host)); } } @@ -1932,14 +1947,14 @@ static void mmc_detect(struct mmc_host *host) { int err; - mmc_get_card(host->card); + mmc_get_card(host->card, NULL); /* * Just check if our card has been removed. 
*/ err = _mmc_detect_card_removed(host); - mmc_put_card(host->card); + mmc_put_card(host->card, NULL); if (err) { mmc_remove(host); diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 5f7c5920231a..908e4db03535 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -83,6 +83,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status) { return __mmc_send_status(card, status, MMC_CMD_RETRIES); } +EXPORT_SYMBOL_GPL(mmc_send_status); static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { @@ -946,7 +947,7 @@ static int mmc_read_bkops_status(struct mmc_card *card) /** * mmc_start_bkops - start BKOPS for supported cards * @card: MMC card to start BKOPS - * @form_exception: A flag to indicate if this function was + * @from_exception: A flag to indicate if this function was * called due to an exception raised by the card * * Start background operations whenever requested. @@ -976,7 +977,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) from_exception) return; - mmc_claim_host(card->host); if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { timeout = MMC_OPS_TIMEOUT_MS; use_busy_signal = true; @@ -994,7 +994,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); mmc_retune_release(card->host); - goto out; + return; } /* @@ -1006,9 +1006,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) mmc_card_set_doing_bkops(card); else mmc_retune_release(card->host); -out: - mmc_release_host(card->host); } +EXPORT_SYMBOL(mmc_start_bkops); /* * Flush the cache to the non-volatile storage. diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 7a304a6e5bf1..478869805b96 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -800,38 +800,44 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test, return ret; } +struct mmc_test_req { + struct mmc_request mrq; + struct mmc_command sbc; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_command status; + struct mmc_data data; +}; + /* * Tests nonblock transfer with certain parameters */ -static void mmc_test_nonblock_reset(struct mmc_request *mrq, - struct mmc_command *cmd, - struct mmc_command *stop, - struct mmc_data *data) +static void mmc_test_req_reset(struct mmc_test_req *rq) +{ + memset(rq, 0, sizeof(struct mmc_test_req)); + + rq->mrq.cmd = &rq->cmd; + rq->mrq.data = &rq->data; + rq->mrq.stop = &rq->stop; +} + +static struct mmc_test_req *mmc_test_req_alloc(void) { - memset(mrq, 0, sizeof(struct mmc_request)); - memset(cmd, 0, sizeof(struct mmc_command)); - memset(data, 0, sizeof(struct mmc_data)); - memset(stop, 0, sizeof(struct mmc_command)); + struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL); - mrq->cmd = cmd; - mrq->data = data; - mrq->stop = stop; + if (rq) + mmc_test_req_reset(rq); + + return rq; } + + static int mmc_test_nonblock_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write, int count) { - struct mmc_request mrq1; - struct mmc_command cmd1; - struct mmc_command stop1; - struct mmc_data data1; - - struct mmc_request mrq2; - struct mmc_command cmd2; - struct mmc_command stop2; - struct mmc_data data2; - + struct mmc_test_req *rq1, *rq2; struct mmc_test_async_req test_areq[2]; struct mmc_async_req *done_areq; struct mmc_async_req *cur_areq = &test_areq[0].areq; @@ -843,12 +849,16 
@@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, test_areq[0].test = test; test_areq[1].test = test; - mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1); - mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2); + rq1 = mmc_test_req_alloc(); + rq2 = mmc_test_req_alloc(); + if (!rq1 || !rq2) { + ret = RESULT_FAIL; + goto err; + } - cur_areq->mrq = &mrq1; + cur_areq->mrq = &rq1->mrq; cur_areq->err_check = mmc_test_check_result_async; - other_areq->mrq = &mrq2; + other_areq->mrq = &rq2->mrq; other_areq->err_check = mmc_test_check_result_async; for (i = 0; i < count; i++) { @@ -861,14 +871,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, goto err; } - if (done_areq) { - if (done_areq->mrq == &mrq2) - mmc_test_nonblock_reset(&mrq2, &cmd2, - &stop2, &data2); - else - mmc_test_nonblock_reset(&mrq1, &cmd1, - &stop1, &data1); - } + if (done_areq) + mmc_test_req_reset(container_of(done_areq->mrq, + struct mmc_test_req, mrq)); + swap(cur_areq, other_areq); dev_addr += blocks; } @@ -877,8 +883,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, if (status != MMC_BLK_SUCCESS) ret = RESULT_FAIL; - return ret; err: + kfree(rq1); + kfree(rq2); return ret; } @@ -2329,28 +2336,6 @@ static int mmc_test_reset(struct mmc_test_card *test) return RESULT_FAIL; } -struct mmc_test_req { - struct mmc_request mrq; - struct mmc_command sbc; - struct mmc_command cmd; - struct mmc_command stop; - struct mmc_command status; - struct mmc_data data; -}; - -static struct mmc_test_req *mmc_test_req_alloc(void) -{ - struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL); - - if (rq) { - rq->mrq.cmd = &rq->cmd; - rq->mrq.data = &rq->data; - rq->mrq.stop = &rq->stop; - } - - return rq; -} - static int mmc_test_send_status(struct mmc_test_card *test, struct mmc_command *cmd) { diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index affa7370ba82..4f33d277b125 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -23,8 +23,6 @@ #include "core.h" #include "card.h" -#define MMC_QUEUE_BOUNCESZ 65536 - /* * Prepare a MMC request. This just filters out odd stuff. 
*/ @@ -32,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) { struct mmc_queue *mq = q->queuedata; - if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) + if (mq && mmc_card_removed(mq->card)) return BLKPREP_KILL; req->rq_flags |= RQF_DONTPREP; @@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q, queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); } -static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host) -{ - unsigned int bouncesz = MMC_QUEUE_BOUNCESZ; - - if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF)) - return 0; - - if (bouncesz > host->max_req_size) - bouncesz = host->max_req_size; - if (bouncesz > host->max_seg_size) - bouncesz = host->max_seg_size; - if (bouncesz > host->max_blk_count * 512) - bouncesz = host->max_blk_count * 512; - - if (bouncesz <= 512) - return 0; - - return bouncesz; -} - /** * mmc_init_request() - initialize the MMC-specific per-request data * @q: the request queue @@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req, struct mmc_card *card = mq->card; struct mmc_host *host = card->host; - if (card->bouncesz) { - mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp); - if (!mq_rq->bounce_buf) - return -ENOMEM; - if (card->bouncesz > 512) { - mq_rq->sg = mmc_alloc_sg(1, gfp); - if (!mq_rq->sg) - return -ENOMEM; - mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512, - gfp); - if (!mq_rq->bounce_sg) - return -ENOMEM; - } - } else { - mq_rq->bounce_buf = NULL; - mq_rq->bounce_sg = NULL; - mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); - if (!mq_rq->sg) - return -ENOMEM; - } + mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); + if (!mq_rq->sg) + return -ENOMEM; return 0; } @@ -212,17 +173,33 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) { struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); - /* It is OK to kfree(NULL) so this will be smooth */ - kfree(mq_rq->bounce_sg); - mq_rq->bounce_sg = NULL; - - kfree(mq_rq->bounce_buf); - mq_rq->bounce_buf = NULL; - kfree(mq_rq->sg); mq_rq->sg = NULL; } +static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) +{ + struct mmc_host *host = card->host; + u64 limit = BLK_BOUNCE_HIGH; + + if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; + + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); + if (mmc_can_erase(card)) + mmc_queue_setup_discard(mq->queue, card); + + blk_queue_bounce_limit(mq->queue, limit); + blk_queue_max_hw_sectors(mq->queue, + min(host->max_blk_count, host->max_req_size / 512)); + blk_queue_max_segments(mq->queue, host->max_segs); + blk_queue_max_segment_size(mq->queue, host->max_seg_size); + + /* Initialize thread_sem even if it is not used */ + sema_init(&mq->thread_sem, 1); +} + /** * mmc_init_queue - initialise a queue structure. 
* @mq: mmc queue @@ -236,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock, const char *subname) { struct mmc_host *host = card->host; - u64 limit = BLK_BOUNCE_HIGH; int ret = -ENOMEM; - if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) - limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; - mq->card = card; mq->queue = blk_alloc_queue(GFP_KERNEL); if (!mq->queue) @@ -260,25 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, } blk_queue_prep_rq(mq->queue, mmc_prep_request); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); - if (mmc_can_erase(card)) - mmc_queue_setup_discard(mq->queue, card); - - card->bouncesz = mmc_queue_calc_bouncesz(host); - if (card->bouncesz) { - blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); - blk_queue_max_segments(mq->queue, card->bouncesz / 512); - blk_queue_max_segment_size(mq->queue, card->bouncesz); - } else { - blk_queue_bounce_limit(mq->queue, limit); - blk_queue_max_hw_sectors(mq->queue, - min(host->max_blk_count, host->max_req_size / 512)); - blk_queue_max_segments(mq->queue, host->max_segs); - blk_queue_max_segment_size(mq->queue, host->max_seg_size); - } - sema_init(&mq->thread_sem, 1); + mmc_setup_queue(mq, card); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", host->index, subname ? subname : ""); @@ -365,56 +321,7 @@ void mmc_queue_resume(struct mmc_queue *mq) */ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) { - unsigned int sg_len; - size_t buflen; - struct scatterlist *sg; struct request *req = mmc_queue_req_to_req(mqrq); - int i; - - if (!mqrq->bounce_buf) - return blk_rq_map_sg(mq->queue, req, mqrq->sg); - - sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg); - - mqrq->bounce_sg_len = sg_len; - - buflen = 0; - for_each_sg(mqrq->bounce_sg, sg, sg_len, i) - buflen += sg->length; - - sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); - - return 1; -} - -/* - * If writing, bounce the data to the buffer before the request - * is sent to the host driver - */ -void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq) -{ - if (!mqrq->bounce_buf) - return; - - if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE) - return; - - sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, - mqrq->bounce_buf, mqrq->sg[0].length); -} - -/* - * If reading, bounce the data from the buffer after the request - * has been handled by the host driver - */ -void mmc_queue_bounce_post(struct mmc_queue_req *mqrq) -{ - if (!mqrq->bounce_buf) - return; - - if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ) - return; - sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, - mqrq->bounce_buf, mqrq->sg[0].length); + return blk_rq_map_sg(mq->queue, req, mqrq->sg); } diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 361b46408e0f..547b457c4251 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef MMC_QUEUE_H #define MMC_QUEUE_H @@ -35,23 +36,26 @@ struct mmc_blk_request { /** * enum mmc_drv_op - enumerates the operations in the mmc_queue_req * @MMC_DRV_OP_IOCTL: ioctl operation + * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation * @MMC_DRV_OP_BOOT_WP: write protect boot partitions + * @MMC_DRV_OP_GET_CARD_STATUS: get card status + * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card */ enum mmc_drv_op { MMC_DRV_OP_IOCTL, + MMC_DRV_OP_IOCTL_RPMB, 
MMC_DRV_OP_BOOT_WP, + MMC_DRV_OP_GET_CARD_STATUS, + MMC_DRV_OP_GET_EXT_CSD, }; struct mmc_queue_req { struct mmc_blk_request brq; struct scatterlist *sg; - char *bounce_buf; - struct scatterlist *bounce_sg; - unsigned int bounce_sg_len; struct mmc_async_req areq; enum mmc_drv_op drv_op; int drv_op_result; - struct mmc_blk_ioc_data **idata; + void *drv_op_data; unsigned int ioc_count; }; @@ -77,12 +81,7 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, extern void mmc_cleanup_queue(struct mmc_queue *); extern void mmc_queue_suspend(struct mmc_queue *); extern void mmc_queue_resume(struct mmc_queue *); - extern unsigned int mmc_queue_map_sg(struct mmc_queue *, struct mmc_queue_req *); -extern void mmc_queue_bounce_pre(struct mmc_queue_req *); -extern void mmc_queue_bounce_post(struct mmc_queue_req *); - -extern int mmc_access_rpmb(struct mmc_queue *); #endif diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index fb725934fa21..f664e9cbc9f8 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * This file contains work-arounds for many known SD/MMC * and SDIO hardware bugs. diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index a1b0aa14d5e3..45bf78f32716 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -39,11 +39,11 @@ static const unsigned char tran_mant[] = { 35, 40, 45, 50, 55, 60, 70, 80, }; -static const unsigned int tacc_exp[] = { +static const unsigned int taac_exp[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, }; -static const unsigned int tacc_mant[] = { +static const unsigned int taac_mant[] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80, }; @@ -111,8 +111,8 @@ static int mmc_decode_csd(struct mmc_card *card) case 0: m = UNSTUFF_BITS(resp, 115, 4); e = UNSTUFF_BITS(resp, 112, 3); - csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; - csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; + csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10; + csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100; m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); @@ -148,8 +148,8 @@ static int mmc_decode_csd(struct mmc_card *card) */ mmc_card_set_blockaddr(card); - csd->tacc_ns = 0; /* Unused */ - csd->tacc_clks = 0; /* Unused */ + csd->taac_ns = 0; /* Unused */ + csd->taac_clks = 0; /* Unused */ m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); @@ -908,6 +908,18 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card) return max_dtr; } +static bool mmc_sd_card_using_v18(struct mmc_card *card) +{ + /* + * According to the SD spec., the Bus Speed Mode (function group 1) bits + * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus + * they can be used to determine if the card has already switched to + * 1.8V signaling. + */ + return card->sw_caps.sd3_bus_mode & + (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50); +} + /* * Handle the detection and initialisation of a card. * @@ -921,9 +933,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, int err; u32 cid[4]; u32 rocr = 0; + bool v18_fixup_failed = false; WARN_ON(!host->claimed); - +retry: err = mmc_sd_get_cid(host, ocr, cid, &rocr); if (err) return err; @@ -989,6 +1002,36 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (err) goto free_card; + /* + * If the card has not been power cycled, it may still be using 1.8V + * signaling. 
Detect that situation and try to initialize a UHS-I (1.8V) + * transfer mode. + */ + if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) && + mmc_sd_card_using_v18(card) && + host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) { + /* + * Re-read switch information in case it has changed since + * oldcard was initialized. + */ + if (oldcard) { + err = mmc_read_switch(card); + if (err) + goto free_card; + } + if (mmc_sd_card_using_v18(card)) { + if (mmc_host_set_uhs_voltage(host) || + mmc_sd_init_uhs_card(card)) { + v18_fixup_failed = true; + mmc_power_cycle(host, ocr); + if (!oldcard) + mmc_remove_card(card); + goto retry; + } + goto done; + } + } + /* Initialization sequence for UHS-I cards */ if (rocr & SD_ROCR_S18A) { err = mmc_sd_init_uhs_card(card); @@ -1021,7 +1064,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, mmc_set_bus_width(host, MMC_BUS_WIDTH_4); } } - +done: host->card = card; return 0; @@ -1056,14 +1099,14 @@ static void mmc_sd_detect(struct mmc_host *host) { int err; - mmc_get_card(host->card); + mmc_get_card(host->card, NULL); /* * Just check if our card has been removed. */ err = _mmc_detect_card_removed(host); - mmc_put_card(host->card); + mmc_put_card(host->card, NULL); if (err) { mmc_sd_remove(host); diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h index 1ada9808c329..497c026a5c5a 100644 --- a/drivers/mmc/core/sd.h +++ b/drivers/mmc/core/sd.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MMC_CORE_SD_H #define _MMC_CORE_SD_H diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index c771843e4c15..7a2eaf8410a3 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -155,7 +155,8 @@ static int sdio_irq_thread(void *_host) * holding of the host lock does not cover too much work * that doesn't require that lock to be held. */ - ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); + ret = __mmc_claim_host(host, NULL, + &host->sdio_irq_thread_abort); if (ret) break; ret = process_sdio_pending_irqs(host); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 5755b69f2f72..567028c9219a 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -4,6 +4,15 @@ comment "MMC/SD/SDIO Host Controller Drivers" +config MMC_DEBUG + bool "MMC host drivers debugging" + depends on MMC != n + help + This is an option for use by developers; most people should + say N here. This enables MMC host driver debugging. And further + added host drivers please don't invent their private macro for + debugging. + config MMC_ARMMMCI tristate "ARM AMBA Multimedia Card Interface support" depends on ARM_AMBA @@ -15,7 +24,7 @@ config MMC_ARMMMCI If unsure, say N. config MMC_QCOM_DML - tristate "Qualcomm Data Mover for SD Card Controller" + bool "Qualcomm Data Mover for SD Card Controller" depends on MMC_ARMMMCI && QCOM_BAM_DMA default y help @@ -343,6 +352,19 @@ config MMC_MESON_GX If you have a controller with this interface, say Y here. +config MMC_MESON_MX_SDIO + tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support" + depends on ARCH_MESON || COMPILE_TEST + depends on COMMON_CLK + depends on HAS_DMA + depends on OF + help + This selects support for the SD/MMC Host Controller on + Amlogic Meson6, Meson8 and Meson8b SoCs. + + If you have a controller with this interface, say Y or M here. + If unsure, say N. 
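Several hunks in this diff convert drivers from setup_timer() to the v4.15 timer API: the mmc_retune_timer change above, and the atmel-mci, dw_mmc and jz4740 changes further down. A minimal sketch of that pattern, using illustrative names only (my_host, my_retune_timeout are not from this patch):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_host {
	struct timer_list retune_timer;
	/* ... other driver state ... */
};

static void my_retune_timeout(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer_list member */
	struct my_host *host = from_timer(host, t, retune_timer);

	/* handle the timeout using host ... */
}

static void my_host_init(struct my_host *host)
{
	/* callback gets the timer_list pointer; no more unsigned long cast */
	timer_setup(&host->retune_timer, my_retune_timeout, 0);
	mod_timer(&host->retune_timer, jiffies + msecs_to_jiffies(100));
}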
+ config MMC_MOXART tristate "MOXART SD/MMC Host Controller support" depends on ARCH_MOXART && MMC @@ -354,7 +376,7 @@ config MMC_MOXART config MMC_SDHCI_ST tristate "SDHCI support on STMicroelectronics SoC" - depends on ARCH_STI + depends on ARCH_STI || FSP2 depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS help @@ -420,6 +442,7 @@ config MMC_SDHCI_MSM tristate "Qualcomm SDHCI Controller Support" depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS help This selects the Secure Digital Host Controller Interface (SDHCI) support present in Qualcomm SOCs. The controller supports @@ -494,7 +517,7 @@ config MMC_GOLDFISH config MMC_SPI tristate "MMC/SD/SDIO over SPI" - depends on SPI_MASTER && !HIGHMEM && HAS_DMA + depends on SPI_MASTER && HAS_DMA select CRC7 select CRC_ITU_T help @@ -575,10 +598,29 @@ config MMC_SDHI depends on SUPERH || ARM || ARM64 depends on SUPERH || ARCH_RENESAS || COMPILE_TEST select MMC_TMIO_CORE + select MMC_SDHI_SYS_DMAC if (SUPERH || ARM) + select MMC_SDHI_INTERNAL_DMAC if ARM64 help This provides support for the SDHI SD/SDIO controller found in Renesas SuperH, ARM and ARM64 based SoCs +config MMC_SDHI_SYS_DMAC + tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC" + depends on MMC_SDHI + help + This provides DMA support for SDHI SD/SDIO controllers + using SYS-DMAC via DMA Engine. This supports the controllers + found in SuperH and Renesas ARM based SoCs. + +config MMC_SDHI_INTERNAL_DMAC + tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering" + depends on ARM64 || COMPILE_TEST + depends on MMC_SDHI + help + This provides DMA support for SDHI SD/SDIO controllers + using on-chip bus mastering. This supports the controllers + found in arm64 based SoCs. + config MMC_CB710 tristate "ENE CB710 MMC/SD Interface support" depends on PCI @@ -635,7 +677,7 @@ config MMC_CAVIUM_OCTEON config MMC_CAVIUM_THUNDERX tristate "Cavium ThunderX SD/MMC Card Interface support" depends on PCI && 64BIT && (ARM64 || COMPILE_TEST) - depends on GPIOLIB + depends on GPIO_THUNDERX depends on OF_ADDRESS help This selects Cavium ThunderX SD/MMC Card Interface. @@ -871,3 +913,15 @@ config MMC_SDHCI_XENON This selects Marvell Xenon eMMC/SD/SDIO SDHCI. If you have a controller with this interface, say Y or M here. If unsure, say N. + +config MMC_SDHCI_OMAP + tristate "TI SDHCI Controller Support" + depends on MMC_SDHCI_PLTFM && OF + help + This selects the Secure Digital Host Controller Interface (SDHCI) + support present in TI's DRA7 SOCs. The controller supports + SD/MMC/SDIO devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
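Further down, the dw_mmc-k3 hunks add dw_mci_get_best_clksmpl(), which chooses a tuning sample phase by locating the longest run of phases that passed tuning and taking its midpoint, using ror32() so runs that wrap past bit 31 stay contiguous. A simplified standalone sketch of that selection idea, ignoring wrap-around; best_sample_phase and pass_mask are illustrative names, not part of this patch:

/*
 * Bit N of pass_mask set means sample phase N passed tuning.
 * Return the middle of the longest run of passing phases,
 * or -1 if every phase failed.
 */
static int best_sample_phase(unsigned int pass_mask)
{
	int best_start = 0, best_len = 0, start = -1;
	int i;

	for (i = 0; i < 32; i++) {
		if (pass_mask & (1U << i)) {
			if (start < 0)
				start = i;
			if (i - start + 1 > best_len) {
				best_len = i - start + 1;
				best_start = start;
			}
		} else {
			start = -1;
		}
	}

	return best_len ? best_start + best_len / 2 : -1;
}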
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 4d4547116311..a43cf0d5a5d3 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -1,9 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 # # Makefile for MMC/SD host controller drivers # -obj-$(CONFIG_MMC_ARMMMCI) += mmci.o -obj-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o +obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o +armmmci-y := mmci.o +armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o obj-$(CONFIG_MMC_PXA) += pxamci.o obj-$(CONFIG_MMC_MXC) += mxcmmc.o obj-$(CONFIG_MMC_MXS) += mxs-mmc.o @@ -36,7 +38,13 @@ obj-$(CONFIG_MMC_S3C) += s3cmci.o obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o -obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o renesas_sdhi_sys_dmac.o +obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o +ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y) +obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o +endif +ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y) +obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o +endif obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o @@ -57,6 +65,7 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o obj-$(CONFIG_MMC_USHC) += ushc.o obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o +obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o @@ -82,6 +91,7 @@ obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o +obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 5b3e1c9bb75f..63fe5091ca59 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c @@ -290,7 +290,6 @@ static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) u16 status; int end_command = 0; int end_transfer = 0; - int transfer_error = 0; int state_changed = 0; int cmd_timeout = 0; @@ -322,9 +321,7 @@ static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) if (end_command) goldfish_mmc_cmd_done(host, host->cmd); - if (transfer_error) - goldfish_mmc_xfer_done(host, host->data); - else if (end_transfer) { + if (end_transfer) { host->dma_done = 1; goldfish_mmc_end_of_data(host, host->data); } else if (host->data != NULL) { @@ -347,8 +344,7 @@ static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) mmc_detect_change(host->mmc, 0); } - if (!end_command && !end_transfer && - !transfer_error && !state_changed && !cmd_timeout) { + if (!end_command && !end_transfer && !state_changed && !cmd_timeout) { status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS); dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status); if (status != 0) { diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 97de2d32ba84..e55f3932d580 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -665,14 +665,15 @@ atmci_of_init(struct platform_device *pdev) for_each_child_of_node(np, cnp) { if (of_property_read_u32(cnp, "reg", &slot_id)) { - dev_warn(&pdev->dev, "reg property is missing for %s\n", - cnp->full_name); + dev_warn(&pdev->dev, "reg property is missing for %pOF\n", + cnp); continue; } if 
(slot_id >= ATMCI_MAX_NR_SLOTS) { dev_warn(&pdev->dev, "can't have more than %d slots\n", ATMCI_MAX_NR_SLOTS); + of_node_put(cnp); break; } @@ -731,11 +732,11 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host, return 0; } -static void atmci_timeout_timer(unsigned long data) +static void atmci_timeout_timer(struct timer_list *t) { struct atmel_mci *host; - host = (struct atmel_mci *)data; + host = from_timer(host, t, timer); dev_dbg(&host->pdev->dev, "software timeout\n"); @@ -1083,7 +1084,6 @@ static u32 atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) { u32 iflags, tmp; - unsigned int sg_len; int i; data->error = -EINPROGRESS; @@ -1108,8 +1108,8 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) /* Configure PDC */ host->data_size = data->blocks * data->blksz; - sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, - mmc_get_dma_dir(data)); + dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); if ((!host->caps.has_rwproof) && (host->data->flags & MMC_DATA_WRITE)) { @@ -1661,9 +1661,9 @@ static void atmci_command_complete(struct atmel_mci *host, cmd->error = 0; } -static void atmci_detect_change(unsigned long data) +static void atmci_detect_change(struct timer_list *t) { - struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data; + struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer); bool present; bool present_old; @@ -2349,8 +2349,7 @@ static int atmci_init_slot(struct atmel_mci *host, if (gpio_is_valid(slot->detect_pin)) { int ret; - setup_timer(&slot->detect_timer, atmci_detect_change, - (unsigned long)slot); + timer_setup(&slot->detect_timer, atmci_detect_change, 0); ret = request_irq(gpio_to_irq(slot->detect_pin), atmci_detect_interrupt, @@ -2563,7 +2562,7 @@ static int atmci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); + timer_setup(&host->timer, atmci_timeout_timer, 0); pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index abba9a2a78b8..229dc18f0581 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1252,7 +1252,7 @@ static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) mutex_unlock(&host->mutex); } -static struct mmc_host_ops bcm2835_ops = { +static const struct mmc_host_ops bcm2835_ops = { .request = bcm2835_request, .set_ios = bcm2835_set_ios, .hw_reset = bcm2835_reset, diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c index 951d2cdd7888..22aded1065ae 100644 --- a/drivers/mmc/host/cavium-octeon.c +++ b/drivers/mmc/host/cavium-octeon.c @@ -342,18 +342,7 @@ static struct platform_driver octeon_mmc_driver = { }, }; -static int __init octeon_mmc_init(void) -{ - return platform_driver_register(&octeon_mmc_driver); -} - -static void __exit octeon_mmc_cleanup(void) -{ - platform_driver_unregister(&octeon_mmc_driver); -} - -module_init(octeon_mmc_init); -module_exit(octeon_mmc_cleanup); +module_platform_driver(octeon_mmc_driver); MODULE_AUTHOR("Cavium Inc. <support@cavium.com>"); MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card"); diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index b9cc95998799..eee08d81b242 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -7,6 +7,7 @@ * * Copyright (C) 2016 Cavium Inc. 
*/ +#include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/mmc/mmc.h> @@ -149,8 +150,11 @@ error: for (i = 0; i < CAVIUM_MAX_MMC; i++) { if (host->slot[i]) cvm_mmc_of_slot_remove(host->slot[i]); - if (host->slot_pdev[i]) + if (host->slot_pdev[i]) { + get_device(&host->slot_pdev[i]->dev); of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + put_device(&host->slot_pdev[i]->dev); + } } clk_disable_unprepare(host->clk); return ret; diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index 3686d77c717b..ed5cefb83768 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c @@ -957,19 +957,17 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot) ret = of_property_read_u32(node, "reg", &id); if (ret) { - dev_err(dev, "Missing or invalid reg property on %s\n", - of_node_full_name(node)); + dev_err(dev, "Missing or invalid reg property on %pOF\n", node); return ret; } if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) { - dev_err(dev, "Invalid reg property on %s\n", - of_node_full_name(node)); + dev_err(dev, "Invalid reg property on %pOF\n", node); return -EINVAL; } ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) return ret; /* * Legacy Octeon firmware has no regulator entry, fall-back to @@ -1040,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) */ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | - MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF; + MMC_CAP_3_3V_DDR; if (host->use_sg) mmc->max_segs = 16; diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 621ce47e0e4a..351330dfb954 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1062,7 +1062,7 @@ static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) } } -static struct mmc_host_ops mmc_davinci_ops = { +static const struct mmc_host_ops mmc_davinci_ops = { .request = mmc_davinci_request, .set_ios = mmc_davinci_set_ios, .get_cd = mmc_davinci_get_cd, diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index e38fb0020bb1..73fd75c3c824 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -8,6 +8,8 @@ * (at your option) any later version. 
*/ +#include <linux/bitops.h> +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/mfd/syscon.h> #include <linux/mmc/host.h> @@ -28,7 +30,35 @@ #define AO_SCTRL_SEL18 BIT(10) #define AO_SCTRL_CTRL3 0x40C +#define DWMMC_SDIO_ID 2 + +#define SOC_SCTRL_SCPERCTRL5 (0x314) +#define SDCARD_IO_SEL18 BIT(2) + +#define SDCARD_RD_THRESHOLD (512) + +#define GENCLK_DIV (7) + +#define GPIO_CLK_ENABLE BIT(16) +#define GPIO_CLK_DIV_MASK GENMASK(11, 8) +#define GPIO_USE_SAMPLE_DLY_MASK GENMASK(13, 13) +#define UHS_REG_EXT_SAMPLE_PHASE_MASK GENMASK(20, 16) +#define UHS_REG_EXT_SAMPLE_DRVPHASE_MASK GENMASK(25, 21) +#define UHS_REG_EXT_SAMPLE_DLY_MASK GENMASK(30, 26) + +#define TIMING_MODE 3 +#define TIMING_CFG_NUM 10 + +#define NUM_PHASES (40) + +#define ENABLE_SHIFT_MIN_SMPL (4) +#define ENABLE_SHIFT_MAX_SMPL (12) +#define USE_DLY_MIN_SMPL (11) +#define USE_DLY_MAX_SMPL (14) + struct k3_priv { + int ctrl_id; + u32 cur_speed; struct regmap *reg; }; @@ -38,6 +68,41 @@ static unsigned long dw_mci_hi6220_caps[] = { 0 }; +struct hs_timing { + u32 drv_phase; + u32 smpl_dly; + u32 smpl_phase_max; + u32 smpl_phase_min; +}; + +static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = { + { /* reserved */ }, + { /* SD */ + {7, 0, 15, 15,}, /* 0: LEGACY 400k */ + {6, 0, 4, 4,}, /* 1: MMC_HS */ + {6, 0, 3, 3,}, /* 2: SD_HS */ + {6, 0, 15, 15,}, /* 3: SDR12 */ + {6, 0, 2, 2,}, /* 4: SDR25 */ + {4, 0, 11, 0,}, /* 5: SDR50 */ + {6, 4, 15, 0,}, /* 6: SDR104 */ + {0}, /* 7: DDR50 */ + {0}, /* 8: DDR52 */ + {0}, /* 9: HS200 */ + }, + { /* SDIO */ + {7, 0, 15, 15,}, /* 0: LEGACY 400k */ + {0}, /* 1: MMC_HS */ + {6, 0, 15, 15,}, /* 2: SD_HS */ + {6, 0, 15, 15,}, /* 3: SDR12 */ + {6, 0, 0, 0,}, /* 4: SDR25 */ + {4, 0, 12, 0,}, /* 5: SDR50 */ + {5, 4, 15, 0,}, /* 6: SDR104 */ + {0}, /* 7: DDR50 */ + {0}, /* 8: DDR52 */ + {0}, /* 9: HS200 */ + } +}; + static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios) { int ret; @@ -66,6 +131,10 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host) if (IS_ERR(priv->reg)) priv->reg = NULL; + priv->ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); + if (priv->ctrl_id < 0) + priv->ctrl_id = 0; + host->priv = priv; return 0; } @@ -144,7 +213,236 @@ static const struct dw_mci_drv_data hi6220_data = { .execute_tuning = dw_mci_hi6220_execute_tuning, }; +static void dw_mci_hs_set_timing(struct dw_mci *host, int timing, + int smpl_phase) +{ + u32 drv_phase; + u32 smpl_dly; + u32 use_smpl_dly = 0; + u32 enable_shift = 0; + u32 reg_value; + int ctrl_id; + struct k3_priv *priv; + + priv = host->priv; + ctrl_id = priv->ctrl_id; + + drv_phase = hs_timing_cfg[ctrl_id][timing].drv_phase; + smpl_dly = hs_timing_cfg[ctrl_id][timing].smpl_dly; + if (smpl_phase == -1) + smpl_phase = (hs_timing_cfg[ctrl_id][timing].smpl_phase_max + + hs_timing_cfg[ctrl_id][timing].smpl_phase_min) / 2; + + switch (timing) { + case MMC_TIMING_UHS_SDR104: + if (smpl_phase >= USE_DLY_MIN_SMPL && + smpl_phase <= USE_DLY_MAX_SMPL) + use_smpl_dly = 1; + /* fallthrough */ + case MMC_TIMING_UHS_SDR50: + if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL && + smpl_phase <= ENABLE_SHIFT_MAX_SMPL) + enable_shift = 1; + break; + } + + mci_writel(host, GPIO, 0x0); + usleep_range(5, 10); + + reg_value = FIELD_PREP(UHS_REG_EXT_SAMPLE_PHASE_MASK, smpl_phase) | + FIELD_PREP(UHS_REG_EXT_SAMPLE_DLY_MASK, smpl_dly) | + FIELD_PREP(UHS_REG_EXT_SAMPLE_DRVPHASE_MASK, drv_phase); + mci_writel(host, UHS_REG_EXT, reg_value); + + mci_writel(host, ENABLE_SHIFT, enable_shift); + + reg_value = 
FIELD_PREP(GPIO_CLK_DIV_MASK, GENCLK_DIV) | + FIELD_PREP(GPIO_USE_SAMPLE_DLY_MASK, use_smpl_dly); + mci_writel(host, GPIO, (unsigned int)reg_value | GPIO_CLK_ENABLE); + + /* We should delay 1ms wait for timing setting finished. */ + usleep_range(1000, 2000); +} + +static int dw_mci_hi3660_init(struct dw_mci *host) +{ + mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(SDCARD_RD_THRESHOLD, + SDMMC_CARD_RD_THR_EN)); + + dw_mci_hs_set_timing(host, MMC_TIMING_LEGACY, -1); + host->bus_hz /= (GENCLK_DIV + 1); + + return 0; +} + +static int dw_mci_set_sel18(struct dw_mci *host, bool set) +{ + int ret; + unsigned int val; + struct k3_priv *priv; + + priv = host->priv; + + val = set ? SDCARD_IO_SEL18 : 0; + ret = regmap_update_bits(priv->reg, SOC_SCTRL_SCPERCTRL5, + SDCARD_IO_SEL18, val); + if (ret) { + dev_err(host->dev, "sel18 %u error\n", val); + return ret; + } + + return 0; +} + +static void dw_mci_hi3660_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + int ret; + unsigned long wanted; + unsigned long actual; + struct k3_priv *priv = host->priv; + + if (!ios->clock || ios->clock == priv->cur_speed) + return; + + wanted = ios->clock * (GENCLK_DIV + 1); + ret = clk_set_rate(host->ciu_clk, wanted); + if (ret) { + dev_err(host->dev, "failed to set rate %luHz\n", wanted); + return; + } + actual = clk_get_rate(host->ciu_clk); + + dw_mci_hs_set_timing(host, ios->timing, -1); + host->bus_hz = actual / (GENCLK_DIV + 1); + host->current_speed = 0; + priv->cur_speed = host->bus_hz; +} + +static int dw_mci_get_best_clksmpl(unsigned int sample_flag) +{ + int i; + int interval; + unsigned int v; + unsigned int len; + unsigned int range_start = 0; + unsigned int range_length = 0; + unsigned int middle_range = 0; + + if (!sample_flag) + return -EIO; + + if (~sample_flag == 0) + return 0; + + i = ffs(sample_flag) - 1; + + /* + * A clock cycle is divided into 32 phases, + * each of which is represented by a bit, + * finding the optimal phase. 
+ */ + while (i < 32) { + v = ror32(sample_flag, i); + len = ffs(~v) - 1; + + if (len > range_length) { + range_length = len; + range_start = i; + } + + interval = ffs(v >> len) - 1; + if (interval < 0) + break; + + i += len + interval; + } + + middle_range = range_start + range_length / 2; + if (middle_range >= 32) + middle_range %= 32; + + return middle_range; +} + +static int dw_mci_hi3660_execute_tuning(struct dw_mci_slot *slot, u32 opcode) +{ + int i = 0; + struct dw_mci *host = slot->host; + struct mmc_host *mmc = slot->mmc; + int smpl_phase = 0; + u32 tuning_sample_flag = 0; + int best_clksmpl = 0; + + for (i = 0; i < NUM_PHASES; ++i, ++smpl_phase) { + smpl_phase %= 32; + + mci_writel(host, TMOUT, ~0); + dw_mci_hs_set_timing(host, mmc->ios.timing, smpl_phase); + + if (!mmc_send_tuning(mmc, opcode, NULL)) + tuning_sample_flag |= (1 << smpl_phase); + else + tuning_sample_flag &= ~(1 << smpl_phase); + } + + best_clksmpl = dw_mci_get_best_clksmpl(tuning_sample_flag); + if (best_clksmpl < 0) { + dev_err(host->dev, "All phases bad!\n"); + return -EIO; + } + + dw_mci_hs_set_timing(host, mmc->ios.timing, best_clksmpl); + + dev_info(host->dev, "tuning ok best_clksmpl %u tuning_sample_flag %x\n", + best_clksmpl, tuning_sample_flag); + return 0; +} + +static int dw_mci_hi3660_switch_voltage(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + int ret = 0; + struct dw_mci_slot *slot = mmc_priv(mmc); + struct k3_priv *priv; + struct dw_mci *host; + + host = slot->host; + priv = host->priv; + + if (!priv || !priv->reg) + return 0; + + if (priv->ctrl_id == DWMMC_SDIO_ID) + return 0; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + ret = dw_mci_set_sel18(host, 0); + else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + ret = dw_mci_set_sel18(host, 1); + if (ret) + return ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret) { + dev_err(host->dev, "Regulator set error %d\n", ret); + return ret; + } + } + + return 0; +} + +static const struct dw_mci_drv_data hi3660_data = { + .init = dw_mci_hi3660_init, + .set_ios = dw_mci_hi3660_set_ios, + .parse_dt = dw_mci_hi6220_parse_dt, + .execute_tuning = dw_mci_hi3660_execute_tuning, + .switch_voltage = dw_mci_hi3660_switch_voltage, +}; + static const struct of_device_id dw_mci_k3_match[] = { + { .compatible = "hisilicon,hi3660-dw-mshc", .data = &hi3660_data, }, { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, }, { .compatible = "hisilicon,hi6220-dw-mshc", .data = &hi6220_data, }, {}, diff --git a/drivers/mmc/host/dw_mmc-zx.h b/drivers/mmc/host/dw_mmc-zx.h index f369997a39ec..09ac52766f14 100644 --- a/drivers/mmc/host/dw_mmc-zx.h +++ b/drivers/mmc/host/dw_mmc-zx.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DW_MMC_ZX_H_ #define _DW_MMC_ZX_H_ diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index a9dfb26972f2..0aa39975f33b 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -398,6 +398,42 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) return cmdr; } +static inline void dw_mci_set_cto(struct dw_mci *host) +{ + unsigned int cto_clks; + unsigned int cto_div; + unsigned int cto_ms; + unsigned long irqflags; + + cto_clks = mci_readl(host, TMOUT) & 0xff; + cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; + if (cto_div == 0) + cto_div = 1; + cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); + + /* add a bit spare time */ + cto_ms += 10; + + /* + * The durations we're working with are 
fairly short so we have to be + * extra careful about synchronization here. Specifically in hardware a + * command timeout is _at most_ 5.1 ms, so that means we expect an + * interrupt (either command done or timeout) to come rather quickly + * after the mci_writel. ...but just in case we have a long interrupt + * latency let's add a bit of paranoia. + * + * In general we'll assume that at least an interrupt will be asserted + * in hardware by the time the cto_timer runs. ...and if it hasn't + * been asserted in hardware by that time then we'll assume it'll never + * come. + */ + spin_lock_irqsave(&host->irq_lock, irqflags); + if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) + mod_timer(&host->cto_timer, + jiffies + msecs_to_jiffies(cto_ms) + 1); + spin_unlock_irqrestore(&host->irq_lock, irqflags); +} + static void dw_mci_start_command(struct dw_mci *host, struct mmc_command *cmd, u32 cmd_flags) { @@ -411,6 +447,10 @@ static void dw_mci_start_command(struct dw_mci *host, dw_mci_wait_while_busy(host, cmd_flags); mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); + + /* response expected command only */ + if (cmd_flags & SDMMC_CMD_RESP_EXP) + dw_mci_set_cto(host); } static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) @@ -777,7 +817,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host, struct dma_slave_config cfg; struct dma_async_tx_descriptor *desc = NULL; struct scatterlist *sgl = host->data->sg; - const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; u32 sg_elems = host->data->sg_len; u32 fifoth_val; u32 fifo_offset = host->fifo_reg - host->regs; @@ -984,7 +1024,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc) static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) { unsigned int blksz = data->blksz; - const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; u32 fifo_width = 1 << host->data_shift; u32 blksz_depth = blksz / fifo_width, fifoth_val; u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; @@ -1896,15 +1936,55 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) static void dw_mci_set_drto(struct dw_mci *host) { unsigned int drto_clks; + unsigned int drto_div; unsigned int drto_ms; + unsigned long irqflags; drto_clks = mci_readl(host, TMOUT) >> 8; - drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000); + drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; + if (drto_div == 0) + drto_div = 1; + drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, + host->bus_hz); /* add a bit spare time */ drto_ms += 10; - mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms)); + spin_lock_irqsave(&host->irq_lock, irqflags); + if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) + mod_timer(&host->dto_timer, + jiffies + msecs_to_jiffies(drto_ms)); + spin_unlock_irqrestore(&host->irq_lock, irqflags); +} + +static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) +{ + if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) + return false; + + /* + * Really be certain that the timer has stopped. This is a bit of + * paranoia and could only really happen if we had really bad + * interrupt latency and the interrupt routine and timeout were + * running concurrently so that the del_timer() in the interrupt + * handler couldn't run. 
+ */ + WARN_ON(del_timer_sync(&host->cto_timer)); + clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); + + return true; +} + +static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) +{ + if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) + return false; + + /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ + WARN_ON(del_timer_sync(&host->dto_timer)); + clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); + + return true; } static void dw_mci_tasklet_func(unsigned long priv) @@ -1933,8 +2013,7 @@ static void dw_mci_tasklet_func(unsigned long priv) case STATE_SENDING_CMD11: case STATE_SENDING_CMD: - if (!test_and_clear_bit(EVENT_CMD_COMPLETE, - &host->pending_events)) + if (!dw_mci_clear_pending_cmd_complete(host)) break; cmd = host->cmd; @@ -2049,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv) /* fall through */ case STATE_DATA_BUSY: - if (!test_and_clear_bit(EVENT_DATA_COMPLETE, - &host->pending_events)) { + if (!dw_mci_clear_pending_data_complete(host)) { /* * If data error interrupt comes but data over * interrupt doesn't come within the given time. @@ -2103,8 +2181,7 @@ static void dw_mci_tasklet_func(unsigned long priv) /* fall through */ case STATE_SENDING_STOP: - if (!test_and_clear_bit(EVENT_CMD_COMPLETE, - &host->pending_events)) + if (!dw_mci_clear_pending_cmd_complete(host)) break; /* CMD error in data command */ @@ -2551,6 +2628,8 @@ done: static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) { + del_timer(&host->cto_timer); + if (!host->cmd_status) host->cmd_status = status; @@ -2575,6 +2654,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) struct dw_mci *host = dev_id; u32 pending; struct dw_mci_slot *slot = host->slot; + unsigned long irqflags; pending = mci_readl(host, MINTSTS); /* read-only mask reg */ @@ -2582,8 +2662,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) /* Check volt switch first, since it can look like an error */ if ((host->state == STATE_SENDING_CMD11) && (pending & SDMMC_INT_VOLT_SWITCH)) { - unsigned long irqflags; - mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); pending &= ~SDMMC_INT_VOLT_SWITCH; @@ -2599,10 +2677,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } if (pending & DW_MCI_CMD_ERROR_FLAGS) { + spin_lock_irqsave(&host->irq_lock, irqflags); + + del_timer(&host->cto_timer); mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); host->cmd_status = pending; smp_wmb(); /* drain writebuffer */ set_bit(EVENT_CMD_COMPLETE, &host->pending_events); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); } if (pending & DW_MCI_DATA_ERROR_FLAGS) { @@ -2615,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } if (pending & SDMMC_INT_DATA_OVER) { + spin_lock_irqsave(&host->irq_lock, irqflags); + del_timer(&host->dto_timer); mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); @@ -2627,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } set_bit(EVENT_DATA_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); } if (pending & SDMMC_INT_RXDR) { @@ -2642,8 +2729,12 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } if (pending & SDMMC_INT_CMD_DONE) { + spin_lock_irqsave(&host->irq_lock, irqflags); + mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); dw_mci_cmd_interrupt(host, pending); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); } if (pending & SDMMC_INT_CD) { @@ -2720,7 +2811,7 @@ static int dw_mci_init_slot(struct dw_mci 
*host) /*if there are external regulators, get them*/ ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto err_host_allocated; if (!mmc->ocr_avail) @@ -2900,9 +2991,9 @@ no_dma: host->use_dma = TRANS_MODE_PIO; } -static void dw_mci_cmd11_timer(unsigned long arg) +static void dw_mci_cmd11_timer(struct timer_list *t) { - struct dw_mci *host = (struct dw_mci *)arg; + struct dw_mci *host = from_timer(host, t, cmd11_timer); if (host->state != STATE_SENDING_CMD11) { dev_warn(host->dev, "Unexpected CMD11 timeout\n"); @@ -2914,10 +3005,89 @@ static void dw_mci_cmd11_timer(unsigned long arg) tasklet_schedule(&host->tasklet); } -static void dw_mci_dto_timer(unsigned long arg) +static void dw_mci_cto_timer(struct timer_list *t) { - struct dw_mci *host = (struct dw_mci *)arg; + struct dw_mci *host = from_timer(host, t, cto_timer); + unsigned long irqflags; + u32 pending; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + /* + * If somehow we have very bad interrupt latency it's remotely possible + * that the timer could fire while the interrupt is still pending or + * while the interrupt is midway through running. Let's be paranoid + * and detect those two cases. Note that this is paranoia is somewhat + * justified because in this function we don't actually cancel the + * pending command in the controller--we just assume it will never come. + */ + pending = mci_readl(host, MINTSTS); /* read-only mask reg */ + if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { + /* The interrupt should fire; no need to act but we can warn */ + dev_warn(host->dev, "Unexpected interrupt latency\n"); + goto exit; + } + if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { + /* Presumably interrupt handler couldn't delete the timer */ + dev_warn(host->dev, "CTO timeout when already completed\n"); + goto exit; + } + + /* + * Continued paranoia to make sure we're in the state we expect. + * This paranoia isn't really justified but it seems good to be safe. + */ + switch (host->state) { + case STATE_SENDING_CMD11: + case STATE_SENDING_CMD: + case STATE_SENDING_STOP: + /* + * If CMD_DONE interrupt does NOT come in sending command + * state, we should notify the driver to terminate current + * transfer and report a command timeout to the core. + */ + host->cmd_status = SDMMC_INT_RTO; + set_bit(EVENT_CMD_COMPLETE, &host->pending_events); + tasklet_schedule(&host->tasklet); + break; + default: + dev_warn(host->dev, "Unexpected command timeout, state %d\n", + host->state); + break; + } + +exit: + spin_unlock_irqrestore(&host->irq_lock, irqflags); +} + +static void dw_mci_dto_timer(struct timer_list *t) +{ + struct dw_mci *host = from_timer(host, t, dto_timer); + unsigned long irqflags; + u32 pending; + + spin_lock_irqsave(&host->irq_lock, irqflags); + /* + * The DTO timer is much longer than the CTO timer, so it's even less + * likely that we'll these cases, but it pays to be paranoid. + */ + pending = mci_readl(host, MINTSTS); /* read-only mask reg */ + if (pending & SDMMC_INT_DATA_OVER) { + /* The interrupt should fire; no need to act but we can warn */ + dev_warn(host->dev, "Unexpected data interrupt latency\n"); + goto exit; + } + if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { + /* Presumably interrupt handler couldn't delete the timer */ + dev_warn(host->dev, "DTO timeout when already completed\n"); + goto exit; + } + + /* + * Continued paranoia to make sure we're in the state we expect. + * This paranoia isn't really justified but it seems good to be safe. 
+ */ switch (host->state) { case STATE_SENDING_DATA: case STATE_DATA_BUSY: @@ -2932,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg) tasklet_schedule(&host->tasklet); break; default: + dev_warn(host->dev, "Unexpected data timeout, state %d\n", + host->state); break; } + +exit: + spin_unlock_irqrestore(&host->irq_lock, irqflags); } #ifdef CONFIG_OF @@ -2950,14 +3125,14 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) return ERR_PTR(-ENOMEM); /* find reset controller when exist */ - pdata->rstc = devm_reset_control_get_optional(dev, "reset"); + pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset"); if (IS_ERR(pdata->rstc)) { if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) return ERR_PTR(-EPROBE_DEFER); } /* find out number of slots supported */ - if (device_property_read_u32(dev, "num-slots", &pdata->num_slots)) + if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots)) dev_info(dev, "'num-slots' was deprecated.\n"); if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) @@ -3067,6 +3242,12 @@ int dw_mci_probe(struct dw_mci *host) goto err_clk_ciu; } + if (!IS_ERR(host->pdata->rstc)) { + reset_control_assert(host->pdata->rstc); + usleep_range(10, 50); + reset_control_deassert(host->pdata->rstc); + } + if (drv_data && drv_data->init) { ret = drv_data->init(host); if (ret) { @@ -3076,17 +3257,9 @@ int dw_mci_probe(struct dw_mci *host) } } - if (!IS_ERR(host->pdata->rstc)) { - reset_control_assert(host->pdata->rstc); - usleep_range(10, 50); - reset_control_deassert(host->pdata->rstc); - } - - setup_timer(&host->cmd11_timer, - dw_mci_cmd11_timer, (unsigned long)host); - - setup_timer(&host->dto_timer, - dw_mci_dto_timer, (unsigned long)host); + timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); + timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); + timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); spin_lock_init(&host->lock); spin_lock_init(&host->irq_lock); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 75da3756955d..e3124f06a47e 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -74,7 +74,8 @@ struct dw_mci_dma_slave { * @stop_abort: The command currently prepared for stoping transfer. * @prev_blksz: The former transfer blksz record. * @timing: Record of current ios timing. - * @use_dma: Whether DMA channel is initialized or not. + * @use_dma: Which DMA channel is in use for the current transfer, zero + * denotes PIO mode. * @using_dma: Whether DMA is in use for the current transfer. * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. * @sg_dma: Bus address of DMA buffer. @@ -126,6 +127,7 @@ struct dw_mci_dma_slave { * @irq: The irq value to be passed to request_irq. * @sdio_id0: Number of slot0 in the SDIO interrupt registers. * @cmd11_timer: Timer for SD3.0 voltage switch over scheme. + * @cto_timer: Timer for broken command transfer over scheme. * @dto_timer: Timer for broken data transfer over scheme. 
* * Locking @@ -232,6 +234,7 @@ struct dw_mci { int sdio_id0; struct timer_list cmd11_timer; + struct timer_list cto_timer; struct timer_list dto_timer; }; @@ -314,6 +317,8 @@ struct dw_mci_board { #define SDMMC_DSCADDR 0x094 #define SDMMC_BUFADDR 0x098 #define SDMMC_CDTHRCTL 0x100 +#define SDMMC_UHS_REG_EXT 0x108 +#define SDMMC_ENABLE_SHIFT 0x110 #define SDMMC_DATA(x) (x) /* * Registers to support idmac 64-bit address mode diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 7db8c7a8d38d..712e08d9a45e 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -586,9 +586,9 @@ poll_timeout: return true; } -static void jz4740_mmc_timeout(unsigned long data) +static void jz4740_mmc_timeout(struct timer_list *t) { - struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data; + struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer); if (!test_and_clear_bit(0, &host->waiting)) return; @@ -1036,8 +1036,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev) jz4740_mmc_reset(host); jz4740_mmc_clock_disable(host); - setup_timer(&host->timeout_timer, jz4740_mmc_timeout, - (unsigned long)host); + timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0); host->use_dma = true; if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0) diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index de962c2d5e00..e0862d3f65b3 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -42,22 +42,18 @@ #define SD_EMMC_CLOCK 0x0 #define CLK_DIV_MASK GENMASK(5, 0) -#define CLK_DIV_MAX 63 #define CLK_SRC_MASK GENMASK(7, 6) -#define CLK_SRC_XTAL 0 /* external crystal */ -#define CLK_SRC_XTAL_RATE 24000000 -#define CLK_SRC_PLL 1 /* FCLK_DIV2 */ -#define CLK_SRC_PLL_RATE 1000000000 #define CLK_CORE_PHASE_MASK GENMASK(9, 8) #define CLK_TX_PHASE_MASK GENMASK(11, 10) #define CLK_RX_PHASE_MASK GENMASK(13, 12) -#define CLK_PHASE_0 0 -#define CLK_PHASE_90 1 -#define CLK_PHASE_180 2 -#define CLK_PHASE_270 3 +#define CLK_TX_DELAY_MASK GENMASK(19, 16) +#define CLK_RX_DELAY_MASK GENMASK(23, 20) +#define CLK_DELAY_STEP_PS 200 +#define CLK_PHASE_STEP 30 +#define CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP) #define CLK_ALWAYS_ON BIT(24) -#define SD_EMMC_DElAY 0x4 +#define SD_EMMC_DELAY 0x4 #define SD_EMMC_ADJUST 0x8 #define SD_EMMC_CALOUT 0x10 #define SD_EMMC_START 0x40 @@ -81,18 +77,25 @@ #define SD_EMMC_STATUS 0x48 #define STATUS_BUSY BIT(31) +#define STATUS_DATI GENMASK(23, 16) #define SD_EMMC_IRQ_EN 0x4c -#define IRQ_EN_MASK GENMASK(13, 0) #define IRQ_RXD_ERR_MASK GENMASK(7, 0) #define IRQ_TXD_ERR BIT(8) #define IRQ_DESC_ERR BIT(9) #define IRQ_RESP_ERR BIT(10) +#define IRQ_CRC_ERR \ + (IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR) #define IRQ_RESP_TIMEOUT BIT(11) #define IRQ_DESC_TIMEOUT BIT(12) +#define IRQ_TIMEOUTS \ + (IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT) #define IRQ_END_OF_CHAIN BIT(13) #define IRQ_RESP_STATUS BIT(14) #define IRQ_SDIO BIT(15) +#define IRQ_EN_MASK \ + (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\ + IRQ_SDIO) #define SD_EMMC_CMD_CFG 0x50 #define SD_EMMC_CMD_ARG 0x54 @@ -118,12 +121,6 @@ #define MUX_CLK_NUM_PARENTS 2 -struct meson_tuning_params { - u8 core_phase; - u8 tx_phase; - u8 rx_phase; -}; - struct sd_emmc_desc { u32 cmd_cfg; u32 cmd_arg; @@ -139,12 +136,14 @@ struct meson_host { spinlock_t lock; void __iomem *regs; struct clk *core_clk; - struct clk_mux mux; - struct clk *mux_clk; - unsigned long current_clock; + struct clk *mmc_clk; + 
struct clk *rx_clk; + struct clk *tx_clk; + unsigned long req_rate; - struct clk_divider cfg_div; - struct clk *cfg_div_clk; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_clk_gate; unsigned int bounce_buf_size; void *bounce_buf; @@ -152,7 +151,6 @@ struct meson_host { struct sd_emmc_desc *descs; dma_addr_t descs_dma_addr; - struct meson_tuning_params tp; bool vqmmc_enabled; }; @@ -179,6 +177,90 @@ struct meson_host { #define CMD_RESP_MASK GENMASK(31, 1) #define CMD_RESP_SRAM BIT(0) +struct meson_mmc_phase { + struct clk_hw hw; + void __iomem *reg; + unsigned long phase_mask; + unsigned long delay_mask; + unsigned int delay_step_ps; +}; + +#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw) + +static int meson_mmc_clk_get_phase(struct clk_hw *hw) +{ + struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw); + unsigned int phase_num = 1 << hweight_long(mmc->phase_mask); + unsigned long period_ps, p, d; + int degrees; + u32 val; + + val = readl(mmc->reg); + p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask); + degrees = p * 360 / phase_num; + + if (mmc->delay_mask) { + period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000, + clk_get_rate(hw->clk)); + d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask); + degrees += d * mmc->delay_step_ps * 360 / period_ps; + degrees %= 360; + } + + return degrees; +} + +static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc, + unsigned int phase, + unsigned int delay) +{ + u32 val; + + val = readl(mmc->reg); + val &= ~mmc->phase_mask; + val |= phase << __ffs(mmc->phase_mask); + + if (mmc->delay_mask) { + val &= ~mmc->delay_mask; + val |= delay << __ffs(mmc->delay_mask); + } + + writel(val, mmc->reg); +} + +static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees) +{ + struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw); + unsigned int phase_num = 1 << hweight_long(mmc->phase_mask); + unsigned long period_ps, d = 0, r; + uint64_t p; + + p = degrees % 360; + + if (!mmc->delay_mask) { + p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num); + } else { + period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000, + clk_get_rate(hw->clk)); + + /* First compute the phase index (p), the remainder (r) is the + * part we'll try to acheive using the delays (d). + */ + r = do_div(p, 360 / phase_num); + d = DIV_ROUND_CLOSEST(r * period_ps, + 360 * mmc->delay_step_ps); + d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask)); + } + + meson_mmc_apply_phase_delay(mmc, p, d); + return 0; +} + +static const struct clk_ops meson_mmc_clk_phase_ops = { + .get_phase = meson_mmc_clk_get_phase, + .set_phase = meson_mmc_clk_set_phase, +}; + static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data) { unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC; @@ -271,58 +353,102 @@ static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, mmc_get_dma_dir(data)); } -static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate) +static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios) +{ + if (ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_MMC_HS400) + return true; + + return false; +} + +/* + * Gating the clock on this controller is tricky. It seems the mmc clock + * is also used by the controller. It may crash during some operation if the + * clock is stopped. The safest thing to do, whenever possible, is to keep + * clock running at stop it at the pad using the pinmux. 
+ */ +static void meson_mmc_clk_gate(struct meson_host *host) +{ + u32 cfg; + + if (host->pins_clk_gate) { + pinctrl_select_state(host->pinctrl, host->pins_clk_gate); + } else { + /* + * If the pinmux is not provided - default to the classic and + * unsafe method + */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg |= CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); + } +} + +static void meson_mmc_clk_ungate(struct meson_host *host) +{ + u32 cfg; + + if (host->pins_clk_gate) + pinctrl_select_state(host->pinctrl, host->pins_default); + + /* Make sure the clock is not stopped in the controller */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg &= ~CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); +} + +static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios) { struct mmc_host *mmc = host->mmc; + unsigned long rate = ios->clock; int ret; u32 cfg; - if (clk_rate) { - if (WARN_ON(clk_rate > mmc->f_max)) - clk_rate = mmc->f_max; - else if (WARN_ON(clk_rate < mmc->f_min)) - clk_rate = mmc->f_min; - } + /* DDR modes require higher module clock */ + if (meson_mmc_timing_is_ddr(ios)) + rate <<= 1; - if (clk_rate == host->current_clock) + /* Same request - bail-out */ + if (host->req_rate == rate) return 0; /* stop clock */ - cfg = readl(host->regs + SD_EMMC_CFG); - if (!(cfg & CFG_STOP_CLOCK)) { - cfg |= CFG_STOP_CLOCK; - writel(cfg, host->regs + SD_EMMC_CFG); - } - - dev_dbg(host->dev, "change clock rate %u -> %lu\n", - mmc->actual_clock, clk_rate); + meson_mmc_clk_gate(host); + host->req_rate = 0; - if (!clk_rate) { + if (!rate) { mmc->actual_clock = 0; - host->current_clock = 0; /* return with clock being stopped */ return 0; } - ret = clk_set_rate(host->cfg_div_clk, clk_rate); + /* Stop the clock during rate change to avoid glitches */ + cfg = readl(host->regs + SD_EMMC_CFG); + cfg |= CFG_STOP_CLOCK; + writel(cfg, host->regs + SD_EMMC_CFG); + + ret = clk_set_rate(host->mmc_clk, rate); if (ret) { dev_err(host->dev, "Unable to set cfg_div_clk to %lu. 
ret=%d\n", - clk_rate, ret); + rate, ret); return ret; } - mmc->actual_clock = clk_get_rate(host->cfg_div_clk); - host->current_clock = clk_rate; + host->req_rate = rate; + mmc->actual_clock = clk_get_rate(host->mmc_clk); - if (clk_rate != mmc->actual_clock) - dev_dbg(host->dev, - "divider requested rate %lu != actual rate %u\n", - clk_rate, mmc->actual_clock); + /* We should report the real output frequency of the controller */ + if (meson_mmc_timing_is_ddr(ios)) + mmc->actual_clock >>= 1; + + dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock); + if (ios->clock != mmc->actual_clock) + dev_dbg(host->dev, "requested rate was %u\n", ios->clock); /* (re)start clock */ - cfg = readl(host->regs + SD_EMMC_CFG); - cfg &= ~CFG_STOP_CLOCK; - writel(cfg, host->regs + SD_EMMC_CFG); + meson_mmc_clk_ungate(host); return 0; } @@ -335,11 +461,21 @@ static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate) static int meson_mmc_clk_init(struct meson_host *host) { struct clk_init_data init; + struct clk_mux *mux; + struct clk_divider *div; + struct meson_mmc_phase *core, *tx, *rx; + struct clk *clk; char clk_name[32]; int i, ret = 0; const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; - const char *clk_div_parents[1]; - u32 clk_reg, cfg; + const char *clk_parent[1]; + u32 clk_reg; + + /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ + clk_reg = 0; + clk_reg |= CLK_ALWAYS_ON; + clk_reg |= CLK_DIV_MASK; + writel(clk_reg, host->regs + SD_EMMC_CLOCK); /* get the mux parents */ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { @@ -358,103 +494,253 @@ static int meson_mmc_clk_init(struct meson_host *host) } /* create the mux */ + mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev)); init.name = clk_name; init.ops = &clk_mux_ops; init.flags = 0; init.parent_names = mux_parent_names; init.num_parents = MUX_CLK_NUM_PARENTS; - host->mux.reg = host->regs + SD_EMMC_CLOCK; - host->mux.shift = __bf_shf(CLK_SRC_MASK); - host->mux.mask = CLK_SRC_MASK; - host->mux.flags = 0; - host->mux.table = NULL; - host->mux.hw.init = &init; - host->mux_clk = devm_clk_register(host->dev, &host->mux.hw); - if (WARN_ON(IS_ERR(host->mux_clk))) - return PTR_ERR(host->mux_clk); + mux->reg = host->regs + SD_EMMC_CLOCK; + mux->shift = __ffs(CLK_SRC_MASK); + mux->mask = CLK_SRC_MASK >> mux->shift; + mux->hw.init = &init; + + clk = devm_clk_register(host->dev, &mux->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); /* create the divider */ + div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL); + if (!div) + return -ENOMEM; + snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev)); init.name = clk_name; init.ops = &clk_divider_ops; init.flags = CLK_SET_RATE_PARENT; - clk_div_parents[0] = __clk_get_name(host->mux_clk); - init.parent_names = clk_div_parents; - init.num_parents = ARRAY_SIZE(clk_div_parents); + clk_parent[0] = __clk_get_name(clk); + init.parent_names = clk_parent; + init.num_parents = 1; + + div->reg = host->regs + SD_EMMC_CLOCK; + div->shift = __ffs(CLK_DIV_MASK); + div->width = __builtin_popcountl(CLK_DIV_MASK); + div->hw.init = &init; + div->flags = CLK_DIVIDER_ONE_BASED; + + clk = devm_clk_register(host->dev, &div->hw); + if (WARN_ON(IS_ERR(clk))) + return PTR_ERR(clk); + + /* create the mmc core clock */ + core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL); + if (!core) + return -ENOMEM; - host->cfg_div.reg = host->regs + SD_EMMC_CLOCK; - host->cfg_div.shift = 
__bf_shf(CLK_DIV_MASK); - host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK); - host->cfg_div.hw.init = &init; - host->cfg_div.flags = CLK_DIVIDER_ONE_BASED | - CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO; + snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev)); + init.name = clk_name; + init.ops = &meson_mmc_clk_phase_ops; + init.flags = CLK_SET_RATE_PARENT; + clk_parent[0] = __clk_get_name(clk); + init.parent_names = clk_parent; + init.num_parents = 1; - host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw); - if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk))) - return PTR_ERR(host->cfg_div_clk); + core->reg = host->regs + SD_EMMC_CLOCK; + core->phase_mask = CLK_CORE_PHASE_MASK; + core->hw.init = &init; - /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ - clk_reg = 0; - clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); - clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); - clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); - clk_reg |= FIELD_PREP(CLK_SRC_MASK, CLK_SRC_XTAL); - clk_reg |= FIELD_PREP(CLK_DIV_MASK, CLK_DIV_MAX); - clk_reg &= ~CLK_ALWAYS_ON; - writel(clk_reg, host->regs + SD_EMMC_CLOCK); + host->mmc_clk = devm_clk_register(host->dev, &core->hw); + if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk))) + return PTR_ERR(host->mmc_clk); - /* Ensure clock starts in "auto" mode, not "always on" */ - cfg = readl(host->regs + SD_EMMC_CFG); - cfg &= ~CFG_CLK_ALWAYS_ON; - cfg |= CFG_AUTO_CLK; - writel(cfg, host->regs + SD_EMMC_CFG); + /* create the mmc tx clock */ + tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL); + if (!tx) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev)); + init.name = clk_name; + init.ops = &meson_mmc_clk_phase_ops; + init.flags = 0; + clk_parent[0] = __clk_get_name(host->mmc_clk); + init.parent_names = clk_parent; + init.num_parents = 1; + + tx->reg = host->regs + SD_EMMC_CLOCK; + tx->phase_mask = CLK_TX_PHASE_MASK; + tx->delay_mask = CLK_TX_DELAY_MASK; + tx->delay_step_ps = CLK_DELAY_STEP_PS; + tx->hw.init = &init; + + host->tx_clk = devm_clk_register(host->dev, &tx->hw); + if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk))) + return PTR_ERR(host->tx_clk); + + /* create the mmc rx clock */ + rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL); + if (!rx) + return -ENOMEM; + + snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev)); + init.name = clk_name; + init.ops = &meson_mmc_clk_phase_ops; + init.flags = 0; + clk_parent[0] = __clk_get_name(host->mmc_clk); + init.parent_names = clk_parent; + init.num_parents = 1; + + rx->reg = host->regs + SD_EMMC_CLOCK; + rx->phase_mask = CLK_RX_PHASE_MASK; + rx->delay_mask = CLK_RX_DELAY_MASK; + rx->delay_step_ps = CLK_DELAY_STEP_PS; + rx->hw.init = &init; - ret = clk_prepare_enable(host->cfg_div_clk); + host->rx_clk = devm_clk_register(host->dev, &rx->hw); + if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk))) + return PTR_ERR(host->rx_clk); + + /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ + host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000); + ret = clk_set_rate(host->mmc_clk, host->mmc->f_min); if (ret) return ret; - /* Get the nearest minimum clock to 400KHz */ - host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000); + /* + * Set phases : These values are mostly the datasheet recommended ones + * except for the Tx phase. Datasheet recommends 180 but some cards + * fail at initialisation with it. 
270 works just fine, it fixes these + initialisation issues and enables eMMC DDR52 mode. + */ + clk_set_phase(host->mmc_clk, 180); + clk_set_phase(host->tx_clk, 270); + clk_set_phase(host->rx_clk, 0); - ret = meson_mmc_clk_set(host, host->mmc->f_min); - if (ret) - clk_disable_unprepare(host->cfg_div_clk); + return clk_prepare_enable(host->mmc_clk); +} - return ret; +static void meson_mmc_shift_map(unsigned long *map, unsigned long shift) +{ + DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM); + DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM); + + /* + * shift the bitmap right and reintroduce the dropped bits on the left + * of the bitmap + */ + bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM); + bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift, + CLK_PHASE_POINT_NUM); + bitmap_or(map, left, right, CLK_PHASE_POINT_NUM); } -static void meson_mmc_set_tuning_params(struct mmc_host *mmc) +static void meson_mmc_find_next_region(unsigned long *map, + unsigned long *start, + unsigned long *stop) +{ + *start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start); + *stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start); +} + +static int meson_mmc_find_tuning_point(unsigned long *test) +{ + unsigned long shift, stop, offset = 0, start = 0, size = 0; + + /* Get the all good/all bad situation out of the way */ + if (bitmap_full(test, CLK_PHASE_POINT_NUM)) + return 0; /* All points are good so point 0 will do */ + else if (bitmap_empty(test, CLK_PHASE_POINT_NUM)) + return -EIO; /* No successful tuning point */ + + /* + * Now we know there is at least one region to find. Make sure it does + * not wrap by shifting the bitmap if necessary + */ + shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM); + if (shift != 0) + meson_mmc_shift_map(test, shift); + + while (start < CLK_PHASE_POINT_NUM) { + meson_mmc_find_next_region(test, &start, &stop); + + if ((stop - start) > size) { + offset = start; + size = stop - start; + } + + start = stop; + } + + /* Get the center point of the region */ + offset += (size / 2); + + /* Shift the result back */ + offset = (offset + shift) % CLK_PHASE_POINT_NUM; + + return offset; +} + +static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, + struct clk *clk) +{ + int point, ret; + DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM); + + dev_dbg(mmc_dev(mmc), "%s phase/delay tuning...\n", + __clk_get_name(clk)); + bitmap_zero(test, CLK_PHASE_POINT_NUM); + + /* Explore tuning points */ + for (point = 0; point < CLK_PHASE_POINT_NUM; point++) { + clk_set_phase(clk, point * CLK_PHASE_STEP); + ret = mmc_send_tuning(mmc, opcode, NULL); + if (!ret) + set_bit(point, test); + } + + /* Find the optimal tuning point and apply it */ + point = meson_mmc_find_tuning_point(test); + if (point < 0) + return point; /* tuning failed */ + + clk_set_phase(clk, point * CLK_PHASE_STEP); + dev_dbg(mmc_dev(mmc), "success with phase: %d\n", + clk_get_phase(clk)); + return 0; +} + +static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct meson_host *host = mmc_priv(mmc); - u32 regval; + int ret; - /* stop clock */ - regval = readl(host->regs + SD_EMMC_CFG); - regval |= CFG_STOP_CLOCK; - writel(regval, host->regs + SD_EMMC_CFG); - - regval = readl(host->regs + SD_EMMC_CLOCK); - regval &= ~CLK_CORE_PHASE_MASK; - regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); - regval &= ~CLK_TX_PHASE_MASK; - regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); - regval &= ~CLK_RX_PHASE_MASK; - regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); - 
writel(regval, host->regs + SD_EMMC_CLOCK); - - /* start clock */ - regval = readl(host->regs + SD_EMMC_CFG); - regval &= ~CFG_STOP_CLOCK; - writel(regval, host->regs + SD_EMMC_CFG); + /* + * If this is the initial tuning, try to get a sane Rx starting + * phase before doing the actual tuning. + */ + if (!mmc->doing_retune) { + ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); + + if (ret) + return ret; + } + + ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); + if (ret) + return ret; + + return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); } static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct meson_host *host = mmc_priv(mmc); - u32 bus_width; - u32 val, orig; + u32 bus_width, val; + int err; /* * GPIO regulator, only controls switching between 1v8 and @@ -475,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_POWER_UP: if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + + /* Reset phases */ + clk_set_phase(host->rx_clk, 0); + clk_set_phase(host->tx_clk, 270); + break; case MMC_POWER_ON: @@ -482,7 +773,7 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) int ret = regulator_enable(mmc->supply.vqmmc); if (ret < 0) - dev_err(mmc_dev(mmc), + dev_err(host->dev, "failed to enable vqmmc regulator\n"); else host->vqmmc_enabled = true; @@ -491,9 +782,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) break; } - - meson_mmc_clk_set(host, ios->clock); - /* Bus width */ switch (ios->bus_width) { case MMC_BUS_WIDTH_1: @@ -512,26 +800,23 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } val = readl(host->regs + SD_EMMC_CFG); - orig = val; - val &= ~CFG_BUS_WIDTH_MASK; val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width); val &= ~CFG_DDR; - if (ios->timing == MMC_TIMING_UHS_DDR50 || - ios->timing == MMC_TIMING_MMC_DDR52 || - ios->timing == MMC_TIMING_MMC_HS400) + if (meson_mmc_timing_is_ddr(ios)) val |= CFG_DDR; val &= ~CFG_CHK_DS; if (ios->timing == MMC_TIMING_MMC_HS400) val |= CFG_CHK_DS; - if (val != orig) { - writel(val, host->regs + SD_EMMC_CFG); - dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n", - __func__, orig, val); - } + err = meson_mmc_clk_set(host, ios); + if (err) + dev_err(host->dev, "Failed to set clock: %d\n,", err); + + writel(val, host->regs + SD_EMMC_CFG); + dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val); } static void meson_mmc_request_done(struct mmc_host *mmc, @@ -729,57 +1014,40 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) struct mmc_command *cmd; struct mmc_data *data; u32 irq_en, status, raw_status; - irqreturn_t ret = IRQ_HANDLED; + irqreturn_t ret = IRQ_NONE; - if (WARN_ON(!host)) + if (WARN_ON(!host) || WARN_ON(!host->cmd)) return IRQ_NONE; - cmd = host->cmd; - - if (WARN_ON(!cmd)) - return IRQ_NONE; + spin_lock(&host->lock); + cmd = host->cmd; data = cmd->data; - - spin_lock(&host->lock); irq_en = readl(host->regs + SD_EMMC_IRQ_EN); raw_status = readl(host->regs + SD_EMMC_STATUS); status = raw_status & irq_en; - if (!status) { - dev_warn(host->dev, "Spurious IRQ! 
status=0x%08x, irq_en=0x%08x\n", - raw_status, irq_en); - ret = IRQ_NONE; - goto out; - } - - meson_mmc_read_resp(host->mmc, cmd); - cmd->error = 0; - if (status & IRQ_RXD_ERR_MASK) { - dev_dbg(host->dev, "Unhandled IRQ: RXD error\n"); - cmd->error = -EILSEQ; - } - if (status & IRQ_TXD_ERR) { - dev_dbg(host->dev, "Unhandled IRQ: TXD error\n"); - cmd->error = -EILSEQ; - } - if (status & IRQ_DESC_ERR) - dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n"); - if (status & IRQ_RESP_ERR) { - dev_dbg(host->dev, "Unhandled IRQ: Response error\n"); + if (status & IRQ_CRC_ERR) { + dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status); cmd->error = -EILSEQ; + ret = IRQ_HANDLED; + goto out; } - if (status & IRQ_RESP_TIMEOUT) { - dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n"); + + if (status & IRQ_TIMEOUTS) { + dev_dbg(host->dev, "Timeout - status 0x%08x\n", status); cmd->error = -ETIMEDOUT; + ret = IRQ_HANDLED; + goto out; } - if (status & IRQ_DESC_TIMEOUT) { - dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n"); - cmd->error = -ETIMEDOUT; + + meson_mmc_read_resp(host->mmc, cmd); + + if (status & IRQ_SDIO) { + dev_dbg(host->dev, "IRQ: SDIO TODO.\n"); + ret = IRQ_HANDLED; } - if (status & IRQ_SDIO) - dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n"); if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { if (data && !cmd->error) @@ -787,26 +1055,20 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) if (meson_mmc_bounce_buf_read(data) || meson_mmc_get_next_command(cmd)) ret = IRQ_WAKE_THREAD; - } else { - dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n", - status, cmd->opcode, cmd->arg, - cmd->flags, cmd->mrq->stop ? 1 : 0); - if (cmd->data) { - struct mmc_data *data = cmd->data; - - dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)", - data->blksz, data->blocks, data->flags, - data->flags & MMC_DATA_WRITE ? "write" : "", - data->flags & MMC_DATA_READ ? "read" : ""); - } + else + ret = IRQ_HANDLED; } out: - /* ack all (enabled) interrupts */ - writel(status, host->regs + SD_EMMC_STATUS); + /* ack all enabled interrupts */ + writel(irq_en, host->regs + SD_EMMC_STATUS); if (ret == IRQ_HANDLED) meson_mmc_request_done(host->mmc, cmd->mrq); + else if (ret == IRQ_NONE) + dev_warn(host->dev, + "Unexpected IRQ! status=0x%08x, irq_en=0x%08x\n", + raw_status, irq_en); spin_unlock(&host->lock); return ret; @@ -839,29 +1101,6 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) return IRQ_HANDLED; } -static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) -{ - struct meson_host *host = mmc_priv(mmc); - struct meson_tuning_params tp_old = host->tp; - int ret = -EINVAL, i, cmd_error; - - dev_info(mmc_dev(mmc), "(re)tuning...\n"); - - for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) { - host->tp.rx_phase = i; - /* exclude the active parameter set if retuning */ - if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) && - mmc->doing_retune) - continue; - meson_mmc_set_tuning_params(mmc); - ret = mmc_send_tuning(mmc, opcode, &cmd_error); - if (!ret) - break; - } - - return ret; -} - /* * NOTE: we only need this until the GPIO/pinctrl driver can handle * interrupts. For now, the MMC core will use this for polling. 
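
The tuning rework in the hunks above reduces to a simple selection rule: sweep all 12 phase points (30 degree steps), record in a bitmap which points pass mmc_send_tuning(), rotate the bitmap so the passing window cannot wrap, and then program the centre of the widest window. The snippet below is only a minimal, self-contained userspace sketch of that rule; the pick_tuning_point() helper and the plain-integer bitmap are hypothetical, while the driver itself uses the kernel bitmap API and clk_set_phase() as shown above.

#include <stdio.h>

#define PHASE_POINTS 12				/* 360 degrees / 30 degree steps */

/* Return the centre of the widest run of passing points, or -1 if none pass. */
static int pick_tuning_point(unsigned int pass_map)
{
	const unsigned int full = (1u << PHASE_POINTS) - 1;
	int shift, start, best_start = 0, best_len = 0, i;

	pass_map &= full;
	if (pass_map == full)
		return 0;			/* every phase works, point 0 will do */
	if (pass_map == 0)
		return -1;			/* no successful tuning point */

	/* Rotate so the passing window cannot wrap around the end of the map */
	for (shift = 0; pass_map & (1u << shift); shift++)
		;
	pass_map = ((pass_map >> shift) |
		    (pass_map << (PHASE_POINTS - shift))) & full;

	/* Find the longest contiguous run of set bits */
	for (i = 0, start = -1; i <= PHASE_POINTS; i++) {
		if (i < PHASE_POINTS && (pass_map & (1u << i))) {
			if (start < 0)
				start = i;
		} else if (start >= 0) {
			if (i - start > best_len) {
				best_len = i - start;
				best_start = start;
			}
			start = -1;
		}
	}

	/* Centre of that run, rotated back to the original origin */
	return (best_start + best_len / 2 + shift) % PHASE_POINTS;
}

int main(void)
{
	/* Example: points 3..7 pass (bits 3-7 set); expect their centre, point 5 */
	printf("tuning point: %d\n", pick_tuning_point(0x0f8));
	return 0;
}
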
@@ -888,6 +1127,38 @@ static void meson_mmc_cfg_init(struct meson_host *host) writel(cfg, host->regs + SD_EMMC_CFG); } +static int meson_mmc_card_busy(struct mmc_host *mmc) +{ + struct meson_host *host = mmc_priv(mmc); + u32 regval; + + regval = readl(host->regs + SD_EMMC_STATUS); + + /* We are only interrested in lines 0 to 3, so mask the other ones */ + return !(FIELD_GET(STATUS_DATI, regval) & 0xf); +} + +static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + /* vqmmc regulator is available */ + if (!IS_ERR(mmc->supply.vqmmc)) { + /* + * The usual amlogic setup uses a GPIO to switch from one + * regulator to the other. While the voltage ramp up is + * pretty fast, care must be taken when switching from 3.3v + * to 1.8v. Please make sure the regulator framework is aware + * of your own regulator constraints + */ + return mmc_regulator_set_vqmmc(mmc, ios); + } + + /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + return 0; + + return -EINVAL; +} + static const struct mmc_host_ops meson_mmc_ops = { .request = meson_mmc_request, .set_ios = meson_mmc_set_ios, @@ -895,6 +1166,8 @@ static const struct mmc_host_ops meson_mmc_ops = { .pre_req = meson_mmc_pre_req, .post_req = meson_mmc_post_req, .execute_tuning = meson_mmc_execute_tuning, + .card_busy = meson_mmc_card_busy, + .start_signal_voltage_switch = meson_mmc_voltage_switch, }; static int meson_mmc_probe(struct platform_device *pdev) @@ -917,7 +1190,7 @@ static int meson_mmc_probe(struct platform_device *pdev) /* Get regulators and the supported OCR mask */ host->vqmmc_enabled = false; ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto free_host; ret = mmc_of_parse(mmc); @@ -941,6 +1214,27 @@ static int meson_mmc_probe(struct platform_device *pdev) goto free_host; } + host->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(host->pinctrl)) { + ret = PTR_ERR(host->pinctrl); + goto free_host; + } + + host->pins_default = pinctrl_lookup_state(host->pinctrl, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(host->pins_default)) { + ret = PTR_ERR(host->pins_default); + goto free_host; + } + + host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, + "clk-gate"); + if (IS_ERR(host->pins_clk_gate)) { + dev_warn(&pdev->dev, + "can't get clk-gate pinctrl, using clk_stop bit\n"); + host->pins_clk_gate = NULL; + } + host->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(host->core_clk)) { ret = PTR_ERR(host->core_clk); @@ -951,30 +1245,28 @@ static int meson_mmc_probe(struct platform_device *pdev) if (ret) goto free_host; - host->tp.core_phase = CLK_PHASE_180; - host->tp.tx_phase = CLK_PHASE_0; - host->tp.rx_phase = CLK_PHASE_0; - ret = meson_mmc_clk_init(host); if (ret) goto err_core_clk; + /* set config to sane default */ + meson_mmc_cfg_init(host); + /* Stop execution */ writel(0, host->regs + SD_EMMC_START); - /* clear, ack, enable all interrupts */ + /* clear, ack and enable interrupts */ writel(0, host->regs + SD_EMMC_IRQ_EN); - writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS); - writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN); - - /* set config to sane default */ - meson_mmc_cfg_init(host); + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, + host->regs + SD_EMMC_STATUS); + writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, + host->regs + SD_EMMC_IRQ_EN); ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, meson_mmc_irq_thread, IRQF_SHARED, NULL, host); if (ret) - goto err_div_clk; + goto err_init_clk; 
mmc->caps |= MMC_CAP_CMD23; mmc->max_blk_count = CMD_CFG_LENGTH_MASK; @@ -990,7 +1282,7 @@ static int meson_mmc_probe(struct platform_device *pdev) if (host->bounce_buf == NULL) { dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); ret = -ENOMEM; - goto err_div_clk; + goto err_init_clk; } host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, @@ -1009,8 +1301,8 @@ static int meson_mmc_probe(struct platform_device *pdev) err_bounce_buf: dma_free_coherent(host->dev, host->bounce_buf_size, host->bounce_buf, host->bounce_dma_addr); -err_div_clk: - clk_disable_unprepare(host->cfg_div_clk); +err_init_clk: + clk_disable_unprepare(host->mmc_clk); err_core_clk: clk_disable_unprepare(host->core_clk); free_host: @@ -1032,7 +1324,7 @@ static int meson_mmc_remove(struct platform_device *pdev) dma_free_coherent(host->dev, host->bounce_buf_size, host->bounce_buf, host->bounce_dma_addr); - clk_disable_unprepare(host->cfg_div_clk); + clk_disable_unprepare(host->mmc_clk); clk_disable_unprepare(host->core_clk); mmc_free_host(host->mmc); diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c new file mode 100644 index 000000000000..09cb89645d06 --- /dev/null +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -0,0 +1,768 @@ +/* + * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller + * + * Copyright (C) 2015 Endless Mobile, Inc. + * Author: Carlo Caione <carlo@endlessm.com> + * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/timer.h> +#include <linux/types.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/slot-gpio.h> + +#define MESON_MX_SDIO_ARGU 0x00 + +#define MESON_MX_SDIO_SEND 0x04 + #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0) + #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8) + #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16) + #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17) + #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18) + #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19) + #define MESON_MX_SDIO_SEND_DATA BIT(20) + #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21) + #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24) + +#define MESON_MX_SDIO_CONF 0x08 + #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0 + #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10 + #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10) + #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11) + #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12) + #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18) + #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19) + #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20) + #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21) + #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23) + #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29) + +#define 
MESON_MX_SDIO_IRQS 0x0c + #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0) + #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4) + #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5) + #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6) + #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7) + #define MESON_MX_SDIO_IRQS_IF_INT BIT(8) + #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9) + #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12) + #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16) + #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17) + #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18) + #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19) + +#define MESON_MX_SDIO_IRQC 0x10 + #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3) + #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4) + #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) + #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) + #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) + #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) + +#define MESON_MX_SDIO_MULT 0x14 + #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0) + #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2) + #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3) + #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4) + #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5) + #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8) + #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10) + #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11) + #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12) + +#define MESON_MX_SDIO_ADDR 0x18 + +#define MESON_MX_SDIO_EXT 0x1c + #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16) + +#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024) +#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1) +#define MESON_MX_SDIO_MAX_SLOTS 3 + +struct meson_mx_mmc_host { + struct device *controller_dev; + + struct clk *parent_clk; + struct clk *core_clk; + struct clk_divider cfg_div; + struct clk *cfg_div_clk; + struct clk_fixed_factor fixed_factor; + struct clk *fixed_factor_clk; + + void __iomem *base; + int irq; + spinlock_t irq_lock; + + struct timer_list cmd_timeout; + + unsigned int slot_id; + struct mmc_host *mmc; + + struct mmc_request *mrq; + struct mmc_command *cmd; + int error; +}; + +static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask, + u32 val) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 regval; + + regval = readl(host->base + reg); + regval &= ~mask; + regval |= (val & mask); + + writel(regval, host->base + reg); +} + +static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host) +{ + writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC); + udelay(2); +} + +static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) + return cmd->mrq->cmd; + else if (mmc_op_multi(cmd->opcode) && + (!cmd->mrq->sbc || cmd->error || cmd->data->error)) + return cmd->mrq->stop; + else + return NULL; +} + +static void meson_mx_mmc_start_cmd(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + unsigned int pack_size; + unsigned long irqflags, timeout; + u32 mult, send = 0, ext = 0; + + host->cmd = cmd; + + if (cmd->busy_timeout) + timeout = msecs_to_jiffies(cmd->busy_timeout); + 
else + timeout = msecs_to_jiffies(1000); + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + case MMC_RSP_R3: + /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */ + send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45); + break; + case MMC_RSP_R2: + /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */ + send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133); + send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8; + break; + default: + break; + } + + if (!(cmd->flags & MMC_RSP_CRC)) + send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7; + + if (cmd->flags & MMC_RSP_BUSY) + send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY; + + if (cmd->data) { + send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK, + (cmd->data->blocks - 1)); + + pack_size = cmd->data->blksz * BITS_PER_BYTE; + if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) + pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4; + else + pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1; + + ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK, + pack_size); + + if (cmd->data->flags & MMC_DATA_WRITE) + send |= MESON_MX_SDIO_SEND_DATA; + else + send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA; + + cmd->data->bytes_xfered = 0; + } + + send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK, + (0x40 | cmd->opcode)); + + spin_lock_irqsave(&host->irq_lock, irqflags); + + mult = readl(host->base + MESON_MX_SDIO_MULT); + mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK; + mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id); + mult |= BIT(31); + writel(mult, host->base + MESON_MX_SDIO_MULT); + + /* enable the CMD done interrupt */ + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC, + MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN, + MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN); + + /* clear pending interrupts */ + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS, + MESON_MX_SDIO_IRQS_CMD_INT, + MESON_MX_SDIO_IRQS_CMD_INT); + + writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU); + writel(ext, host->base + MESON_MX_SDIO_EXT); + writel(send, host->base + MESON_MX_SDIO_SEND); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + mod_timer(&host->cmd_timeout, jiffies + timeout); +} + +static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host) +{ + struct mmc_request *mrq; + + mrq = host->mrq; + + host->mrq = NULL; + host->cmd = NULL; + + mmc_request_done(host->mmc, mrq); +} + +static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + unsigned short vdd = ios->vdd; + unsigned long clk_rate = ios->clock; + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF, + MESON_MX_SDIO_CONF_BUS_WIDTH, 0); + break; + + case MMC_BUS_WIDTH_4: + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF, + MESON_MX_SDIO_CONF_BUS_WIDTH, + MESON_MX_SDIO_CONF_BUS_WIDTH); + break; + + case MMC_BUS_WIDTH_8: + default: + dev_err(mmc_dev(mmc), "unsupported bus width: %d\n", + ios->bus_width); + host->error = -EINVAL; + return; + } + + host->error = clk_set_rate(host->cfg_div_clk, ios->clock); + if (host->error) { + dev_warn(mmc_dev(mmc), + "failed to set MMC clock to %lu: %d\n", + clk_rate, host->error); + return; + } + + mmc->actual_clock = clk_get_rate(host->cfg_div_clk); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + vdd = 0; + /* fall-through: */ + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) { + host->error = mmc_regulator_set_ocr(mmc, + mmc->supply.vmmc, + vdd); + if (host->error) + return; + } + break; + } +} + +static int meson_mx_mmc_map_dma(struct 
mmc_host *mmc, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + int dma_len; + struct scatterlist *sg; + + if (!data) + return 0; + + sg = data->sg; + if (sg->offset & 3 || sg->length & 3) { + dev_err(mmc_dev(mmc), + "unaligned scatterlist: offset %x length %d\n", + sg->offset, sg->length); + return -EINVAL; + } + + dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + if (dma_len <= 0) { + dev_err(mmc_dev(mmc), "dma_map_sg failed\n"); + return -ENOMEM; + } + + return 0; +} + +static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + + if (!host->error) + host->error = meson_mx_mmc_map_dma(mmc, mrq); + + if (host->error) { + cmd->error = host->error; + mmc_request_done(mmc, mrq); + return; + } + + host->mrq = mrq; + + if (mrq->data) + writel(sg_dma_address(mrq->data->sg), + host->base + MESON_MX_SDIO_ADDR); + + if (mrq->sbc) + meson_mx_mmc_start_cmd(mmc, mrq->sbc); + else + meson_mx_mmc_start_cmd(mmc, mrq->cmd); +} + +static int meson_mx_mmc_card_busy(struct mmc_host *mmc) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC); + + return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK); +} + +static void meson_mx_mmc_read_response(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 mult; + int i, resp[4]; + + mult = readl(host->base + MESON_MX_SDIO_MULT); + mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX; + mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK; + mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0); + writel(mult, host->base + MESON_MX_SDIO_MULT); + + if (cmd->flags & MMC_RSP_136) { + for (i = 0; i <= 3; i++) + resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU); + cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff); + cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff); + cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff); + cmd->resp[3] = (resp[3] << 8); + } else if (cmd->flags & MMC_RSP_PRESENT) { + cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU); + } +} + +static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host, + u32 irqs, u32 send) +{ + struct mmc_command *cmd = host->cmd; + + /* + * NOTE: even though it shouldn't happen we sometimes get command + * interrupts twice (at least this is what it looks like). Ideally + * we find out why this happens and warn here as soon as it occurs. 
+ */ + if (!cmd) + return IRQ_HANDLED; + + cmd->error = 0; + meson_mx_mmc_read_response(host->mmc, cmd); + + if (cmd->data) { + if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) || + (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK))) + cmd->error = -EILSEQ; + } else { + if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) || + (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7))) + cmd->error = -EILSEQ; + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t meson_mx_mmc_irq(int irq, void *data) +{ + struct meson_mx_mmc_host *host = (void *) data; + u32 irqs, send; + unsigned long irqflags; + irqreturn_t ret; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + irqs = readl(host->base + MESON_MX_SDIO_IRQS); + send = readl(host->base + MESON_MX_SDIO_SEND); + + if (irqs & MESON_MX_SDIO_IRQS_CMD_INT) + ret = meson_mx_mmc_process_cmd_irq(host, irqs, send); + else + ret = IRQ_HANDLED; + + /* finally ACK all pending interrupts */ + writel(irqs, host->base + MESON_MX_SDIO_IRQS); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + return ret; +} + +static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data) +{ + struct meson_mx_mmc_host *host = (void *) irq_data; + struct mmc_command *cmd = host->cmd, *next_cmd; + + if (WARN_ON(!cmd)) + return IRQ_HANDLED; + + del_timer_sync(&host->cmd_timeout); + + if (cmd->data) { + dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg, + cmd->data->sg_len, + mmc_get_dma_dir(cmd->data)); + + cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks; + } + + next_cmd = meson_mx_mmc_get_next_cmd(cmd); + if (next_cmd) + meson_mx_mmc_start_cmd(host->mmc, next_cmd); + else + meson_mx_mmc_request_done(host); + + return IRQ_HANDLED; +} + +static void meson_mx_mmc_timeout(struct timer_list *t) +{ + struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout); + unsigned long irqflags; + u32 irqc; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + /* disable the CMD interrupt */ + irqc = readl(host->base + MESON_MX_SDIO_IRQC); + irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN; + writel(irqc, host->base + MESON_MX_SDIO_IRQC); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + /* + * skip the timeout handling if the interrupt handler already processed + * the command. + */ + if (!host->cmd) + return; + + dev_dbg(mmc_dev(host->mmc), + "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n", + host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS), + readl(host->base + MESON_MX_SDIO_ARGU)); + + host->cmd->error = -ETIMEDOUT; + + meson_mx_mmc_request_done(host); +} + +static struct mmc_host_ops meson_mx_mmc_ops = { + .request = meson_mx_mmc_request, + .set_ios = meson_mx_mmc_set_ios, + .card_busy = meson_mx_mmc_card_busy, + .get_cd = mmc_gpio_get_cd, + .get_ro = mmc_gpio_get_ro, +}; + +static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) +{ + struct device_node *slot_node; + + /* + * TODO: the MMC core framework currently does not support + * controllers with multiple slots properly. 
So we only register + * the first slot for now + */ + slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot"); + if (!slot_node) { + dev_warn(parent, "no 'mmc-slot' sub-node found\n"); + return ERR_PTR(-ENOENT); + } + + return of_platform_device_create(slot_node, NULL, parent); +} + +static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *slot_dev = mmc_dev(mmc); + int ret; + + if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) { + dev_err(slot_dev, "missing 'reg' property\n"); + return -EINVAL; + } + + if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) { + dev_err(slot_dev, "invalid 'reg' property value %d\n", + host->slot_id); + return -EINVAL; + } + + /* Get regulators and the supported OCR mask */ + ret = mmc_regulator_get_supply(mmc); + if (ret) + return ret; + + mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE; + mmc->max_seg_size = mmc->max_req_size; + mmc->max_blk_count = + FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK, + 0xffffffff); + mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK, + 0xffffffff); + mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS); + mmc->max_blk_size /= BITS_PER_BYTE; + + /* Get the min and max supported clock rates */ + mmc->f_min = clk_round_rate(host->cfg_div_clk, 1); + mmc->f_max = clk_round_rate(host->cfg_div_clk, + clk_get_rate(host->parent_clk)); + + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; + mmc->ops = &meson_mx_mmc_ops; + + ret = mmc_of_parse(mmc); + if (ret) + return ret; + + ret = mmc_add_host(mmc); + if (ret) + return ret; + + return 0; +} + +static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host) +{ + struct clk_init_data init; + const char *clk_div_parent, *clk_fixed_factor_parent; + + clk_fixed_factor_parent = __clk_get_name(host->parent_clk); + init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, + "%s#fixed_factor", + dev_name(host->controller_dev)); + init.ops = &clk_fixed_factor_ops; + init.flags = 0; + init.parent_names = &clk_fixed_factor_parent; + init.num_parents = 1; + host->fixed_factor.div = 2; + host->fixed_factor.mult = 1; + host->fixed_factor.hw.init = &init; + + host->fixed_factor_clk = devm_clk_register(host->controller_dev, + &host->fixed_factor.hw); + if (WARN_ON(IS_ERR(host->fixed_factor_clk))) + return PTR_ERR(host->fixed_factor_clk); + + clk_div_parent = __clk_get_name(host->fixed_factor_clk); + init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, + "%s#div", dev_name(host->controller_dev)); + init.ops = &clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = &clk_div_parent; + init.num_parents = 1; + host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF; + host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT; + host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH; + host->cfg_div.hw.init = &init; + host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO; + + host->cfg_div_clk = devm_clk_register(host->controller_dev, + &host->cfg_div.hw); + if (WARN_ON(IS_ERR(host->cfg_div_clk))) + return PTR_ERR(host->cfg_div_clk); + + return 0; +} + +static int meson_mx_mmc_probe(struct platform_device *pdev) +{ + struct platform_device *slot_pdev; + struct mmc_host *mmc; + struct meson_mx_mmc_host *host; + struct resource *res; + int ret, irq; + u32 conf; + + slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev); + if (!slot_pdev) + return -ENODEV; + else if (IS_ERR(slot_pdev)) + return PTR_ERR(slot_pdev); + + mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev); 
+ if (!mmc) { + ret = -ENOMEM; + goto error_unregister_slot_pdev; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->controller_dev = &pdev->dev; + + spin_lock_init(&host->irq_lock); + timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0); + + platform_set_drvdata(pdev, host); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(host->controller_dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto error_free_mmc; + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(host->controller_dev, irq, + meson_mx_mmc_irq, + meson_mx_mmc_irq_thread, IRQF_ONESHOT, + NULL, host); + if (ret) + goto error_free_mmc; + + host->core_clk = devm_clk_get(host->controller_dev, "core"); + if (IS_ERR(host->core_clk)) { + ret = PTR_ERR(host->core_clk); + goto error_free_mmc; + } + + host->parent_clk = devm_clk_get(host->controller_dev, "clkin"); + if (IS_ERR(host->parent_clk)) { + ret = PTR_ERR(host->parent_clk); + goto error_free_mmc; + } + + ret = meson_mx_mmc_register_clks(host); + if (ret) + goto error_free_mmc; + + ret = clk_prepare_enable(host->core_clk); + if (ret) { + dev_err(host->controller_dev, "Failed to enable core clock\n"); + goto error_free_mmc; + } + + ret = clk_prepare_enable(host->cfg_div_clk); + if (ret) { + dev_err(host->controller_dev, "Failed to enable MMC clock\n"); + goto error_disable_core_clk; + } + + conf = 0; + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2); + writel(conf, host->base + MESON_MX_SDIO_CONF); + + meson_mx_mmc_soft_reset(host); + + ret = meson_mx_mmc_add_host(host); + if (ret) + goto error_disable_clks; + + return 0; + +error_disable_clks: + clk_disable_unprepare(host->cfg_div_clk); +error_disable_core_clk: + clk_disable_unprepare(host->core_clk); +error_free_mmc: + mmc_free_host(mmc); +error_unregister_slot_pdev: + of_platform_device_destroy(&slot_pdev->dev, NULL); + return ret; +} + +static int meson_mx_mmc_remove(struct platform_device *pdev) +{ + struct meson_mx_mmc_host *host = platform_get_drvdata(pdev); + struct device *slot_dev = mmc_dev(host->mmc); + + del_timer_sync(&host->cmd_timeout); + + mmc_remove_host(host->mmc); + + of_platform_device_destroy(slot_dev, NULL); + + clk_disable_unprepare(host->cfg_div_clk); + clk_disable_unprepare(host->core_clk); + + mmc_free_host(host->mmc); + + return 0; +} + +static const struct of_device_id meson_mx_mmc_of_match[] = { + { .compatible = "amlogic,meson8-sdio", }, + { .compatible = "amlogic,meson8b-sdio", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match); + +static struct platform_driver meson_mx_mmc_driver = { + .probe = meson_mx_mmc_probe, + .remove = meson_mx_mmc_remove, + .driver = { + .name = "meson-mx-sdio", + .of_match_table = of_match_ptr(meson_mx_mmc_of_match), + }, +}; + +module_platform_driver(meson_mx_mmc_driver); + +MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver"); +MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>"); +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index d1ca2f489054..e8a1bb1ae694 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1658,7 +1658,7 @@ static int mmci_probe(struct amba_device *dev, /* Get regulators 
and the supported OCR mask */ ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto clk_disable; if (!mmc->ocr_avail) @@ -1904,7 +1904,7 @@ static const struct dev_pm_ops mmci_dev_pm_ops = { SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) }; -static struct amba_id mmci_ids[] = { +static const struct amba_id mmci_ids[] = { { .id = 0x00041180, .mask = 0xff0fffff, diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index d4dc55ac7dea..a0670e9cd012 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -546,7 +546,7 @@ static int moxart_get_ro(struct mmc_host *mmc) return !!(readl(host->base + REG_STATUS) & WRITE_PROT); } -static struct mmc_host_ops moxart_ops = { +static const struct mmc_host_ops moxart_ops = { .request = moxart_request, .set_ios = moxart_set_ios, .get_ro = moxart_get_ro, diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 5a672a5218ad..6457a7d8880f 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -67,6 +67,7 @@ #define SDC_RESP2 0x48 #define SDC_RESP3 0x4c #define SDC_BLK_NUM 0x50 +#define SDC_ADV_CFG0 0x64 #define EMMC_IOCON 0x7c #define SDC_ACMD_RESP 0x80 #define MSDC_DMA_SA 0x90 @@ -74,10 +75,14 @@ #define MSDC_DMA_CFG 0x9c #define MSDC_PATCH_BIT 0xb0 #define MSDC_PATCH_BIT1 0xb4 +#define MSDC_PATCH_BIT2 0xb8 #define MSDC_PAD_TUNE 0xec +#define MSDC_PAD_TUNE0 0xf0 #define PAD_DS_TUNE 0x188 #define PAD_CMD_TUNE 0x18c #define EMMC50_CFG0 0x208 +#define EMMC50_CFG3 0x220 +#define SDC_FIFO_CFG 0x228 /*--------------------------------------------------------------------------*/ /* Register Mask */ @@ -95,6 +100,9 @@ #define MSDC_CFG_CKDIV (0xff << 8) /* RW */ #define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ #define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */ +#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */ +#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */ +#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */ /* MSDC_IOCON mask */ #define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ @@ -183,6 +191,9 @@ #define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ #define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ +/* SDC_ADV_CFG0 mask */ +#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */ + /* MSDC_DMA_CTRL mask */ #define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ #define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */ @@ -212,11 +223,22 @@ #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ +#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ + +#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ +#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */ +#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */ +#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */ +#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */ + #define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */ #define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */ #define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */ #define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */ #define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */ +#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */ +#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */ +#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */ #define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */ #define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */ @@ -228,6 +250,11 @@ #define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */ #define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */ +#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */ + +#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW 
*/ +#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */ + #define REQ_CMD_EIO (0x1 << 0) #define REQ_CMD_TMO (0x1 << 1) #define REQ_DAT_ERR (0x1 << 2) @@ -290,9 +317,23 @@ struct msdc_save_para { u32 pad_tune; u32 patch_bit0; u32 patch_bit1; + u32 patch_bit2; u32 pad_ds_tune; u32 pad_cmd_tune; u32 emmc50_cfg0; + u32 emmc50_cfg3; + u32 sdc_fifo_cfg; +}; + +struct mtk_mmc_compatible { + u8 clk_div_bits; + bool hs400_tune; /* only used for MT8173 */ + u32 pad_tune_reg; + bool async_fifo; + bool data_tune; + bool busy_check; + bool stop_clk_fix; + bool enhance_rx; }; struct msdc_tune_para { @@ -309,6 +350,7 @@ struct msdc_delay_phase { struct msdc_host { struct device *dev; + const struct mtk_mmc_compatible *dev_comp; struct mmc_host *mmc; /* mmc structure */ int cmd_rsp; @@ -334,11 +376,13 @@ struct msdc_host { struct clk *src_clk; /* msdc source clock */ struct clk *h_clk; /* msdc h_clk */ + struct clk *src_clk_cg; /* msdc source clock control gate */ u32 mclk; /* mmc subsystem clock frequency */ u32 src_clk_freq; /* source clock frequency */ u32 sclk; /* SD/MS bus clock frequency */ unsigned char timing; bool vqmmc_enabled; + u32 latch_ck; u32 hs400_ds_delay; u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */ u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */ @@ -350,6 +394,59 @@ struct msdc_host { struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */ }; +static const struct mtk_mmc_compatible mt8135_compat = { + .clk_div_bits = 8, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE, + .async_fifo = false, + .data_tune = false, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, +}; + +static const struct mtk_mmc_compatible mt8173_compat = { + .clk_div_bits = 8, + .hs400_tune = true, + .pad_tune_reg = MSDC_PAD_TUNE, + .async_fifo = false, + .data_tune = false, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, +}; + +static const struct mtk_mmc_compatible mt2701_compat = { + .clk_div_bits = 12, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, +}; + +static const struct mtk_mmc_compatible mt2712_compat = { + .clk_div_bits = 12, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, + .enhance_rx = true, +}; + +static const struct of_device_id msdc_of_ids[] = { + { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat}, + { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat}, + { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat}, + { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat}, + {} +}; +MODULE_DEVICE_TABLE(of, msdc_of_ids); + static void sdr_set_bits(void __iomem *reg, u32 bs) { u32 val = readl(reg); @@ -509,7 +606,12 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) timeout = (ns + clk_ns - 1) / clk_ns + clks; /* in 1048576 sclk cycle unit */ timeout = (timeout + (0x1 << 20) - 1) >> 20; - sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode); + if (host->dev_comp->clk_div_bits == 8) + sdr_get_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD, &mode); + else + sdr_get_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD_EXTRA, &mode); /*DDR mode will double the clk cycles for data timeout */ timeout = mode >= 2 ? timeout * 2 : timeout; timeout = timeout > 1 ? 
timeout - 1 : 0; @@ -520,6 +622,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) static void msdc_gate_clock(struct msdc_host *host) { + clk_disable_unprepare(host->src_clk_cg); clk_disable_unprepare(host->src_clk); clk_disable_unprepare(host->h_clk); } @@ -528,6 +631,7 @@ static void msdc_ungate_clock(struct msdc_host *host) { clk_prepare_enable(host->h_clk); clk_prepare_enable(host->src_clk); + clk_prepare_enable(host->src_clk_cg); while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); } @@ -538,6 +642,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) u32 flags; u32 div; u32 sclk; + u32 tune_reg = host->dev_comp->pad_tune_reg; if (!hz) { dev_dbg(host->dev, "set mclk to 0\n"); @@ -548,7 +653,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) flags = readl(host->base + MSDC_INTEN); sdr_clr_bits(host->base + MSDC_INTEN, flags); - sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE); + if (host->dev_comp->clk_div_bits == 8) + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE); + else + sdr_clr_bits(host->base + MSDC_CFG, + MSDC_CFG_HS400_CK_MODE_EXTRA); if (timing == MMC_TIMING_UHS_DDR50 || timing == MMC_TIMING_MMC_DDR52 || timing == MMC_TIMING_MMC_HS400) { @@ -568,8 +677,12 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (timing == MMC_TIMING_MMC_HS400 && hz >= (host->src_clk_freq >> 1)) { - sdr_set_bits(host->base + MSDC_CFG, - MSDC_CFG_HS400_CK_MODE); + if (host->dev_comp->clk_div_bits == 8) + sdr_set_bits(host->base + MSDC_CFG, + MSDC_CFG_HS400_CK_MODE); + else + sdr_set_bits(host->base + MSDC_CFG, + MSDC_CFG_HS400_CK_MODE_EXTRA); sclk = host->src_clk_freq >> 1; div = 0; /* div is ignore when bit18 is set */ } @@ -587,11 +700,31 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) sclk = (host->src_clk_freq >> 2) / div; } } - sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, - (mode << 8) | div); - sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); + /* + * As src_clk/HCLK use the same bit to gate/ungate, + * So if want to only gate src_clk, need gate its parent(mux). 
+ */ + if (host->src_clk_cg) + clk_disable_unprepare(host->src_clk_cg); + else + clk_disable_unprepare(clk_get_parent(host->src_clk)); + if (host->dev_comp->clk_div_bits == 8) + sdr_set_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, + (mode << 8) | div); + else + sdr_set_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA, + (mode << 12) | div); + if (host->src_clk_cg) + clk_prepare_enable(host->src_clk_cg); + else + clk_prepare_enable(clk_get_parent(host->src_clk)); + while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); host->sclk = sclk; host->mclk = hz; host->timing = timing; @@ -605,15 +738,16 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) */ if (host->sclk <= 52000000) { writel(host->def_tune_para.iocon, host->base + MSDC_IOCON); - writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE); + writel(host->def_tune_para.pad_tune, host->base + tune_reg); } else { writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON); - writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE); + writel(host->saved_tune_para.pad_tune, host->base + tune_reg); writel(host->saved_tune_para.pad_cmd_tune, host->base + PAD_CMD_TUNE); } - if (timing == MMC_TIMING_MMC_HS400) + if (timing == MMC_TIMING_MMC_HS400 && + host->dev_comp->hs400_tune) sdr_set_field(host->base + PAD_CMD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, host->hs400_cmd_int_delay); @@ -1165,6 +1299,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id) static void msdc_init_hw(struct msdc_host *host) { u32 val; + u32 tune_reg = host->dev_comp->pad_tune_reg; /* Configure to MMC/SD mode, clock free running */ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); @@ -1180,14 +1315,53 @@ static void msdc_init_hw(struct msdc_host *host) val = readl(host->base + MSDC_INT); writel(val, host->base + MSDC_INT); - writel(0, host->base + MSDC_PAD_TUNE); + writel(0, host->base + tune_reg); writel(0, host->base + MSDC_IOCON); sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0); writel(0x403c0046, host->base + MSDC_PATCH_BIT); sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1); - writel(0xffff0089, host->base + MSDC_PATCH_BIT1); + writel(0xffff4089, host->base + MSDC_PATCH_BIT1); sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL); + if (host->dev_comp->stop_clk_fix) { + sdr_set_field(host->base + MSDC_PATCH_BIT1, + MSDC_PATCH_BIT1_STOP_DLY, 3); + sdr_clr_bits(host->base + SDC_FIFO_CFG, + SDC_FIFO_CFG_WRVALIDSEL); + sdr_clr_bits(host->base + SDC_FIFO_CFG, + SDC_FIFO_CFG_RDVALIDSEL); + } + + if (host->dev_comp->busy_check) + sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7)); + + if (host->dev_comp->async_fifo) { + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_RESPWAIT, 3); + if (host->dev_comp->enhance_rx) { + sdr_set_bits(host->base + SDC_ADV_CFG0, + SDC_RX_ENHANCE_EN); + } else { + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_RESPSTSENSEL, 2); + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_CRCSTSENSEL, 2); + } + /* use async fifo, then no need tune internal delay */ + sdr_clr_bits(host->base + MSDC_PATCH_BIT2, + MSDC_PATCH_BIT2_CFGRESP); + sdr_set_bits(host->base + MSDC_PATCH_BIT2, + MSDC_PATCH_BIT2_CFGCRCSTS); + } + + if (host->dev_comp->data_tune) { + sdr_set_bits(host->base + tune_reg, + MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL); + } else { + /* choose clock tune */ + sdr_set_bits(host->base + tune_reg, 
MSDC_PAD_TUNE_RXDLYSEL); + } + /* Configure to enable SDIO mode. * it's must otherwise sdio cmd5 failed */ @@ -1200,7 +1374,9 @@ static void msdc_init_hw(struct msdc_host *host) sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3); host->def_tune_para.iocon = readl(host->base + MSDC_IOCON); - host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); + host->def_tune_para.pad_tune = readl(host->base + tune_reg); + host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); + host->saved_tune_para.pad_tune = readl(host->base + tune_reg); dev_dbg(host->dev, "init hardware done!"); } @@ -1343,18 +1519,19 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) struct msdc_delay_phase internal_delay_phase; u8 final_delay, final_maxlen; u32 internal_delay = 0; + u32 tune_reg = host->dev_comp->pad_tune_reg; int cmd_err; int i, j; if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || mmc->ios.timing == MMC_TIMING_UHS_SDR104) - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, host->hs200_cmd_int_delay); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); for (i = 0 ; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, i); /* * Using the same parameters, it may sometimes pass the test, @@ -1373,12 +1550,13 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) } final_rise_delay = get_best_delay(host, rise_delay); /* if rising edge has enough margin, then do not scan falling edge */ - if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4) + if (final_rise_delay.maxlen >= 12 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) goto skip_fall; sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, i); /* * Using the same parameters, it may sometimes pass the test, @@ -1403,20 +1581,20 @@ skip_fall: final_maxlen = final_fall_delay.maxlen; if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, final_rise_delay.final_phase); final_delay = final_rise_delay.final_phase; } else { sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, final_fall_delay.final_phase); final_delay = final_fall_delay.final_phase; } - if (host->hs200_cmd_int_delay) + if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) goto skip_internal; for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, i); mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) @@ -1424,7 +1602,7 @@ skip_fall: } dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay); internal_delay_phase = get_best_delay(host, internal_delay); - sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, internal_delay_phase.final_phase); skip_internal: dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); @@ -1486,12 +1664,15 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) u32 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase 
final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; + u32 tune_reg = host->dev_comp->pad_tune_reg; int i, ret; + sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, + host->latch_ck); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); for (i = 0 ; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) @@ -1506,7 +1687,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) @@ -1519,14 +1700,14 @@ skip_fall: if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, final_rise_delay.final_phase); final_delay = final_rise_delay.final_phase; } else { sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, final_fall_delay.final_phase); final_delay = final_fall_delay.final_phase; @@ -1540,8 +1721,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); int ret; + u32 tune_reg = host->dev_comp->pad_tune_reg; - if (host->hs400_mode) + if (host->hs400_mode && + host->dev_comp->hs400_tune) ret = hs400_tune_response(mmc, opcode); else ret = msdc_tune_response(mmc, opcode); @@ -1556,7 +1739,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) } host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); - host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); + host->saved_tune_para.pad_tune = readl(host->base + tune_reg); host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); return ret; } @@ -1567,6 +1750,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) host->hs400_mode = true; writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); + /* hs400 mode must set it to 0 */ + sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS); + /* to improve read performance, set outstanding to 2 */ + sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2); + return 0; } @@ -1579,12 +1767,13 @@ static void msdc_hw_reset(struct mmc_host *mmc) sdr_clr_bits(host->base + EMMC_IOCON, 1); } -static struct mmc_host_ops mt_msdc_ops = { +static const struct mmc_host_ops mt_msdc_ops = { .post_req = msdc_post_req, .pre_req = msdc_pre_req, .request = msdc_ops_request, .set_ios = msdc_ops_set_ios, .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, .start_signal_voltage_switch = msdc_ops_switch_volt, .card_busy = msdc_card_busy, .execute_tuning = msdc_execute_tuning, @@ -1595,6 +1784,9 @@ static struct mmc_host_ops mt_msdc_ops = { static void msdc_of_property_parse(struct platform_device *pdev, struct msdc_host *host) { + of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", + &host->latch_ck); + of_property_read_u32(pdev->dev.of_node, 
"hs400-ds-delay", &host->hs400_ds_delay); @@ -1616,12 +1808,17 @@ static int msdc_drv_probe(struct platform_device *pdev) struct mmc_host *mmc; struct msdc_host *host; struct resource *res; + const struct of_device_id *of_id; int ret; if (!pdev->dev.of_node) { dev_err(&pdev->dev, "No DT found\n"); return -EINVAL; } + + of_id = of_match_node(msdc_of_ids, pdev->dev.of_node); + if (!of_id) + return -EINVAL; /* Allocate MMC host for this device */ mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev); if (!mmc) @@ -1640,7 +1837,7 @@ static int msdc_drv_probe(struct platform_device *pdev) } ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto host_free; host->src_clk = devm_clk_get(&pdev->dev, "source"); @@ -1655,6 +1852,11 @@ static int msdc_drv_probe(struct platform_device *pdev) goto host_free; } + /*source clock control gate is optional clock*/ + host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg"); + if (IS_ERR(host->src_clk_cg)) + host->src_clk_cg = NULL; + host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = -EINVAL; @@ -1685,11 +1887,15 @@ static int msdc_drv_probe(struct platform_device *pdev) msdc_of_property_parse(pdev, host); host->dev = &pdev->dev; + host->dev_comp = of_id->data; host->mmc = mmc; host->src_clk_freq = clk_get_rate(host->src_clk); /* Set host parameters to mmc */ mmc->ops = &mt_msdc_ops; - mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); + if (host->dev_comp->clk_div_bits == 8) + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); + else + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095); mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; /* MMC core transfer sizes tunable parameters */ @@ -1787,28 +1993,38 @@ static int msdc_drv_remove(struct platform_device *pdev) #ifdef CONFIG_PM static void msdc_save_reg(struct msdc_host *host) { + u32 tune_reg = host->dev_comp->pad_tune_reg; + host->save_para.msdc_cfg = readl(host->base + MSDC_CFG); host->save_para.iocon = readl(host->base + MSDC_IOCON); host->save_para.sdc_cfg = readl(host->base + SDC_CFG); - host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); + host->save_para.pad_tune = readl(host->base + tune_reg); host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); + host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2); host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE); host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0); + host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3); + host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG); } static void msdc_restore_reg(struct msdc_host *host) { + u32 tune_reg = host->dev_comp->pad_tune_reg; + writel(host->save_para.msdc_cfg, host->base + MSDC_CFG); writel(host->save_para.iocon, host->base + MSDC_IOCON); writel(host->save_para.sdc_cfg, host->base + SDC_CFG); - writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE); + writel(host->save_para.pad_tune, host->base + tune_reg); writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); + writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2); writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE); writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE); writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0); + writel(host->save_para.emmc50_cfg3, host->base + 
EMMC50_CFG3); + writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG); } static int msdc_runtime_suspend(struct device *dev) @@ -1838,12 +2054,6 @@ static const struct dev_pm_ops msdc_dev_pm_ops = { SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL) }; -static const struct of_device_id msdc_of_ids[] = { - { .compatible = "mediatek,mt8135-mmc", }, - {} -}; -MODULE_DEVICE_TABLE(of, msdc_of_ids); - static struct platform_driver mt_msdc_driver = { .probe = msdc_drv_probe, .remove = msdc_drv_remove, diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 58d74b8d6c79..210247b3d11a 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -508,9 +508,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev) return IRQ_NONE; } -static void mvsd_timeout_timer(unsigned long data) +static void mvsd_timeout_timer(struct timer_list *t) { - struct mvsd_host *host = (struct mvsd_host *)data; + struct mvsd_host *host = from_timer(host, t, timer); void __iomem *iobase = host->base; struct mmc_request *mrq; unsigned long flags; @@ -776,7 +776,7 @@ static int mvsd_probe(struct platform_device *pdev) goto out; } - setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); + timer_setup(&host->timer, mvsd_timeout_timer, 0); platform_set_drvdata(pdev, mmc); ret = mmc_add_host(mmc); if (ret) diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index fb3ca8296273..5ff8ef7223cc 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -681,6 +681,9 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) spin_unlock_irqrestore(&host->lock, flags); + if (data_error) + return; + mxcmci_read_response(host, stat); host->cmd = NULL; @@ -960,10 +963,9 @@ static bool filter(struct dma_chan *chan, void *param) return true; } -static void mxcmci_watchdog(unsigned long data) +static void mxcmci_watchdog(struct timer_list *t) { - struct mmc_host *mmc = (struct mmc_host *)data; - struct mxcmci_host *host = mmc_priv(mmc); + struct mxcmci_host *host = from_timer(host, t, watchdog); struct mmc_request *req = host->req; unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS); @@ -1014,8 +1016,10 @@ static int mxcmci_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (irq < 0) - return -EINVAL; + if (irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq); + return irq; + } mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); if (!mmc) @@ -1070,7 +1074,7 @@ static int mxcmci_probe(struct platform_device *pdev) dat3_card_detect = true; ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto out_free; if (!mmc->ocr_avail) { @@ -1098,8 +1102,13 @@ static int mxcmci_probe(struct platform_device *pdev) goto out_free; } - clk_prepare_enable(host->clk_per); - clk_prepare_enable(host->clk_ipg); + ret = clk_prepare_enable(host->clk_per); + if (ret) + goto out_free; + + ret = clk_prepare_enable(host->clk_ipg); + if (ret) + goto out_clk_per_put; mxcmci_softreset(host); @@ -1155,9 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev) goto out_free_dma; } - init_timer(&host->watchdog); - host->watchdog.function = &mxcmci_watchdog; - host->watchdog.data = (unsigned long)mmc; + timer_setup(&host->watchdog, mxcmci_watchdog, 0); mmc_add_host(mmc); @@ -1168,8 +1175,9 @@ out_free_dma: dma_release_channel(host->dma); out_clk_put: - clk_disable_unprepare(host->clk_per); clk_disable_unprepare(host->clk_ipg); 
+out_clk_per_put: + clk_disable_unprepare(host->clk_per); out_free: mmc_free_host(mmc); @@ -1212,10 +1220,17 @@ static int __maybe_unused mxcmci_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); + int ret; - clk_prepare_enable(host->clk_per); - clk_prepare_enable(host->clk_ipg); - return 0; + ret = clk_prepare_enable(host->clk_per); + if (ret) + return ret; + + ret = clk_prepare_enable(host->clk_ipg); + if (ret) + clk_disable_unprepare(host->clk_per); + + return ret; } static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 85bbebfde02e..c9eed8436b6b 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -71,7 +71,7 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct of_mmc_spi *oms; - const u32 *voltage_ranges; + const __be32 *voltage_ranges; int num_ranges; int i; diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index bd49f34d7654..adf32682f27a 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -625,9 +625,9 @@ static void mmc_omap_abort_command(struct work_struct *work) } static void -mmc_omap_cmd_timer(unsigned long data) +mmc_omap_cmd_timer(struct timer_list *t) { - struct mmc_omap_host *host = (struct mmc_omap_host *) data; + struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer); unsigned long flags; spin_lock_irqsave(&host->slot_lock, flags); @@ -654,9 +654,9 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host) } static void -mmc_omap_clk_timer(unsigned long data) +mmc_omap_clk_timer(struct timer_list *t) { - struct mmc_omap_host *host = (struct mmc_omap_host *) data; + struct mmc_omap_host *host = from_timer(host, t, clk_timer); mmc_omap_fclk_enable(host, 0); } @@ -874,9 +874,9 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed) tasklet_hi_schedule(&slot->cover_tasklet); } -static void mmc_omap_cover_timer(unsigned long arg) +static void mmc_omap_cover_timer(struct timer_list *t) { - struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg; + struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer); tasklet_schedule(&slot->cover_tasklet); } @@ -1264,8 +1264,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) mmc->max_seg_size = mmc->max_req_size; if (slot->pdata->get_cover_state != NULL) { - setup_timer(&slot->cover_timer, mmc_omap_cover_timer, - (unsigned long)slot); + timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0); tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, (unsigned long)slot); } @@ -1352,11 +1351,10 @@ static int mmc_omap_probe(struct platform_device *pdev) INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command); - setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer, - (unsigned long) host); + timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0); spin_lock_init(&host->clk_lock); - setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); + timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0); spin_lock_init(&host->dma_lock); spin_lock_init(&host->slot_lock); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 7c12f3715676..071693ebfe18 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -147,10 +147,6 @@ #define OMAP_MMC_MAX_CLOCK 
52000000 #define DRIVER_NAME "omap_hsmmc" -#define VDD_1V8 1800000 /* 180000 uV */ -#define VDD_3V0 3000000 /* 300000 uV */ -#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1) - /* * One controller can have multiple slots, like on some omap boards using * omap.c controller driver. Luckily this is not currently done on any known @@ -308,8 +304,7 @@ err_set_ocr: return ret; } -static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, - int vdd) +static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on) { int ret; @@ -317,17 +312,6 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return 0; if (power_on) { - if (vdd <= VDD_165_195) - ret = regulator_set_voltage(host->pbias, VDD_1V8, - VDD_1V8); - else - ret = regulator_set_voltage(host->pbias, VDD_3V0, - VDD_3V0); - if (ret < 0) { - dev_err(host->dev, "pbias set voltage fail\n"); - return ret; - } - if (host->pbias_enabled == 0) { ret = regulator_enable(host->pbias); if (ret) { @@ -350,15 +334,11 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return 0; } -static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, - int vdd) +static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on) { struct mmc_host *mmc = host->mmc; int ret = 0; - if (mmc_pdata(host)->set_power) - return mmc_pdata(host)->set_power(host->dev, power_on, vdd); - /* * If we don't see a Vcc regulator, assume it's a fixed * voltage always-on regulator. @@ -366,10 +346,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, if (IS_ERR(mmc->supply.vmmc)) return 0; - if (mmc_pdata(host)->before_set_reg) - mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd); - - ret = omap_hsmmc_set_pbias(host, false, 0); + ret = omap_hsmmc_set_pbias(host, false); if (ret) return ret; @@ -391,7 +368,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, if (ret) return ret; - ret = omap_hsmmc_set_pbias(host, true, vdd); + ret = omap_hsmmc_set_pbias(host, true); if (ret) goto err_set_voltage; } else { @@ -400,9 +377,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, return ret; } - if (mmc_pdata(host)->after_set_reg) - mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd); - return 0; err_set_voltage: @@ -469,11 +443,9 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) int ret; struct mmc_host *mmc = host->mmc; - if (mmc_pdata(host)->set_power) - return 0; ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) return ret; /* Allow an aux regulator */ @@ -1231,11 +1203,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) clk_disable_unprepare(host->dbclk); /* Turn the power off */ - ret = omap_hsmmc_set_power(host, 0, 0); + ret = omap_hsmmc_set_power(host, 0); /* Turn the power ON with given VDD 1.8 or 3.0v */ if (!ret) - ret = omap_hsmmc_set_power(host, 1, vdd); + ret = omap_hsmmc_set_power(host, 1); if (host->dbclk) clk_prepare_enable(host->dbclk); @@ -1632,10 +1604,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->power_mode != host->power_mode) { switch (ios->power_mode) { case MMC_POWER_OFF: - omap_hsmmc_set_power(host, 0, 0); + omap_hsmmc_set_power(host, 0); break; case MMC_POWER_UP: - omap_hsmmc_set_power(host, 1, ios->vdd); + omap_hsmmc_set_power(host, 1); break; case MMC_POWER_ON: do_send_init_stream = 1; @@ -2087,9 +2059,9 @@ static int omap_hsmmc_probe(struct platform_device 
*pdev) host->dbclk = NULL; } - /* Since we do only SG emulation, we can have as many segs - * as we want. */ - mmc->max_segs = 1024; + /* Set this to a value that allows allocating an entire descriptor + * list within a page (zero order allocation). */ + mmc->max_segs = 64; mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */ @@ -2097,7 +2069,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) mmc->max_seg_size = mmc->max_req_size; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | - MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; + MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23; mmc->caps |= mmc_pdata(host)->caps; if (mmc->caps & MMC_CAP_8_BIT_DATA) @@ -2333,7 +2305,7 @@ static int omap_hsmmc_runtime_resume(struct device *dev) return 0; } -static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { +static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume) .runtime_suspend = omap_hsmmc_runtime_suspend, .runtime_resume = omap_hsmmc_runtime_resume, diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 59ab194cb009..c763b404510f 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev) pxamci_init_ocr(host); - /* - * This architecture used to disable bounce buffers through its - * defconfig, now it is done at runtime as a host property. - */ - mmc->caps = MMC_CAP_NO_BOUNCE_BUFF; + mmc->caps = 0; host->cmdat = 0; if (!cpu_is_pxa25x()) { mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; diff --git a/drivers/mmc/host/pxamci.h b/drivers/mmc/host/pxamci.h index f6c2e2fcce37..d301ca18c5d4 100644 --- a/drivers/mmc/host/pxamci.h +++ b/drivers/mmc/host/pxamci.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #define MMC_STRPCL 0x0000 #define STOP_CLOCK (1 << 0) #define START_CLOCK (2 << 0) diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index ca83acc113b8..b9dfea5d8193 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -31,6 +31,8 @@ struct renesas_sdhi_of_data { int scc_offset; struct renesas_sdhi_scc *taps; int taps_num; + unsigned int max_blk_count; + unsigned short max_segs; }; int renesas_sdhi_probe(struct platform_device *pdev, diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index a4fb07d0ea91..fcf7235d5742 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -40,6 +40,7 @@ #define EXT_ACC 0xe4 #define SDHI_VER_GEN2_SDR50 0x490c +#define SDHI_VER_RZ_A1 0x820b /* very old datasheets said 0x490c for SDR104, too. They are wrong! */ #define SDHI_VER_GEN2_SDR104 0xcb0d #define SDHI_VER_GEN3_SD 0xcc10 @@ -398,12 +399,14 @@ static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host) sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL)); } -static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host) +static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host, u32 bit) { int timeout = 1000; + /* CBSY is set when busy, SCLKDIVEN is cleared when busy */ + u32 wait_state = (bit == TMIO_STAT_CMD_BUSY ? 
TMIO_STAT_CMD_BUSY : 0); - while (--timeout && !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) - & TMIO_STAT_SCLKDIVEN)) + while (--timeout && (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) + & bit) == wait_state) udelay(1); if (!timeout) { @@ -416,17 +419,22 @@ static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host) static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) { + u32 bit = TMIO_STAT_SCLKDIVEN; + switch (addr) { case CTL_SD_CMD: case CTL_STOP_INTERNAL_ACTION: case CTL_XFER_BLK_COUNT: - case CTL_SD_CARD_CLK_CTL: case CTL_SD_XFER_LEN: case CTL_SD_MEM_CARD_OPT: case CTL_TRANSACTION_CTL: case CTL_DMA_ENABLE: case EXT_ACC: - return renesas_sdhi_wait_idle(host); + if (host->pdata->flags & TMIO_MMC_HAVE_CBSY) + bit = TMIO_STAT_CMD_BUSY; + /* fallthrough */ + case CTL_SD_CARD_CLK_CTL: + return renesas_sdhi_wait_idle(host, bit); } return 0; @@ -452,10 +460,11 @@ static int renesas_sdhi_multi_io_quirk(struct mmc_card *card, static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable) { - sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0); + /* Iff regs are 8 byte apart, sdbuf is 64 bit. Otherwise always 32. */ + int width = (host->bus_shift == 2) ? 64 : 32; - /* enable 32bit access if DMA mode if possibile */ - renesas_sdhi_sdbuf_width(host, enable ? 32 : 16); + sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0); + renesas_sdhi_sdbuf_width(host, enable ? width : 16); } int renesas_sdhi_probe(struct platform_device *pdev, @@ -526,6 +535,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, mmc_data->capabilities |= of_data->capabilities; mmc_data->capabilities2 |= of_data->capabilities2; mmc_data->dma_rx_offset = of_data->dma_rx_offset; + mmc_data->max_blk_count = of_data->max_blk_count; + mmc_data->max_segs = of_data->max_segs; dma_priv->dma_buswidth = of_data->dma_buswidth; host->bus_shift = of_data->bus_shift; } @@ -579,6 +590,10 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (ret < 0) goto efree; + /* One Gen2 SDHI incarnation does NOT have a CBSY bit */ + if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) + mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; + /* Enable tuning iff we have an SCC and a supported mode */ if (of_data && of_data->scc_offset && (host->mmc->caps & MMC_CAP_UHS_SDR104 || diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c new file mode 100644 index 000000000000..41cbe84c1d18 --- /dev/null +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -0,0 +1,291 @@ +/* + * DMA support for Internal DMAC with SDHI SD/SDIO controller + * + * Copyright (C) 2016-17 Renesas Electronics Corporation + * Copyright (C) 2016-17 Horms Solutions, Simon Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/scatterlist.h> +#include <linux/sys_soc.h> + +#include "renesas_sdhi.h" +#include "tmio_mmc.h" + +#define DM_CM_DTRAN_MODE 0x820 +#define DM_CM_DTRAN_CTRL 0x828 +#define DM_CM_RST 0x830 +#define DM_CM_INFO1 0x840 +#define DM_CM_INFO1_MASK 0x848 +#define DM_CM_INFO2 0x850 +#define DM_CM_INFO2_MASK 0x858 +#define DM_DTRAN_ADDR 0x880 + +/* DM_CM_DTRAN_MODE */ +#define DTRAN_MODE_CH_NUM_CH0 0 /* "downstream" = for write commands */ +#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "uptream" = for read commands */ +#define DTRAN_MODE_BUS_WID_TH (BIT(5) | BIT(4)) +#define DTRAN_MODE_ADDR_MODE BIT(0) /* 1 = Increment address */ + +/* DM_CM_DTRAN_CTRL */ +#define DTRAN_CTRL_DM_START BIT(0) + +/* DM_CM_RST */ +#define RST_DTRANRST1 BIT(9) +#define RST_DTRANRST0 BIT(8) +#define RST_RESERVED_BITS GENMASK_ULL(32, 0) + +/* DM_CM_INFO1 and DM_CM_INFO1_MASK */ +#define INFO1_CLEAR 0 +#define INFO1_DTRANEND1 BIT(17) +#define INFO1_DTRANEND0 BIT(16) + +/* DM_CM_INFO2 and DM_CM_INFO2_MASK */ +#define INFO2_DTRANERR1 BIT(17) +#define INFO2_DTRANERR0 BIT(16) + +/* + * Specification of this driver: + * - host->chan_{rx,tx} will be used as a flag of enabling/disabling the dma + * - Since this SDHI DMAC register set has 16 but 32-bit width, we + * need a custom accessor. + */ + +/* Definitions for sampling clocks */ +static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { + { + .clk_rate = 0, + .tap = 0x00000300, + }, +}; + +static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | + TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY | + TMIO_MMC_MIN_RCAR2, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | + MMC_CAP_CMD23, + .bus_shift = 2, + .scc_offset = 0x1000, + .taps = rcar_gen3_scc_taps, + .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps), + /* Gen3 SDHI DMAC can handle 0xffffffff blk count, but seg = 1 */ + .max_blk_count = 0xffffffff, + .max_segs = 1, +}; + +static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = { + { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, + {}, +}; +MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match); + +static void +renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host, + int addr, u64 val) +{ + writeq(val, host->ctl + addr); +} + +static void +renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable) +{ + if (!host->chan_tx || !host->chan_rx) + return; + + if (!enable) + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1, + INFO1_CLEAR); + + if (host->dma->enable) + host->dma->enable(host, enable); +} + +static void +renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { + u64 val = RST_DTRANRST1 | RST_DTRANRST0; + + renesas_sdhi_internal_dmac_enable_dma(host, false); + + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, + RST_RESERVED_BITS & ~val); + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, + RST_RESERVED_BITS | val); + + renesas_sdhi_internal_dmac_enable_dma(host, true); +} + +static void +renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) { + 
tasklet_schedule(&host->dma_complete); +} + +static void +renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + struct scatterlist *sg = host->sg_ptr; + u32 dtran_mode = DTRAN_MODE_BUS_WID_TH | DTRAN_MODE_ADDR_MODE; + enum dma_data_direction dir; + int ret; + u32 irq_mask; + + /* This DMAC cannot handle if sg_len is not 1 */ + WARN_ON(host->sg_len > 1); + + /* This DMAC cannot handle if buffer is not 8-bytes alignment */ + if (!IS_ALIGNED(sg->offset, 8)) + goto force_pio; + + if (data->flags & MMC_DATA_READ) { + dtran_mode |= DTRAN_MODE_CH_NUM_CH1; + dir = DMA_FROM_DEVICE; + irq_mask = TMIO_STAT_RXRDY; + } else { + dtran_mode |= DTRAN_MODE_CH_NUM_CH0; + dir = DMA_TO_DEVICE; + irq_mask = TMIO_STAT_TXRQ; + } + + ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir); + if (ret == 0) + goto force_pio; + + renesas_sdhi_internal_dmac_enable_dma(host, true); + + /* disable PIO irqs to avoid "PIO IRQ in DMA mode!" */ + tmio_mmc_disable_mmc_irqs(host, irq_mask); + + /* set dma parameters */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_MODE, + dtran_mode); + renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR, + sg->dma_address); + + return; + +force_pio: + host->force_pio = true; + renesas_sdhi_internal_dmac_enable_dma(host, false); +} + +static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + + /* start the DMAC */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_CTRL, + DTRAN_CTRL_DM_START); +} + +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + enum dma_data_direction dir; + + spin_lock_irq(&host->lock); + + if (!host->data) + goto out; + + if (host->data->flags & MMC_DATA_READ) + dir = DMA_FROM_DEVICE; + else + dir = DMA_TO_DEVICE; + + renesas_sdhi_internal_dmac_enable_dma(host, false); + dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); + + tmio_mmc_do_data_irq(host); +out: + spin_unlock_irq(&host->lock); +} + +static void +renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + /* Each value is set to non-zero to assume "enabling" each DMA */ + host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; + + tasklet_init(&host->dma_complete, + renesas_sdhi_internal_dmac_complete_tasklet_fn, + (unsigned long)host); + tasklet_init(&host->dma_issue, + renesas_sdhi_internal_dmac_issue_tasklet_fn, + (unsigned long)host); +} + +static void +renesas_sdhi_internal_dmac_release_dma(struct tmio_mmc_host *host) +{ + /* Each value is set to zero to assume "disabling" each DMA */ + host->chan_rx = host->chan_tx = NULL; +} + +static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { + .start = renesas_sdhi_internal_dmac_start_dma, + .enable = renesas_sdhi_internal_dmac_enable_dma, + .request = renesas_sdhi_internal_dmac_request_dma, + .release = renesas_sdhi_internal_dmac_release_dma, + .abort = renesas_sdhi_internal_dmac_abort_dma, + .dataend = renesas_sdhi_internal_dmac_dataend_dma, +}; + +/* + * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC + * implementation as others may use a different implementation. 
+ */ +static const struct soc_device_attribute gen3_soc_whitelist[] = { + { .soc_id = "r8a7795", .revision = "ES1.*" }, + { .soc_id = "r8a7795", .revision = "ES2.0" }, + { .soc_id = "r8a7796", .revision = "ES1.0" }, + { /* sentinel */ } +}; + +static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) +{ + if (!soc_device_match(gen3_soc_whitelist)) + return -ENODEV; + + return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); +} + +static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, + tmio_mmc_host_runtime_resume, + NULL) +}; + +static struct platform_driver renesas_internal_dmac_sdhi_driver = { + .driver = { + .name = "renesas_sdhi_internal_dmac", + .pm = &renesas_sdhi_internal_dmac_dev_pm_ops, + .of_match_table = renesas_sdhi_internal_dmac_of_match, + }, + .probe = renesas_sdhi_internal_dmac_probe, + .remove = renesas_sdhi_remove, +}; + +module_platform_driver(renesas_internal_dmac_sdhi_driver); + +MODULE_DESCRIPTION("Renesas SDHI driver for internal DMAC"); +MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 642a0dcc8c5c..9ab10436e4b8 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -1,5 +1,5 @@ /* - * DMA function for TMIO MMC implementations + * DMA support use of SYS DMAC with SDHI SD/SDIO controller * * Copyright (C) 2016-17 Renesas Electronics Corporation * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang @@ -18,8 +18,10 @@ #include <linux/mmc/host.h> #include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/pagemap.h> #include <linux/scatterlist.h> +#include <linux/sys_soc.h> #include "renesas_sdhi.h" #include "tmio_mmc.h" @@ -31,7 +33,8 @@ static const struct renesas_sdhi_of_data of_default_cfg = { }; static const struct renesas_sdhi_of_data of_rz_compatible = { - .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT, + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT | + TMIO_MMC_HAVE_CBSY, .tmio_ocr_mask = MMC_VDD_32_33, .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, }; @@ -56,7 +59,8 @@ static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = { static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | - TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2, + TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY | + TMIO_MMC_MIN_RCAR2, .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23, .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES, @@ -76,7 +80,8 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | - TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2, + TMIO_MMC_CLK_ACTUAL | TMIO_MMC_HAVE_CBSY | + TMIO_MMC_MIN_RCAR2, .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23, .bus_shift = 2, @@ -86,13 +91,14 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { }; static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { - { .compatible = "renesas,sdhi-shmobile" }, { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, }, { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, }, { 
.compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, }, { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, }, { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, }, { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, }, @@ -100,6 +106,10 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, }, { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-shmobile" }, {}, }; MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match); @@ -126,6 +136,11 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) renesas_sdhi_sys_dmac_enable_dma(host, true); } +static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host) +{ + complete(&host->dma_dataend); +} + static void renesas_sdhi_sys_dmac_dma_callback(void *arg) { struct tmio_mmc_host *host = arg; @@ -451,10 +466,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = { .request = renesas_sdhi_sys_dmac_request_dma, .release = renesas_sdhi_sys_dmac_release_dma, .abort = renesas_sdhi_sys_dmac_abort_dma, + .dataend = renesas_sdhi_sys_dmac_dataend_dma, +}; + +/* + * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC + * implementation. Currently empty as all supported ES versions use + * the internal DMAC. + */ +static const struct soc_device_attribute gen3_soc_whitelist[] = { + { /* sentinel */ } }; static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) { + if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible && + !soc_device_match(gen3_soc_whitelist)) + return -ENODEV; + return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops); } diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 41b57713b620..0848dc0f882e 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -618,29 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host, u8 sample_point, bool rx) { struct rtsx_pcr *pcr = host->pcr; - int err; dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", __func__, rx ? 
"RX" : "TX", sample_point); - rtsx_pci_init_cmd(pcr); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK); + rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK); if (rx) - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, - SD_VPRX_CTL, 0x1F, sample_point); + rtsx_pci_write_register(pcr, SD_VPRX_CTL, + PHASE_SELECT_MASK, sample_point); else - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, - SD_VPTX_CTL, 0x1F, sample_point); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, - PHASE_NOT_RESET, PHASE_NOT_RESET); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); - - err = rtsx_pci_send_cmd(pcr, 100); - if (err < 0) - return err; + rtsx_pci_write_register(pcr, SD_VPTX_CTL, + PHASE_SELECT_MASK, sample_point); + rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); + rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, + PHASE_NOT_RESET); + rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); return 0; } @@ -708,10 +701,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, { int err; struct mmc_command cmd = {}; + struct rtsx_pcr *pcr = host->pcr; - err = sd_change_phase(host, sample_point, true); - if (err < 0) - return err; + sd_change_phase(host, sample_point, true); + + rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, + SD_RSP_80CLK_TIMEOUT_EN); cmd.opcode = opcode; err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100); @@ -719,9 +714,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, /* Wait till SD DATA IDLE */ sd_wait_data_idle(host); sd_clear_error(host); + rtsx_pci_write_register(pcr, SD_CFG3, + SD_RSP_80CLK_TIMEOUT_EN, 0); return err; } + rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); return 0; } diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 12d2fbe9c520..76da1687ab37 100644 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -909,7 +909,7 @@ static int sd_set_bus_width(struct rtsx_usb_sdmmc *host, unsigned char bus_width) { int err = 0; - u8 width[] = { + static const u8 width[] = { [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 8896bf533dc7..f7f157a62a4a 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1313,7 +1313,7 @@ static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable) s3cmci_check_sdio_irq(host); } -static struct mmc_host_ops s3cmci_ops = { +static const struct mmc_host_ops s3cmci_ops = { .request = s3cmci_request, .set_ios = s3cmci_set_ios, .get_ro = mmc_gpio_get_ro, diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index ac678e9fb19a..b988997a1e80 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -73,6 +73,7 @@ struct sdhci_acpi_slot { unsigned int caps2; mmc_pm_flag_t pm_caps; unsigned int flags; + size_t priv_size; int (*probe_slot)(struct platform_device *, const char *, const char *); int (*remove_slot)(struct platform_device *); }; @@ -82,13 +83,118 @@ struct sdhci_acpi_host { const struct sdhci_acpi_slot *slot; struct platform_device *pdev; bool use_runtime_pm; + unsigned long private[0] ____cacheline_aligned; }; +static inline void 
*sdhci_acpi_priv(struct sdhci_acpi_host *c) +{ + return (void *)c->private; +} + static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) { return c->slot && (c->slot->flags & flag); } +enum { + INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, + INTEL_DSM_V33_SWITCH = 4, +}; + +struct intel_host { + u32 dsm_fns; +}; + +static const guid_t intel_dsm_guid = + GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F, + 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61); + +static int __intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + union acpi_object *obj; + int err = 0; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); + if (!obj) + return -EOPNOTSUPP; + + if (obj->type == ACPI_TYPE_INTEGER) { + *result = obj->integer.value; + } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) { + size_t len = min_t(size_t, obj->buffer.length, 4); + + *result = 0; + memcpy(result, obj->buffer.pointer, len); + } else { + dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n", + __func__, fn, obj->type, obj->buffer.length); + err = -EINVAL; + } + + ACPI_FREE(obj); + + return err; +} + +static int intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) + return -EOPNOTSUPP; + + return __intel_dsm(intel_host, dev, fn, result); +} + +static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, + struct mmc_host *mmc) +{ + int err; + + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); + if (err) { + pr_debug("%s: DSM not supported, error %d\n", + mmc_hostname(mmc), err); + return; + } + + pr_debug("%s: DSM function mask %#x\n", + mmc_hostname(mmc), intel_host->dsm_fns); +} + +static int intel_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct device *dev = mmc_dev(mmc); + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + struct intel_host *intel_host = sdhci_acpi_priv(c); + unsigned int fn; + u32 result = 0; + int err; + + err = sdhci_start_signal_voltage_switch(mmc, ios); + if (err) + return err; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + fn = INTEL_DSM_V33_SWITCH; + break; + case MMC_SIGNAL_VOLTAGE_180: + fn = INTEL_DSM_V18_SWITCH; + break; + default: + return 0; + } + + err = intel_dsm(intel_host, dev, fn, &result); + pr_debug("%s: %s DSM fn %u error %d result %u\n", + mmc_hostname(mmc), __func__, fn, err, result); + + return 0; +} + static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) { u8 reg; @@ -269,59 +375,26 @@ out: return ret; } -static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, - const char *hid, const char *uid) +static int intel_probe_slot(struct platform_device *pdev, const char *hid, + const char *uid) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); - struct sdhci_host *host; - - if (!c || !c->host) - return 0; - - host = c->host; - - /* Platform specific code during emmc probe slot goes here */ + struct intel_host *intel_host = sdhci_acpi_priv(c); + struct sdhci_host *host = c->host; if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") && sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 && sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807) host->timeout_clk = 1000; /* 1000 kHz i.e. 
1 MHz */ - return 0; -} - -static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev, - const char *hid, const char *uid) -{ - struct sdhci_acpi_host *c = platform_get_drvdata(pdev); - struct sdhci_host *host; - - if (!c || !c->host) - return 0; - - host = c->host; - - /* Platform specific code during sdio probe slot goes here */ - - return 0; -} - -static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev, - const char *hid, const char *uid) -{ - struct sdhci_acpi_host *c = platform_get_drvdata(pdev); - struct sdhci_host *host; - - if (!c || !c->host || !c->slot) - return 0; - - host = c->host; - - /* Platform specific code during sd probe slot goes here */ - if (hid && !strcmp(hid, "80865ACA")) host->mmc_host_ops.get_cd = bxt_get_cd; + intel_dsm_init(intel_host, &pdev->dev, host->mmc); + + host->mmc_host_ops.start_signal_voltage_switch = + intel_start_signal_voltage_switch; + return 0; } @@ -335,7 +408,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400, - .probe_slot = sdhci_acpi_emmc_probe_slot, + .probe_slot = intel_probe_slot, + .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { @@ -346,7 +420,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { MMC_CAP_WAIT_WHILE_BUSY, .flags = SDHCI_ACPI_RUNTIME_PM, .pm_caps = MMC_PM_KEEP_POWER, - .probe_slot = sdhci_acpi_sdio_probe_slot, + .probe_slot = intel_probe_slot, + .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { @@ -356,7 +431,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | SDHCI_QUIRK2_STOP_WITH_TC, .caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM, - .probe_slot = sdhci_acpi_sd_probe_slot, + .probe_slot = intel_probe_slot, + .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = { @@ -432,21 +508,25 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid, static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - acpi_handle handle = ACPI_HANDLE(dev); + const struct sdhci_acpi_slot *slot; struct acpi_device *device, *child; struct sdhci_acpi_host *c; struct sdhci_host *host; struct resource *iomem; resource_size_t len; + size_t priv_size; const char *hid; const char *uid; int err; - if (acpi_bus_get_device(handle, &device)) + device = ACPI_COMPANION(dev); + if (!device) return -ENODEV; hid = acpi_device_hid(device); - uid = device->pnp.unique_id; + uid = acpi_device_uid(device); + + slot = sdhci_acpi_get_slot(hid, uid); /* Power on the SDHCI controller and its children */ acpi_device_fix_up_power(device); @@ -470,13 +550,14 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) return -ENOMEM; - host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); + priv_size = slot ? 
slot->priv_size : 0; + host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size); if (IS_ERR(host)) return PTR_ERR(host); c = sdhci_priv(host); c->host = host; - c->slot = sdhci_acpi_get_slot(hid, uid); + c->slot = slot; c->pdev = pdev; c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index 51dd2fd65000..11ca95c60bcf 100644 --- a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -186,7 +186,7 @@ static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host, udelay(740); } -static struct sdhci_ops sdhci_bcm_kona_ops = { +static const struct sdhci_ops sdhci_bcm_kona_ops = { .set_clock = sdhci_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -197,7 +197,7 @@ static struct sdhci_ops sdhci_bcm_kona_ops = { .card_event = sdhci_bcm_kona_card_event, }; -static struct sdhci_pltfm_data sdhci_pltfm_data_kona = { +static const struct sdhci_pltfm_data sdhci_pltfm_data_kona = { .ops = &sdhci_bcm_kona_ops, .quirks = SDHCI_QUIRK_NO_CARD_NO_RESET | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_32BIT_DMA_ADDR | diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index e2f638338e8f..552bddc5096c 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -21,41 +21,6 @@ #include "sdhci-pltfm.h" -#ifdef CONFIG_PM_SLEEP - -static int sdhci_brcmstb_suspend(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int res; - - if (host->tuning_mode != SDHCI_TUNING_MODE_3) - mmc_retune_needed(host->mmc); - - res = sdhci_suspend_host(host); - if (res) - return res; - clk_disable_unprepare(pltfm_host->clk); - return res; -} - -static int sdhci_brcmstb_resume(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int err; - - err = clk_prepare_enable(pltfm_host->clk); - if (err) - return err; - return sdhci_resume_host(host); -} - -#endif /* CONFIG_PM_SLEEP */ - -static SIMPLE_DEV_PM_OPS(sdhci_brcmstb_pmops, sdhci_brcmstb_suspend, - sdhci_brcmstb_resume); - static const struct sdhci_ops sdhci_brcmstb_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, @@ -63,7 +28,7 @@ static const struct sdhci_ops sdhci_brcmstb_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_brcmstb_pdata = { +static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = { .ops = &sdhci_brcmstb_ops, }; @@ -131,7 +96,7 @@ MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); static struct platform_driver sdhci_brcmstb_driver = { .driver = { .name = "sdhci-brcmstb", - .pm = &sdhci_brcmstb_pmops, + .pm = &sdhci_pltfm_pmops, .of_match_table = of_match_ptr(sdhci_brcm_of_match), }, .probe = sdhci_brcmstb_probe, diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 19d5698244b5..0f589e26ee63 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -13,6 +13,7 @@ * GNU General Public License for more details. 
*/ +#include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -27,15 +28,14 @@ #define SDHCI_CDNS_HRS04_ACK BIT(26) #define SDHCI_CDNS_HRS04_RD BIT(25) #define SDHCI_CDNS_HRS04_WR BIT(24) -#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16 -#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8 -#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0 +#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16) +#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8) +#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0) #define SDHCI_CDNS_HRS06 0x18 /* eMMC control */ #define SDHCI_CDNS_HRS06_TUNE_UP BIT(15) -#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8 -#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f -#define SDHCI_CDNS_HRS06_MODE_MASK 0x7 +#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8) +#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0) #define SDHCI_CDNS_HRS06_MODE_SD 0x0 #define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2 #define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3 @@ -67,9 +67,16 @@ */ #define SDHCI_CDNS_MAX_TUNING_LOOP 40 +struct sdhci_cdns_phy_param { + u8 addr; + u8 data; +}; + struct sdhci_cdns_priv { void __iomem *hrs_addr; bool enhanced_strobe; + unsigned int nr_phy_params; + struct sdhci_cdns_phy_param phy_params[0]; }; struct sdhci_cdns_phy_cfg { @@ -98,8 +105,8 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv, u32 tmp; int ret; - tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) | - (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT); + tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) | + FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr); writel(tmp, reg); tmp |= SDHCI_CDNS_HRS04_WR; @@ -115,9 +122,22 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv, return 0; } -static int sdhci_cdns_phy_init(struct device_node *np, - struct sdhci_cdns_priv *priv) +static unsigned int sdhci_cdns_phy_param_count(struct device_node *np) +{ + unsigned int count = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) + if (of_property_read_bool(np, sdhci_cdns_phy_cfgs[i].property)) + count++; + + return count; +} + +static void sdhci_cdns_phy_param_parse(struct device_node *np, + struct sdhci_cdns_priv *priv) { + struct sdhci_cdns_phy_param *p = priv->phy_params; u32 val; int ret, i; @@ -127,9 +147,19 @@ static int sdhci_cdns_phy_init(struct device_node *np, if (ret) continue; - ret = sdhci_cdns_write_phy_reg(priv, - sdhci_cdns_phy_cfgs[i].addr, - val); + p->addr = sdhci_cdns_phy_cfgs[i].addr; + p->data = val; + p++; + } +} + +static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) +{ + int ret, i; + + for (i = 0; i < priv->nr_phy_params; i++) { + ret = sdhci_cdns_write_phy_reg(priv, priv->phy_params[i].addr, + priv->phy_params[i].data); if (ret) return ret; } @@ -159,8 +189,8 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode) /* The speed mode for eMMC is selected by HRS06 register */ tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06); - tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK; - tmp |= mode; + tmp &= ~SDHCI_CDNS_HRS06_MODE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode); writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06); } @@ -169,7 +199,7 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv) u32 tmp; tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06); - return tmp & SDHCI_CDNS_HRS06_MODE_MASK; + return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp); } static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host, @@ -224,12 +254,12 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val) void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06; u32 tmp; - if (WARN_ON(val > 
SDHCI_CDNS_HRS06_TUNE_MASK)) + if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val))) return -EINVAL; tmp = readl(reg); - tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT); - tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT; + tmp &= ~SDHCI_CDNS_HRS06_TUNE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val); tmp |= SDHCI_CDNS_HRS06_TUNE_UP; writel(tmp, reg); @@ -302,6 +332,8 @@ static int sdhci_cdns_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_cdns_priv *priv; struct clk *clk; + size_t priv_size; + unsigned int nr_phy_params; int ret; struct device *dev = &pdev->dev; @@ -313,7 +345,9 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (ret) return ret; - host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, sizeof(*priv)); + nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node); + priv_size = sizeof(*priv) + sizeof(priv->phy_params[0]) * nr_phy_params; + host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, priv_size); if (IS_ERR(host)) { ret = PTR_ERR(host); goto disable_clk; @@ -322,7 +356,8 @@ static int sdhci_cdns_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); pltfm_host->clk = clk; - priv = sdhci_cdns_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->nr_phy_params = nr_phy_params; priv->hrs_addr = host->ioaddr; priv->enhanced_strobe = false; host->ioaddr += SDHCI_CDNS_SRS_BASE; @@ -336,7 +371,9 @@ static int sdhci_cdns_probe(struct platform_device *pdev) if (ret) goto free; - ret = sdhci_cdns_phy_init(dev->of_node, priv); + sdhci_cdns_phy_param_parse(dev->of_node, priv); + + ret = sdhci_cdns_phy_init(priv); if (ret) goto free; @@ -353,6 +390,39 @@ disable_clk: return ret; } +#ifdef CONFIG_PM_SLEEP +static int sdhci_cdns_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; + + ret = sdhci_cdns_phy_init(priv); + if (ret) + goto disable_clk; + + ret = sdhci_resume_host(host); + if (ret) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(pltfm_host->clk); + + return ret; +} +#endif + +static const struct dev_pm_ops sdhci_cdns_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_cdns_resume) +}; + static const struct of_device_id sdhci_cdns_match[] = { { .compatible = "socionext,uniphier-sd4hc" }, { .compatible = "cdns,sd4hc" }, @@ -363,7 +433,7 @@ MODULE_DEVICE_TABLE(of, sdhci_cdns_match); static struct platform_driver sdhci_cdns_driver = { .driver = { .name = "sdhci-cdns", - .pm = &sdhci_pltfm_pmops, + .pm = &sdhci_cdns_pm_ops, .of_match_table = sdhci_cdns_match, }, .probe = sdhci_cdns_probe, diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index e7893f21b65e..dfa58f8b8dfa 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -54,6 +54,9 @@ #define ESDHC_CLOCK_HCKEN 0x00000002 #define ESDHC_CLOCK_IPGEN 0x00000001 +/* Host Controller Capabilities Register 2 */ +#define ESDHC_CAPABILITIES_1 0x114 + /* Tuning Block Control Register */ #define ESDHC_TBCTL 0x120 #define ESDHC_TB_EN 0x00000004 diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 9d601dc0d646..3fb7d2eec93f 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -123,14 +123,17 @@ #define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT) #define 
MSM_MMC_AUTOSUSPEND_DELAY_MS 50 + +/* Timeout value to avoid infinite waiting for pwr_irq */ +#define MSM_PWR_IRQ_TIMEOUT_MS 5000 + struct sdhci_msm_host { struct platform_device *pdev; void __iomem *core_mem; /* MSM SDCC mapped address */ int pwr_irq; /* power irq */ - struct clk *clk; /* main SD/MMC bus clock */ - struct clk *pclk; /* SDHC peripheral bus clock */ struct clk *bus_clk; /* SDHC bus voter clock */ struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/ + struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */ unsigned long clk_rate; struct mmc_host *mmc; bool use_14lpp_dll_reset; @@ -138,6 +141,10 @@ struct sdhci_msm_host { bool calibration_done; u8 saved_tuning_phase; bool use_cdclp533; + u32 curr_pwr_state; + u32 curr_io_level; + wait_queue_head_t pwr_irq_wait; + bool pwr_irq_flag; }; static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host, @@ -164,10 +171,11 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); struct mmc_ios curr_ios = host->mmc->ios; + struct clk *core_clk = msm_host->bulk_clks[0].clk; int rc; clock = msm_get_clock_rate_for_bus_mode(host, clock); - rc = clk_set_rate(msm_host->clk, clock); + rc = clk_set_rate(core_clk, clock); if (rc) { pr_err("%s: Failed to set clock at rate %u at timing %d\n", mmc_hostname(host->mmc), clock, @@ -176,7 +184,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, } msm_host->clk_rate = clock; pr_debug("%s: Setting clock at rate %lu at timing %d\n", - mmc_hostname(host->mmc), clk_get_rate(msm_host->clk), + mmc_hostname(host->mmc), clk_get_rate(core_clk), curr_ios.timing); } @@ -611,7 +619,7 @@ static void msm_hc_select_hs400(struct sdhci_host *host) * HS400 - divided clock (free running MCLK/2) * All other modes - default (free running MCLK) */ -void sdhci_msm_hc_select_mode(struct sdhci_host *host) +static void sdhci_msm_hc_select_mode(struct sdhci_host *host) { struct mmc_ios ios = host->mmc->ios; @@ -995,21 +1003,142 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host, sdhci_msm_hs400(host, &mmc->ios); } -static void sdhci_msm_voltage_switch(struct sdhci_host *host) +static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host) +{ + init_waitqueue_head(&msm_host->pwr_irq_wait); +} + +static inline void sdhci_msm_complete_pwr_irq_wait( + struct sdhci_msm_host *msm_host) +{ + wake_up(&msm_host->pwr_irq_wait); +} + +/* + * sdhci_msm_check_power_status API should be called when registers writes + * which can toggle sdhci IO bus ON/OFF or change IO lines HIGH/LOW happens. + * To what state the register writes will change the IO lines should be passed + * as the argument req_type. This API will check whether the IO line's state + * is already the expected state and will wait for power irq only if + * power irq is expected to be trigerred based on the current IO line state + * and expected IO line state. 
+ */ +static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + bool done = false; + + pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n", + mmc_hostname(host->mmc), __func__, req_type, + msm_host->curr_pwr_state, msm_host->curr_io_level); + + /* + * The IRQ for request type IO High/LOW will be generated when - + * there is a state change in 1.8V enable bit (bit 3) of + * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0 + * which indicates 3.3V IO voltage. So, when MMC core layer tries + * to set it to 3.3V before card detection happens, the + * IRQ doesn't get triggered as there is no state change in this bit. + * The driver already handles this case by changing the IO voltage + * level to high as part of controller power up sequence. Hence, check + * for host->pwr to handle a case where IO voltage high request is + * issued even before controller power up. + */ + if ((req_type & REQ_IO_HIGH) && !host->pwr) { + pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n", + mmc_hostname(host->mmc), req_type); + return; + } + if ((req_type & msm_host->curr_pwr_state) || + (req_type & msm_host->curr_io_level)) + done = true; + /* + * This is needed here to handle cases where register writes will + * not change the current bus state or io level of the controller. + * In this case, no power irq will be triggerred and we should + * not wait. + */ + if (!done) { + if (!wait_event_timeout(msm_host->pwr_irq_wait, + msm_host->pwr_irq_flag, + msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) + dev_warn(&msm_host->pdev->dev, + "%s: pwr_irq for req: (%d) timed out\n", + mmc_hostname(host->mmc), req_type); + } + pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc), + __func__, req_type); +} + +static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n", + mmc_hostname(host->mmc), + readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS), + readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK), + readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL)); +} + +static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); u32 irq_status, irq_ack = 0; + int retry = 10; + int pwr_state = 0, io_level = 0; + irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS); irq_status &= INT_MASK; writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR); - if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF)) + /* + * There is a rare HW scenario where the first clear pulse could be + * lost when actual reset and clear/read of status register is + * happening at a time. Hence, retry for at least 10 times to make + * sure status register is cleared. Otherwise, this will result in + * a spurious power IRQ resulting in system instability. 
+ */ + while (irq_status & readl_relaxed(msm_host->core_mem + + CORE_PWRCTL_STATUS)) { + if (retry == 0) { + pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n", + mmc_hostname(host->mmc), irq_status); + sdhci_msm_dump_pwr_ctrl_regs(host); + WARN_ON(1); + break; + } + writel_relaxed(irq_status, + msm_host->core_mem + CORE_PWRCTL_CLEAR); + retry--; + udelay(10); + } + + /* Handle BUS ON/OFF*/ + if (irq_status & CORE_PWRCTL_BUS_ON) { + pwr_state = REQ_BUS_ON; + io_level = REQ_IO_HIGH; + irq_ack |= CORE_PWRCTL_BUS_SUCCESS; + } + if (irq_status & CORE_PWRCTL_BUS_OFF) { + pwr_state = REQ_BUS_OFF; + io_level = REQ_IO_LOW; irq_ack |= CORE_PWRCTL_BUS_SUCCESS; - if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH)) + } + /* Handle IO LOW/HIGH */ + if (irq_status & CORE_PWRCTL_IO_LOW) { + io_level = REQ_IO_LOW; + irq_ack |= CORE_PWRCTL_IO_SUCCESS; + } + if (irq_status & CORE_PWRCTL_IO_HIGH) { + io_level = REQ_IO_HIGH; irq_ack |= CORE_PWRCTL_IO_SUCCESS; + } /* * The driver has to acknowledge the interrupt, switch voltages and @@ -1017,13 +1146,27 @@ static void sdhci_msm_voltage_switch(struct sdhci_host *host) * switches are handled by the sdhci core, so just report success. */ writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL); + + if (pwr_state) + msm_host->curr_pwr_state = pwr_state; + if (io_level) + msm_host->curr_io_level = io_level; + + pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n", + mmc_hostname(msm_host->mmc), __func__, irq, irq_status, + irq_ack); } static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) { struct sdhci_host *host = (struct sdhci_host *)data; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_msm_handle_pwr_irq(host, irq); + msm_host->pwr_irq_flag = 1; + sdhci_msm_complete_pwr_irq_wait(msm_host); - sdhci_msm_voltage_switch(host); return IRQ_HANDLED; } @@ -1032,8 +1175,9 @@ static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct clk *core_clk = msm_host->bulk_clks[0].clk; - return clk_round_rate(msm_host->clk, ULONG_MAX); + return clk_round_rate(core_clk, ULONG_MAX); } static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host) @@ -1049,7 +1193,7 @@ static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host) * instead directly control the GCC clock as per * HW recommendation. **/ -void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) +static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) { u16 clk; /* @@ -1092,6 +1236,69 @@ out: __sdhci_msm_set_clock(host, clock); } +/* + * Platform specific register write functions. This is so that, if any + * register write needs to be followed up by platform specific actions, + * they can be added here. These functions can go to sleep when writes + * to certain registers are done. + * These functions are relying on sdhci_set_ios not using spinlock. + */ +static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + u32 req_type = 0; + + switch (reg) { + case SDHCI_HOST_CONTROL2: + req_type = (val & SDHCI_CTRL_VDD_180) ? 
REQ_IO_LOW : + REQ_IO_HIGH; + break; + case SDHCI_SOFTWARE_RESET: + if (host->pwr && (val & SDHCI_RESET_ALL)) + req_type = REQ_BUS_OFF; + break; + case SDHCI_POWER_CONTROL: + req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON; + break; + } + + if (req_type) { + msm_host->pwr_irq_flag = 0; + /* + * Since this register write may trigger a power irq, ensure + * all previous register writes are complete by this point. + */ + mb(); + } + return req_type; +} + +/* This function may sleep*/ +static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) +{ + u32 req_type = 0; + + req_type = __sdhci_msm_check_write(host, val, reg); + writew_relaxed(val, host->ioaddr + reg); + + if (req_type) + sdhci_msm_check_power_status(host, req_type); +} + +/* This function may sleep*/ +static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) +{ + u32 req_type = 0; + + req_type = __sdhci_msm_check_write(host, val, reg); + + writeb_relaxed(val, host->ioaddr + reg); + + if (req_type) + sdhci_msm_check_power_status(host, req_type); +} + static const struct of_device_id sdhci_msm_dt_match[] = { { .compatible = "qcom,sdhci-msm-v4" }, {}, @@ -1106,7 +1313,8 @@ static const struct sdhci_ops sdhci_msm_ops = { .get_max_clock = sdhci_msm_get_max_clock, .set_bus_width = sdhci_set_bus_width, .set_uhs_signaling = sdhci_msm_set_uhs_signaling, - .voltage_switch = sdhci_msm_voltage_switch, + .write_w = sdhci_msm_writew, + .write_b = sdhci_msm_writeb, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { @@ -1124,6 +1332,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_msm_host *msm_host; struct resource *core_memres; + struct clk *clk; int ret; u16 host_version, core_minor; u32 core_version, config; @@ -1133,6 +1342,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) if (IS_ERR(host)) return PTR_ERR(host); + host->sdma_boundary = 0; pltfm_host = sdhci_priv(host); msm_host = sdhci_pltfm_priv(pltfm_host); msm_host->mmc = host->mmc; @@ -1159,24 +1369,42 @@ static int sdhci_msm_probe(struct platform_device *pdev) } /* Setup main peripheral bus clock */ - msm_host->pclk = devm_clk_get(&pdev->dev, "iface"); - if (IS_ERR(msm_host->pclk)) { - ret = PTR_ERR(msm_host->pclk); + clk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); goto bus_clk_disable; } - - ret = clk_prepare_enable(msm_host->pclk); - if (ret) - goto bus_clk_disable; + msm_host->bulk_clks[1].clk = clk; /* Setup SDC MMC clock */ - msm_host->clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(msm_host->clk)) { - ret = PTR_ERR(msm_host->clk); + clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); - goto pclk_disable; + goto bus_clk_disable; } + msm_host->bulk_clks[0].clk = clk; + + /* Vote for maximum clock rate for maximum performance */ + ret = clk_set_rate(clk, INT_MAX); + if (ret) + dev_warn(&pdev->dev, "core clock boost failed\n"); + + clk = devm_clk_get(&pdev->dev, "cal"); + if (IS_ERR(clk)) + clk = NULL; + msm_host->bulk_clks[2].clk = clk; + + clk = devm_clk_get(&pdev->dev, "sleep"); + if (IS_ERR(clk)) + clk = NULL; + msm_host->bulk_clks[3].clk = clk; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); + if (ret) + goto bus_clk_disable; /* * xo clock is needed for FLL feature of cm_dll. 
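The sdhci-msm hunks above drop the driver's separate clk/pclk members in favour of a clk_bulk_data array, so the core, iface and optional cal/sleep clocks are prepared, enabled and torn down as a group. The sketch below shows that pattern in isolation; struct my_host and the clock names are placeholders for illustration only, and the optional clock is simply left as a NULL entry, which the clk framework treats as a no-op:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct my_host {
	struct clk_bulk_data clks[3];	/* [0] core, [1] iface, [2] optional "cal" */
};

static int my_host_enable_clocks(struct device *dev, struct my_host *h)
{
	struct clk *clk;

	/* Mandatory clocks: propagate the error if they are missing. */
	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	h->clks[0].clk = clk;

	clk = devm_clk_get(dev, "iface");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	h->clks[1].clk = clk;

	/* Optional clock: a NULL slot is skipped by the bulk helpers. */
	clk = devm_clk_get(dev, "cal");
	h->clks[2].clk = IS_ERR(clk) ? NULL : clk;

	/* Prepare and enable the whole set in one call. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(h->clks), h->clks);
}

static void my_host_disable_clocks(struct my_host *h)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(h->clks), h->clks);
}

This is also why the error paths in the probe, remove and runtime-PM hunks collapse from two clk_disable_unprepare() calls into a single clk_bulk_disable_unprepare().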
@@ -1188,15 +1416,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret); } - /* Vote for maximum clock rate for maximum performance */ - ret = clk_set_rate(msm_host->clk, INT_MAX); - if (ret) - dev_warn(&pdev->dev, "core clock boost failed\n"); - - ret = clk_prepare_enable(msm_host->clk); - if (ret) - goto pclk_disable; - core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres); @@ -1250,6 +1469,21 @@ static int sdhci_msm_probe(struct platform_device *pdev) CORE_VENDOR_SPEC_CAPABILITIES0); } + /* + * Power on reset state may trigger power irq if previous status of + * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq + * interrupt in GIC, any pending power irq interrupt should be + * acknowledged. Otherwise power irq interrupt handler would be + * fired prematurely. + */ + sdhci_msm_handle_pwr_irq(host, 0); + + /* + * Ensure that above writes are propogated before interrupt enablement + * in GIC. + */ + mb(); + /* Setup IRQ for handling power/voltage tasks with PMIC */ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); if (msm_host->pwr_irq < 0) { @@ -1259,6 +1493,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + sdhci_msm_init_pwr_irq_wait(msm_host); + /* Enable pwr irq interrupts */ + writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK); + ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, sdhci_msm_pwr_irq, IRQF_ONESHOT, dev_name(&pdev->dev), host); @@ -1289,9 +1527,8 @@ pm_runtime_disable: pm_runtime_set_suspended(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); clk_disable: - clk_disable_unprepare(msm_host->clk); -pclk_disable: - clk_disable_unprepare(msm_host->pclk); + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); bus_clk_disable: if (!IS_ERR(msm_host->bus_clk)) clk_disable_unprepare(msm_host->bus_clk); @@ -1314,8 +1551,8 @@ static int sdhci_msm_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - clk_disable_unprepare(msm_host->clk); - clk_disable_unprepare(msm_host->pclk); + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); if (!IS_ERR(msm_host->bus_clk)) clk_disable_unprepare(msm_host->bus_clk); sdhci_pltfm_free(pdev); @@ -1329,8 +1566,8 @@ static int sdhci_msm_runtime_suspend(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); - clk_disable_unprepare(msm_host->clk); - clk_disable_unprepare(msm_host->pclk); + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); return 0; } @@ -1340,21 +1577,9 @@ static int sdhci_msm_runtime_resume(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); - int ret; - ret = clk_prepare_enable(msm_host->clk); - if (ret) { - dev_err(dev, "clk_enable failed for core_clk: %d\n", ret); - return ret; - } - ret = clk_prepare_enable(msm_host->pclk); - if (ret) { - dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret); - clk_disable_unprepare(msm_host->clk); - return ret; - } - - return 0; + return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); } #endif diff --git a/drivers/mmc/host/sdhci-of-arasan.c 
b/drivers/mmc/host/sdhci-of-arasan.c index b13c0a7d50e4..0720ea717011 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -216,13 +216,13 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc, u32 vendor; struct sdhci_host *host = mmc_priv(mmc); - vendor = readl(host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); + vendor = sdhci_readl(host, SDHCI_ARASAN_VENDOR_REGISTER); if (ios->enhanced_strobe) vendor |= VENDOR_ENHANCED_STROBE; else vendor &= ~VENDOR_ENHANCED_STROBE; - writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); + sdhci_writel(host, vendor, SDHCI_ARASAN_VENDOR_REGISTER); } static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) @@ -262,7 +262,7 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc, return -EINVAL; } -static struct sdhci_ops sdhci_arasan_ops = { +static const struct sdhci_ops sdhci_arasan_ops = { .set_clock = sdhci_arasan_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -271,7 +271,7 @@ static struct sdhci_ops sdhci_arasan_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_arasan_pdata = { +static const struct sdhci_pltfm_data sdhci_arasan_pdata = { .ops = &sdhci_arasan_ops, .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 7611fd679f1a..682c573e20a7 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -31,6 +31,7 @@ #define SDMMC_MC1R 0x204 #define SDMMC_MC1R_DDR BIT(3) +#define SDMMC_MC1R_FCD BIT(7) #define SDMMC_CACR 0x230 #define SDMMC_CACR_CAPWREN BIT(0) #define SDMMC_CACR_KEY (0x46 << 8) @@ -41,8 +42,18 @@ struct sdhci_at91_priv { struct clk *hclock; struct clk *gck; struct clk *mainck; + bool restore_needed; }; +static void sdhci_at91_set_force_card_detect(struct sdhci_host *host) +{ + u8 mc1r; + + mc1r = readb(host->ioaddr + SDMMC_MC1R); + mc1r |= SDMMC_MC1R_FCD; + writeb(mc1r, host->ioaddr + SDMMC_MC1R); +} + static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) { u16 clk; @@ -103,17 +114,26 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode, sdhci_set_power_noreg(host, mode, vdd); } -void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) +static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) { if (timing == MMC_TIMING_MMC_DDR52) sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R); sdhci_set_uhs_signaling(host, timing); } +static void sdhci_at91_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_reset(host, mask); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdhci_at91_set_force_card_detect(host); +} + static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_clock = sdhci_at91_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = sdhci_at91_reset, .set_uhs_signaling = sdhci_at91_set_uhs_signaling, .set_power = sdhci_at91_set_power, }; @@ -128,6 +148,100 @@ static const struct of_device_id sdhci_at91_dt_match[] = { }; MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match); +static int sdhci_at91_set_clks_presets(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + unsigned int caps0, caps1; + unsigned int clk_base, clk_mul; + 
unsigned int gck_rate, real_gck_rate; + unsigned int preset_div; + + /* + * The mult clock is provided by as a generated clock by the PMC + * controller. In order to set the rate of gck, we have to get the + * base clock rate and the clock mult from capabilities. + */ + clk_prepare_enable(priv->hclock); + caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); + caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); + clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; + clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; + gck_rate = clk_base * 1000000 * (clk_mul + 1); + ret = clk_set_rate(priv->gck, gck_rate); + if (ret < 0) { + dev_err(dev, "failed to set gck"); + clk_disable_unprepare(priv->hclock); + return ret; + } + /* + * We need to check if we have the requested rate for gck because in + * some cases this rate could be not supported. If it happens, the rate + * is the closest one gck can provide. We have to update the value + * of clk mul. + */ + real_gck_rate = clk_get_rate(priv->gck); + if (real_gck_rate != gck_rate) { + clk_mul = real_gck_rate / (clk_base * 1000000) - 1; + caps1 &= (~SDHCI_CLOCK_MUL_MASK); + caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & + SDHCI_CLOCK_MUL_MASK); + /* Set capabilities in r/w mode. */ + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, + host->ioaddr + SDMMC_CACR); + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); + /* Set capabilities in ro mode. */ + writel(0, host->ioaddr + SDMMC_CACR); + dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n", + clk_mul, real_gck_rate); + } + + /* + * We have to set preset values because it depends on the clk_mul + * value. Moreover, SDR104 is supported in a degraded mode since the + * maximum sd clock value is 120 MHz instead of 208 MHz. For that + * reason, we need to use presets to support SDR104. 
+ */ + preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR12); + preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR25); + preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR50); + preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_SDR104); + preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, + host->ioaddr + SDHCI_PRESET_FOR_DDR50); + + clk_prepare_enable(priv->mainck); + clk_prepare_enable(priv->gck); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_at91_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = pm_runtime_force_suspend(dev); + + priv->restore_needed = true; + + return ret; +} +#endif /* CONFIG_PM_SLEEP */ + #ifdef CONFIG_PM static int sdhci_at91_runtime_suspend(struct device *dev) { @@ -155,6 +269,15 @@ static int sdhci_at91_runtime_resume(struct device *dev) struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); int ret; + if (priv->restore_needed) { + ret = sdhci_at91_set_clks_presets(dev); + if (ret) + return ret; + + priv->restore_needed = false; + goto out; + } + ret = clk_prepare_enable(priv->mainck); if (ret) { dev_err(dev, "can't enable mainck\n"); @@ -173,13 +296,13 @@ static int sdhci_at91_runtime_resume(struct device *dev) return ret; } +out: return sdhci_runtime_resume_host(host); } #endif /* CONFIG_PM */ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend, sdhci_at91_runtime_resume, NULL) @@ -192,11 +315,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_at91_priv *priv; - unsigned int caps0, caps1; - unsigned int clk_base, clk_mul; - unsigned int gck_rate, real_gck_rate; int ret; - unsigned int preset_div; match = of_match_device(sdhci_at91_dt_match, &pdev->dev); if (!match) @@ -228,66 +347,11 @@ static int sdhci_at91_probe(struct platform_device *pdev) return PTR_ERR(priv->gck); } - /* - * The mult clock is provided by as a generated clock by the PMC - * controller. In order to set the rate of gck, we have to get the - * base clock rate and the clock mult from capabilities. - */ - clk_prepare_enable(priv->hclock); - caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); - caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); - clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; - clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; - gck_rate = clk_base * 1000000 * (clk_mul + 1); - ret = clk_set_rate(priv->gck, gck_rate); - if (ret < 0) { - dev_err(&pdev->dev, "failed to set gck"); - goto hclock_disable_unprepare; - } - /* - * We need to check if we have the requested rate for gck because in - * some cases this rate could be not supported. If it happens, the rate - * is the closest one gck can provide. 
We have to update the value - * of clk mul. - */ - real_gck_rate = clk_get_rate(priv->gck); - if (real_gck_rate != gck_rate) { - clk_mul = real_gck_rate / (clk_base * 1000000) - 1; - caps1 &= (~SDHCI_CLOCK_MUL_MASK); - caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK); - /* Set capabilities in r/w mode. */ - writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); - writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); - /* Set capabilities in ro mode. */ - writel(0, host->ioaddr + SDMMC_CACR); - dev_info(&pdev->dev, "update clk mul to %u as gck rate is %u Hz\n", - clk_mul, real_gck_rate); - } - - /* - * We have to set preset values because it depends on the clk_mul - * value. Moreover, SDR104 is supported in a degraded mode since the - * maximum sd clock value is 120 MHz instead of 208 MHz. For that - * reason, we need to use presets to support SDR104. - */ - preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR12); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR25); - preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR50); - preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_SDR104); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; - writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, - host->ioaddr + SDHCI_PRESET_FOR_DDR50); + ret = sdhci_at91_set_clks_presets(&pdev->dev); + if (ret) + goto sdhci_pltfm_free; - clk_prepare_enable(priv->mainck); - clk_prepare_enable(priv->gck); + priv->restore_needed = false; ret = mmc_of_parse(host->mmc); if (ret) @@ -324,6 +388,21 @@ static int sdhci_at91_probe(struct platform_device *pdev) host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } + /* + * If the device attached to the MMC bus is not removable, it is safer + * to set the Force Card Detect bit. People often don't connect the + * card detect signal and use this pin for another purpose. If the card + * detect pin is not muxed to SDHCI controller, a default value is + * used. This value can be different from a SoC revision to another + * one. Problems come when this default value is not card present. To + * avoid this case, if the device is non removable then the card + * detection procedure using the SDMCC_CD signal is bypassed. + * This bit is reset when a software reset for all command is performed + * so we need to implement our own reset function to set back this bit. + */ + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdhci_at91_set_force_card_detect(host); + pm_runtime_put_autosuspend(&pdev->dev); return 0; @@ -335,8 +414,8 @@ pm_runtime_disable: clocks_disable_unprepare: clk_disable_unprepare(priv->gck); clk_disable_unprepare(priv->mainck); -hclock_disable_unprepare: clk_disable_unprepare(priv->hclock); +sdhci_pltfm_free: sdhci_pltfm_free(pdev); return ret; } diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 44b016baa585..1f424374bbbb 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -86,6 +86,17 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host, return ret; } + /* + * DTS properties of mmc host are used to enable each speed mode + * according to soc and board capability. 
So clean up + * SDR50/SDR104/DDR50 support bits here. + */ + if (spec_reg == SDHCI_CAPABILITIES_1) { + ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_DDR50); + return ret; + } + ret = value; return ret; } @@ -249,7 +260,11 @@ static u32 esdhc_be_readl(struct sdhci_host *host, int reg) u32 ret; u32 value; - value = ioread32be(host->ioaddr + reg); + if (reg == SDHCI_CAPABILITIES_1) + value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1); + else + value = ioread32be(host->ioaddr + reg); + ret = esdhc_readl_fixup(host, reg, value); return ret; @@ -260,7 +275,11 @@ static u32 esdhc_le_readl(struct sdhci_host *host, int reg) u32 ret; u32 value; - value = ioread32(host->ioaddr + reg); + if (reg == SDHCI_CAPABILITIES_1) + value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1); + else + value = ioread32(host->ioaddr + reg); + ret = esdhc_readl_fixup(host, reg, value); return ret; @@ -439,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) return clock / 256 / 16; } +static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +{ + u32 val; + ktime_t timeout; + + val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + + if (enable) + val |= ESDHC_CLOCK_SDCLKEN; + else + val &= ~ESDHC_CLOCK_SDCLKEN; + + sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + val = ESDHC_CLOCK_STABLE; + while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { + if (ktime_after(ktime_get(), timeout)) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + udelay(10); + } +} + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -450,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - if (clock == 0) + if (clock == 0) { + esdhc_clock_enable(host, false); return; + } /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ if (esdhc->vendor_ver < VENDOR_V_23) @@ -539,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) sdhci_writel(host, ctrl, ESDHC_PROCTL); } -static void esdhc_clock_enable(struct sdhci_host *host, bool enable) -{ - u32 val; - ktime_t timeout; - - val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - - if (enable) - val |= ESDHC_CLOCK_SDCLKEN; - else - val &= ~ESDHC_CLOCK_SDCLKEN; - - sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { - if (ktime_after(ktime_get(), timeout)) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - break; - } - udelay(10); - } -} - static void esdhc_reset(struct sdhci_host *host, u8 mask) { sdhci_reset(host, mask); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c new file mode 100644 index 000000000000..628bfe9a3d17 --- /dev/null +++ b/drivers/mmc/host/sdhci-omap.c @@ -0,0 +1,607 @@ +/** + * SDHCI Controller driver for TI's OMAP SoCs + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/delay.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> + +#include "sdhci-pltfm.h" + +#define SDHCI_OMAP_CON 0x12c +#define CON_DW8 BIT(5) +#define CON_DMA_MASTER BIT(20) +#define CON_INIT BIT(1) +#define CON_OD BIT(0) + +#define SDHCI_OMAP_CMD 0x20c + +#define SDHCI_OMAP_HCTL 0x228 +#define HCTL_SDBP BIT(8) +#define HCTL_SDVS_SHIFT 9 +#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT) + +#define SDHCI_OMAP_SYSCTL 0x22c +#define SYSCTL_CEN BIT(2) +#define SYSCTL_CLKD_SHIFT 6 +#define SYSCTL_CLKD_MASK 0x3ff + +#define SDHCI_OMAP_STAT 0x230 + +#define SDHCI_OMAP_IE 0x234 +#define INT_CC_EN BIT(0) + +#define SDHCI_OMAP_AC12 0x23c +#define AC12_V1V8_SIGEN BIT(19) + +#define SDHCI_OMAP_CAPA 0x240 +#define CAPA_VS33 BIT(24) +#define CAPA_VS30 BIT(25) +#define CAPA_VS18 BIT(26) + +#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */ + +#define SYSCTL_CLKD_MAX 0x3FF + +#define IOV_1V8 1800000 /* 180000 uV */ +#define IOV_3V0 3000000 /* 300000 uV */ +#define IOV_3V3 3300000 /* 330000 uV */ + +struct sdhci_omap_data { + u32 offset; +}; + +struct sdhci_omap_host { + void __iomem *base; + struct device *dev; + struct regulator *pbias; + bool pbias_enabled; + struct sdhci_host *host; + u8 bus_mode; + u8 power_mode; +}; + +static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host, + unsigned int offset) +{ + return readl(host->base + offset); +} + +static inline void sdhci_omap_writel(struct sdhci_omap_host *host, + unsigned int offset, u32 data) +{ + writel(data, host->base + offset); +} + +static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host, + bool power_on, unsigned int iov) +{ + int ret; + struct device *dev = omap_host->dev; + + if (IS_ERR(omap_host->pbias)) + return 0; + + if (power_on) { + ret = regulator_set_voltage(omap_host->pbias, iov, iov); + if (ret) { + dev_err(dev, "pbias set voltage failed\n"); + return ret; + } + + if (omap_host->pbias_enabled) + return 0; + + ret = regulator_enable(omap_host->pbias); + if (ret) { + dev_err(dev, "pbias reg enable fail\n"); + return ret; + } + + omap_host->pbias_enabled = true; + } else { + if (!omap_host->pbias_enabled) + return 0; + + ret = regulator_disable(omap_host->pbias); + if (ret) { + dev_err(dev, "pbias reg disable fail\n"); + return ret; + } + omap_host->pbias_enabled = false; + } + + return 0; +} + +static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host, + unsigned int iov) +{ + int ret; + struct sdhci_host *host = omap_host->host; + struct mmc_host *mmc = host->mmc; + + ret = sdhci_omap_set_pbias(omap_host, false, 0); + if (ret) + return ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov); + if (ret) { + dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n"); + return ret; + } + } + + ret = sdhci_omap_set_pbias(omap_host, true, iov); + if (ret) + return 
ret; + + return 0; +} + +static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host, + unsigned char signal_voltage) +{ + u32 reg; + ktime_t timeout; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL); + reg &= ~HCTL_SDVS_MASK; + + if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) + reg |= HCTL_SDVS_33; + else + reg |= HCTL_SDVS_18; + + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg); + + reg |= HCTL_SDBP; + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg); + + /* wait 1ms */ + timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); + while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) { + if (WARN_ON(ktime_after(ktime_get(), timeout))) + return; + usleep_range(5, 10); + } +} + +static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + u32 reg; + int ret; + unsigned int iov; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + struct device *dev; + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + dev = omap_host->dev; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + if (!(reg & CAPA_VS33)) + return -EOPNOTSUPP; + + sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + reg &= ~AC12_V1V8_SIGEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg); + + iov = IOV_3V3; + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + if (!(reg & CAPA_VS18)) + return -EOPNOTSUPP; + + sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + reg |= AC12_V1V8_SIGEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg); + + iov = IOV_1V8; + } else { + return -EOPNOTSUPP; + } + + ret = sdhci_omap_enable_iov(omap_host, iov); + if (ret) { + dev_err(dev, "failed to switch IO voltage to %dmV\n", iov); + return ret; + } + + dev_dbg(dev, "IO voltage switched to %dmV\n", iov); + return 0; +} + +static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host, + unsigned int mode) +{ + u32 reg; + + if (omap_host->bus_mode == mode) + return; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (mode == MMC_BUSMODE_OPENDRAIN) + reg |= CON_OD; + else + reg &= ~CON_OD; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + omap_host->bus_mode = mode; +} + +static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_omap_set_bus_mode(omap_host, ios->bus_mode); + sdhci_set_ios(mmc, ios); +} + +static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host, + unsigned int clock) +{ + u16 dsor; + + dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock); + if (dsor > SYSCTL_CLKD_MAX) + dsor = SYSCTL_CLKD_MAX; + + return dsor; +} + +static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host) +{ + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL); + reg |= SYSCTL_CEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg); +} + +static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host) +{ + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL); + reg &= ~SYSCTL_CEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg); +} + 
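For reference, the divisor math used by sdhci_omap_calc_divisor() above, and consumed by the set_clock callback that follows, rounds the functional-clock/target ratio up and clamps it to the 10-bit SYSCTL_CLKD field. The fragment below is only an illustrative sketch with an assumed 192 MHz functional clock; in the driver the real rate comes from clk_get_rate(pltfm_host->clk):

#include <linux/kernel.h>	/* DIV_ROUND_UP(), u16 via linux/types.h */

#define EXAMPLE_SYSCTL_CLKD_MAX	0x3ff		/* 10-bit divider field */
#define EXAMPLE_FCK_HZ		192000000U	/* assumed rate, not from the driver */

static u16 example_calc_divisor(unsigned int clock)
{
	u16 dsor = DIV_ROUND_UP(EXAMPLE_FCK_HZ, clock);

	/* Clamp to what the register field can encode. */
	if (dsor > EXAMPLE_SYSCTL_CLKD_MAX)
		dsor = EXAMPLE_SYSCTL_CLKD_MAX;

	return dsor;
}

/*
 * example_calc_divisor(400000)   -> 480  (192 MHz / 480 = 400 kHz)
 * example_calc_divisor(25000000) -> 8    (192 MHz / 8   = 24 MHz, rounded down from 25 MHz)
 * example_calc_divisor(100000)   -> 1023 (clamped; minimum clock is roughly 187.7 kHz)
 */

The set_clock callback below then shifts this value into the SYSCTL_CLKD field and re-enables the clock via SYSCTL_CEN.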
+static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long clkdiv; + + sdhci_omap_stop_clock(omap_host); + + if (!clock) + return; + + clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock); + clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT; + sdhci_enable_clk(host, clkdiv); + + sdhci_omap_start_clock(omap_host); +} + +static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +} + +static int sdhci_omap_enable_dma(struct sdhci_host *host) +{ + u32 reg; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg |= CON_DMA_MASTER; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + return 0; +} + +static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX; +} + +static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (width == MMC_BUS_WIDTH_8) + reg |= CON_DW8; + else + reg &= ~CON_DW8; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + sdhci_set_bus_width(host, width); +} + +static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode) +{ + u32 reg; + ktime_t timeout; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + if (omap_host->power_mode == power_mode) + return; + + if (power_mode != MMC_POWER_ON) + return; + + disable_irq(host->irq); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg |= CON_INIT; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0); + + /* wait 1ms */ + timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); + while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) { + if (WARN_ON(ktime_after(ktime_get(), timeout))) + return; + usleep_range(5, 10); + } + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg &= ~CON_INIT; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN); + + enable_irq(host->irq); + + omap_host->power_mode = power_mode; +} + +static struct sdhci_ops sdhci_omap_ops = { + .set_clock = sdhci_omap_set_clock, + .set_power = sdhci_omap_set_power, + .enable_dma = sdhci_omap_enable_dma, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_min_clock = sdhci_omap_get_min_clock, + .set_bus_width = sdhci_omap_set_bus_width, + .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) +{ + u32 reg; + int ret = 0; + struct device *dev = omap_host->dev; + struct regulator *vqmmc; + + vqmmc = regulator_get(dev, "vqmmc"); + if (IS_ERR(vqmmc)) { + ret = PTR_ERR(vqmmc); + goto reg_put; + } + + /* voltage capabilities might be set by boot loader, clear it */ + reg = sdhci_omap_readl(omap_host, 
SDHCI_OMAP_CAPA); + reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33); + + if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3)) + reg |= CAPA_VS33; + if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8)) + reg |= CAPA_VS18; + + sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg); + +reg_put: + regulator_put(vqmmc); + + return ret; +} + +static const struct sdhci_pltfm_data sdhci_omap_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, + .quirks2 = SDHCI_QUIRK2_NO_1_8_V | + SDHCI_QUIRK2_ACMD23_BROKEN | + SDHCI_QUIRK2_RSP_136_HAS_CRC, + .ops = &sdhci_omap_ops, +}; + +static const struct sdhci_omap_data dra7_data = { + .offset = 0x200, +}; + +static const struct of_device_id omap_sdhci_match[] = { + { .compatible = "ti,dra7-sdhci", .data = &dra7_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_sdhci_match); + +static int sdhci_omap_probe(struct platform_device *pdev) +{ + int ret; + u32 offset; + struct device *dev = &pdev->dev; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + struct mmc_host *mmc; + const struct of_device_id *match; + struct sdhci_omap_data *data; + + match = of_match_device(omap_sdhci_match, dev); + if (!match) + return -EINVAL; + + data = (struct sdhci_omap_data *)match->data; + if (!data) { + dev_err(dev, "no sdhci omap data\n"); + return -EINVAL; + } + offset = data->offset; + + host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata, + sizeof(*omap_host)); + if (IS_ERR(host)) { + dev_err(dev, "Failed sdhci_pltfm_init\n"); + return PTR_ERR(host); + } + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + omap_host->host = host; + omap_host->base = host->ioaddr; + omap_host->dev = dev; + host->ioaddr += offset; + + mmc = host->mmc; + ret = mmc_of_parse(mmc); + if (ret) + goto err_pltfm_free; + + pltfm_host->clk = devm_clk_get(dev, "fck"); + if (IS_ERR(pltfm_host->clk)) { + ret = PTR_ERR(pltfm_host->clk); + goto err_pltfm_free; + } + + ret = clk_set_rate(pltfm_host->clk, mmc->f_max); + if (ret) { + dev_err(dev, "failed to set clock to %d\n", mmc->f_max); + goto err_pltfm_free; + } + + omap_host->pbias = devm_regulator_get_optional(dev, "pbias"); + if (IS_ERR(omap_host->pbias)) { + ret = PTR_ERR(omap_host->pbias); + if (ret != -ENODEV) + goto err_pltfm_free; + dev_dbg(dev, "unable to get pbias regulator %d\n", ret); + } + omap_host->pbias_enabled = false; + + /* + * omap_device_pm_domain has callbacks to enable the main + * functional clock, interface clock and also configure the + * SYSCONFIG register of omap devices. The callback will be invoked + * as part of pm_runtime_get_sync. 
+ */ + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + pm_runtime_put_noidle(dev); + goto err_rpm_disable; + } + + ret = sdhci_omap_set_capabilities(omap_host); + if (ret) { + dev_err(dev, "failed to set system capabilities\n"); + goto err_put_sync; + } + + host->mmc_host_ops.get_ro = mmc_gpio_get_ro; + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_omap_start_signal_voltage_switch; + host->mmc_host_ops.set_ios = sdhci_omap_set_ios; + + sdhci_read_caps(host); + host->caps |= SDHCI_CAN_DO_ADMA2; + + ret = sdhci_add_host(host); + if (ret) + goto err_put_sync; + + return 0; + +err_put_sync: + pm_runtime_put_sync(dev); + +err_rpm_disable: + pm_runtime_disable(dev); + +err_pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_omap_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sdhci_host *host = platform_get_drvdata(pdev); + + sdhci_remove_host(host, true); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + sdhci_pltfm_free(pdev); + + return 0; +} + +static struct platform_driver sdhci_omap_driver = { + .probe = sdhci_omap_probe, + .remove = sdhci_omap_remove, + .driver = { + .name = "sdhci-omap", + .of_match_table = omap_sdhci_match, + }, +}; + +module_platform_driver(sdhci_omap_driver); + +MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs"); +MODULE_AUTHOR("Texas Instruments Inc."); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sdhci_omap"); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index e1721ac37919..3e4f04fd5175 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -32,10 +32,8 @@ #include "sdhci.h" #include "sdhci-pci.h" -#include "sdhci-pci-o2micro.h" static int sdhci_pci_enable_dma(struct sdhci_host *host); -static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width); static void sdhci_pci_hw_reset(struct sdhci_host *host); #ifdef CONFIG_PM_SLEEP @@ -393,6 +391,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { enum { INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, INTEL_DSM_DRV_STRENGTH = 9, INTEL_DSM_D3_RETUNE = 10, }; @@ -448,6 +447,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, int err; u32 val; + intel_host->d3_retune = true; + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); if (err) { pr_debug("%s: DSM not supported, error %d\n", @@ -558,14 +559,28 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, sdhci_writel(host, val, INTEL_HS400_ES_REG); } +static void sdhci_intel_voltage_switch(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct device *dev = &slot->chip->pdev->dev; + u32 result = 0; + int err; + + err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result); + pr_debug("%s: %s DSM error %d result %u\n", + mmc_hostname(host->mmc), __func__, err, result); +} + static const struct sdhci_ops sdhci_intel_byt_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_intel_set_power, .enable_dma = sdhci_pci_enable_dma, - .set_bus_width = sdhci_pci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, + .voltage_switch = sdhci_intel_voltage_switch, }; static void byt_read_dsm(struct sdhci_pci_slot *slot) @@ -730,6 +745,24 @@ static const struct sdhci_pci_fixes 
sdhci_intel_byt_sd = { #define INTEL_MRFLD_SD 2 #define INTEL_MRFLD_SDIO 3 +#ifdef CONFIG_ACPI +static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) +{ + struct acpi_device *device, *child; + + device = ACPI_COMPANION(&slot->chip->pdev->dev); + if (!device) + return; + + acpi_device_fix_up_power(device); + list_for_each_entry(child, &device->children, node) + if (child->status.present && child->status.enabled) + acpi_device_fix_up_power(child); +} +#else +static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {} +#endif + static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) { unsigned int func = PCI_FUNC(slot->chip->pdev->devfn); @@ -751,6 +784,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) default: return -ENODEV; } + + intel_mrfld_mmc_fix_up_power_slot(slot); return 0; } @@ -762,15 +797,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = { .probe_slot = intel_mrfld_mmc_probe_slot, }; -/* O2Micro extra registers */ -#define O2_SD_LOCK_WP 0xD3 -#define O2_SD_MULTI_VCC3V 0xEE -#define O2_SD_CLKREQ 0xEC -#define O2_SD_CAPS 0xE0 -#define O2_SD_ADMA1 0xE2 -#define O2_SD_ADMA2 0xE7 -#define O2_SD_INF_MOD 0xF1 - static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) { u8 scratch; @@ -1197,7 +1223,7 @@ static int amd_probe(struct sdhci_pci_chip *chip) static const struct sdhci_ops amd_sdhci_pci_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, - .set_bus_width = sdhci_pci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .platform_execute_tuning = amd_execute_tuning, @@ -1254,6 +1280,7 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio), SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd), SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc), SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio), SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd), @@ -1313,29 +1340,6 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host) return 0; } -static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width) -{ - u8 ctrl; - - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - - switch (width) { - case MMC_BUS_WIDTH_8: - ctrl |= SDHCI_CTRL_8BITBUS; - ctrl &= ~SDHCI_CTRL_4BITBUS; - break; - case MMC_BUS_WIDTH_4: - ctrl |= SDHCI_CTRL_4BITBUS; - ctrl &= ~SDHCI_CTRL_8BITBUS; - break; - default: - ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS); - break; - } - - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); -} - static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) { struct sdhci_pci_slot *slot = sdhci_priv(host); @@ -1362,7 +1366,7 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host) static const struct sdhci_ops sdhci_pci_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, - .set_bus_width = sdhci_pci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 14273ca00641..555970a29c94 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -19,7 +19,40 @@ #include "sdhci.h" #include "sdhci-pci.h" -#include "sdhci-pci-o2micro.h" + +/* + * O2Micro device registers + */ + +#define O2_SD_MISC_REG5 0x64 
+#define O2_SD_LD0_CTRL 0x68 +#define O2_SD_DEV_CTRL 0x88 +#define O2_SD_LOCK_WP 0xD3 +#define O2_SD_TEST_REG 0xD4 +#define O2_SD_FUNC_REG0 0xDC +#define O2_SD_MULTI_VCC3V 0xEE +#define O2_SD_CLKREQ 0xEC +#define O2_SD_CAPS 0xE0 +#define O2_SD_ADMA1 0xE2 +#define O2_SD_ADMA2 0xE7 +#define O2_SD_INF_MOD 0xF1 +#define O2_SD_MISC_CTRL4 0xFC +#define O2_SD_TUNING_CTRL 0x300 +#define O2_SD_PLL_SETTING 0x304 +#define O2_SD_CLK_SETTING 0x328 +#define O2_SD_CAP_REG2 0x330 +#define O2_SD_CAP_REG0 0x334 +#define O2_SD_UHS1_CAP_SETTING 0x33C +#define O2_SD_DELAY_CTRL 0x350 +#define O2_SD_UHS2_L1_CTRL 0x35C +#define O2_SD_FUNC_REG3 0x3E0 +#define O2_SD_FUNC_REG4 0x3E4 +#define O2_SD_LED_ENABLE BIT(6) +#define O2_SD_FREG0_LEDOFF BIT(13) +#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22) + +#define O2_SD_VENDOR_SETTING 0x110 +#define O2_SD_VENDOR_SETTING2 0x1C8 static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value) { diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h deleted file mode 100644 index 770f53857211..000000000000 --- a/drivers/mmc/host/sdhci-pci-o2micro.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2013 BayHub Technology Ltd. - * - * Authors: Peter Guo <peter.guo@bayhubtech.com> - * Adam Lee <adam.lee@canonical.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __SDHCI_PCI_O2MICRO_H -#define __SDHCI_PCI_O2MICRO_H - -#include "sdhci-pci.h" - -/* - * O2Micro device IDs - */ - -#define PCI_DEVICE_ID_O2_SDS0 0x8420 -#define PCI_DEVICE_ID_O2_SDS1 0x8421 -#define PCI_DEVICE_ID_O2_FUJIN2 0x8520 -#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620 -#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621 - -/* - * O2Micro device registers - */ - -#define O2_SD_MISC_REG5 0x64 -#define O2_SD_LD0_CTRL 0x68 -#define O2_SD_DEV_CTRL 0x88 -#define O2_SD_LOCK_WP 0xD3 -#define O2_SD_TEST_REG 0xD4 -#define O2_SD_FUNC_REG0 0xDC -#define O2_SD_MULTI_VCC3V 0xEE -#define O2_SD_CLKREQ 0xEC -#define O2_SD_CAPS 0xE0 -#define O2_SD_ADMA1 0xE2 -#define O2_SD_ADMA2 0xE7 -#define O2_SD_INF_MOD 0xF1 -#define O2_SD_MISC_CTRL4 0xFC -#define O2_SD_TUNING_CTRL 0x300 -#define O2_SD_PLL_SETTING 0x304 -#define O2_SD_CLK_SETTING 0x328 -#define O2_SD_CAP_REG2 0x330 -#define O2_SD_CAP_REG0 0x334 -#define O2_SD_UHS1_CAP_SETTING 0x33C -#define O2_SD_DELAY_CTRL 0x350 -#define O2_SD_UHS2_L1_CTRL 0x35C -#define O2_SD_FUNC_REG3 0x3E0 -#define O2_SD_FUNC_REG4 0x3E4 -#define O2_SD_LED_ENABLE BIT(6) -#define O2_SD_FREG0_LEDOFF BIT(13) -#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22) - -#define O2_SD_VENDOR_SETTING 0x110 -#define O2_SD_VENDOR_SETTING2 0x1C8 - -extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); - -extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); - -extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); - -#endif /* __SDHCI_PCI_O2MICRO_H */ diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 75196a2b5289..0056f08a29cc 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SDHCI_PCI_H #define __SDHCI_PCI_H @@ -5,6 +6,12 @@ * PCI 
device IDs, sub IDs */ +#define PCI_DEVICE_ID_O2_SDS0 0x8420 +#define PCI_DEVICE_ID_O2_SDS1 0x8421 +#define PCI_DEVICE_ID_O2_FUJIN2 0x8520 +#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620 +#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621 + #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a #define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 @@ -25,6 +32,7 @@ #define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c #define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d #define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db +#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db #define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca #define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc #define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0 @@ -163,4 +171,10 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot) int sdhci_pci_resume_host(struct sdhci_pci_chip *chip); #endif +int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); +int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); +#ifdef CONFIG_PM_SLEEP +int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); +#endif + #endif /* __SDHCI_PCI_H */ diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c index 72c13b6f05f9..a6caa49ca25a 100644 --- a/drivers/mmc/host/sdhci-pic32.c +++ b/drivers/mmc/host/sdhci-pic32.c @@ -97,7 +97,7 @@ static const struct sdhci_ops pic32_sdhci_ops = { .get_ro = pic32_sdhci_get_ro, }; -static struct sdhci_pltfm_data sdhci_pic32_pdata = { +static const struct sdhci_pltfm_data sdhci_pic32_pdata = { .ops = &pic32_sdhci_ops, .quirks = SDHCI_QUIRK_NO_HISPD_BIT, .quirks2 = SDHCI_QUIRK2_NO_1_8_V, diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index e090d8c42ddb..02bea6159d79 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -209,22 +209,42 @@ int sdhci_pltfm_unregister(struct platform_device *pdev) EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); #ifdef CONFIG_PM_SLEEP -static int sdhci_pltfm_suspend(struct device *dev) +int sdhci_pltfm_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); - return sdhci_suspend_host(host); + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + clk_disable_unprepare(pltfm_host->clk); + + return 0; } +EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); -static int sdhci_pltfm_resume(struct device *dev) +int sdhci_pltfm_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; - return sdhci_resume_host(host); + ret = sdhci_resume_host(host); + if (ret) + clk_disable_unprepare(pltfm_host->clk); + + return ret; } +EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); #endif const struct dev_pm_ops sdhci_pltfm_pmops = { diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 957839d0fe37..1e91fb1c020e 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -109,6 +109,8 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) return host->private; } +int sdhci_pltfm_suspend(struct device *dev); +int sdhci_pltfm_resume(struct device *dev); extern const struct dev_pm_ops sdhci_pltfm_pmops; #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index 995083ce1c46..8986f9d9cf98 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ 
b/drivers/mmc/host/sdhci-pxav2.c @@ -178,17 +178,17 @@ static int sdhci_pxav2_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); - clk = clk_get(dev, "PXA-SDHCLK"); + clk = devm_clk_get(dev, "PXA-SDHCLK"); if (IS_ERR(clk)) { dev_err(dev, "failed to get io clock\n"); ret = PTR_ERR(clk); - goto err_clk_get; + goto free; } pltfm_host->clk = clk; ret = clk_prepare_enable(clk); if (ret) { dev_err(&pdev->dev, "failed to enable io clock\n"); - goto err_clk_enable; + goto free; } host->quirks = SDHCI_QUIRK_BROKEN_ADMA @@ -223,34 +223,18 @@ static int sdhci_pxav2_probe(struct platform_device *pdev) ret = sdhci_add_host(host); if (ret) { dev_err(&pdev->dev, "failed to add host\n"); - goto err_add_host; + goto disable_clk; } return 0; -err_add_host: +disable_clk: clk_disable_unprepare(clk); -err_clk_enable: - clk_put(clk); -err_clk_get: +free: sdhci_pltfm_free(pdev); return ret; } -static int sdhci_pxav2_remove(struct platform_device *pdev) -{ - struct sdhci_host *host = platform_get_drvdata(pdev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - - sdhci_remove_host(host, 1); - - clk_disable_unprepare(pltfm_host->clk); - clk_put(pltfm_host->clk); - sdhci_pltfm_free(pdev); - - return 0; -} - static struct platform_driver sdhci_pxav2_driver = { .driver = { .name = "sdhci-pxav2", @@ -258,7 +242,7 @@ static struct platform_driver sdhci_pxav2_driver = { .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_pxav2_probe, - .remove = sdhci_pxav2_remove, + .remove = sdhci_pltfm_unregister, }; module_platform_driver(sdhci_pxav2_driver); diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index f953f35c2624..a34434166ca7 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -337,7 +337,7 @@ static const struct sdhci_ops pxav3_sdhci_ops = { .set_uhs_signaling = pxav3_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_pxav3_pdata = { +static const struct sdhci_pltfm_data sdhci_pxav3_pdata = { .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | SDHCI_QUIRK_32BIT_ADMA_SIZE diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 7c065a70f92b..cda83ccb2702 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -414,43 +414,11 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); } -/** - * sdhci_s3c_set_bus_width - support 8bit buswidth - * @host: The SDHCI host being queried - * @width: MMC_BUS_WIDTH_ macro for the bus width being requested - * - * We have 8-bit width support but is not a v3 controller. - * So we add platform_bus_width() and support 8bit width. 
- */ -static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width) -{ - u8 ctrl; - - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - - switch (width) { - case MMC_BUS_WIDTH_8: - ctrl |= SDHCI_CTRL_8BITBUS; - ctrl &= ~SDHCI_CTRL_4BITBUS; - break; - case MMC_BUS_WIDTH_4: - ctrl |= SDHCI_CTRL_4BITBUS; - ctrl &= ~SDHCI_CTRL_8BITBUS; - break; - default: - ctrl &= ~SDHCI_CTRL_4BITBUS; - ctrl &= ~SDHCI_CTRL_8BITBUS; - break; - } - - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); -} - static struct sdhci_ops sdhci_s3c_ops = { .get_max_clock = sdhci_s3c_get_max_clk, .set_clock = sdhci_s3c_set_clock, .get_min_clock = sdhci_s3c_get_min_clock, - .set_bus_width = sdhci_s3c_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; @@ -793,32 +761,24 @@ static const struct dev_pm_ops sdhci_s3c_pmops = { NULL) }; -#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) -static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { - .no_divider = true, -}; -#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data) -#else -#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL) -#endif - static const struct platform_device_id sdhci_s3c_driver_ids[] = { { .name = "s3c-sdhci", .driver_data = (kernel_ulong_t)NULL, - }, { - .name = "exynos4-sdhci", - .driver_data = EXYNOS4_SDHCI_DRV_DATA, }, { } }; MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); #ifdef CONFIG_OF +static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { + .no_divider = true, +}; + static const struct of_device_id sdhci_s3c_dt_match[] = { { .compatible = "samsung,s3c6410-sdhci", }, { .compatible = "samsung,exynos4210-sdhci", - .data = (void *)EXYNOS4_SDHCI_DRV_DATA }, + .data = &exynos4_sdhci_drv_data }, {}, }; MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match); diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index c251c6c0a112..391d52b467ca 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -146,7 +146,7 @@ retry: return rc; } -static struct sdhci_ops sdhci_sirf_ops = { +static const struct sdhci_ops sdhci_sirf_ops = { .read_l = sdhci_sirf_readl_le, .read_w = sdhci_sirf_readw_le, .platform_execute_tuning = sdhci_sirf_execute_tuning, @@ -157,7 +157,7 @@ static struct sdhci_ops sdhci_sirf_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_sirf_pdata = { +static const struct sdhci_pltfm_data sdhci_sirf_pdata = { .ops = &sdhci_sirf_ops, .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | @@ -230,43 +230,6 @@ err_clk_prepare: return ret; } -#ifdef CONFIG_PM_SLEEP -static int sdhci_sirf_suspend(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int ret; - - if (host->tuning_mode != SDHCI_TUNING_MODE_3) - mmc_retune_needed(host->mmc); - - ret = sdhci_suspend_host(host); - if (ret) - return ret; - - clk_disable(pltfm_host->clk); - - return 0; -} - -static int sdhci_sirf_resume(struct device *dev) -{ - struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - int ret; - - ret = clk_enable(pltfm_host->clk); - if (ret) { - dev_dbg(dev, "Resume: Error enabling clock\n"); - return ret; - } - - return sdhci_resume_host(host); -} -#endif - -static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume); - static const struct of_device_id sdhci_sirf_of_match[] = { { .compatible = 
"sirf,prima2-sdhc" }, { } @@ -277,7 +240,7 @@ static struct platform_driver sdhci_sirf_driver = { .driver = { .name = "sdhci-sirf", .of_match_table = sdhci_sirf_of_match, - .pm = &sdhci_sirf_pm_ops, + .pm = &sdhci_pltfm_pmops, }, .probe = sdhci_sirf_probe, .remove = sdhci_pltfm_unregister, diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c index 68c36c9fa231..c32daed0d418 100644 --- a/drivers/mmc/host/sdhci-st.c +++ b/drivers/mmc/host/sdhci-st.c @@ -371,7 +371,7 @@ static int sdhci_st_probe(struct platform_device *pdev) if (IS_ERR(icnclk)) icnclk = NULL; - rstc = devm_reset_control_get(&pdev->dev, NULL); + rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(rstc)) rstc = NULL; else @@ -394,8 +394,17 @@ static int sdhci_st_probe(struct platform_device *pdev) goto err_of; } - clk_prepare_enable(clk); - clk_prepare_enable(icnclk); + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare clock\n"); + goto err_of; + } + + ret = clk_prepare_enable(icnclk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare icn clock\n"); + goto err_icnclk; + } /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -429,6 +438,7 @@ static int sdhci_st_probe(struct platform_device *pdev) err_out: clk_disable_unprepare(icnclk); +err_icnclk: clk_disable_unprepare(clk); err_of: sdhci_pltfm_free(pdev); @@ -487,9 +497,17 @@ static int sdhci_st_resume(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host); struct device_node *np = dev->of_node; + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) + return ret; - clk_prepare_enable(pltfm_host->clk); - clk_prepare_enable(pdata->icnclk); + ret = clk_prepare_enable(pdata->icnclk); + if (ret) { + clk_disable_unprepare(pltfm_host->clk); + return ret; + } if (pdata->rstc) reset_control_deassert(pdata->rstc); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 7f93079c7a3a..b877c13184c2 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -190,25 +190,6 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) tegra_host->ddr_signaling = false; } -static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) -{ - u32 ctrl; - - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - if ((host->mmc->caps & MMC_CAP_8_BIT_DATA) && - (bus_width == MMC_BUS_WIDTH_8)) { - ctrl &= ~SDHCI_CTRL_4BITBUS; - ctrl |= SDHCI_CTRL_8BITBUS; - } else { - ctrl &= ~SDHCI_CTRL_8BITBUS; - if (bus_width == MMC_BUS_WIDTH_4) - ctrl |= SDHCI_CTRL_4BITBUS; - else - ctrl &= ~SDHCI_CTRL_4BITBUS; - } - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); -} - static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) { u32 val; @@ -323,7 +304,7 @@ static const struct sdhci_ops tegra_sdhci_ops = { .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, - .set_bus_width = tegra_sdhci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = tegra_sdhci_reset, .platform_execute_tuning = tegra_sdhci_execute_tuning, .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, @@ -371,7 +352,7 @@ static const struct sdhci_ops tegra114_sdhci_ops = { .write_w = tegra_sdhci_writew, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, - .set_bus_width = tegra_sdhci_set_bus_width, + .set_bus_width = sdhci_set_bus_width, .reset = tegra_sdhci_reset, 
.platform_execute_tuning = tegra_sdhci_execute_tuning, .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, @@ -441,7 +422,15 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + /* SDHCI controllers on Tegra186 support 40-bit addressing. + * IOVA addresses are 48-bit wide on Tegra186. + * With 64-bit dma mask used for SDHCI, accesses can + * be broken. Disable 64-bit dma, which would fall back + * to 32-bit dma mask. Ideally 40-bit dma mask would work, + * But it is not supported as of now. + */ + SDHCI_QUIRK2_BROKEN_64_BIT_DMA, .ops = &tegra114_sdhci_ops, }; @@ -508,7 +497,8 @@ static int sdhci_tegra_probe(struct platform_device *pdev) clk_prepare_enable(clk); pltfm_host->clk = clk; - tegra_host->rst = devm_reset_control_get(&pdev->dev, "sdhci"); + tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, + "sdhci"); if (IS_ERR(tegra_host->rst)) { rc = PTR_ERR(tegra_host->rst); dev_err(&pdev->dev, "failed to get reset control: %d\n", rc); diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index f7e26b031e76..ec8794335241 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -409,17 +409,30 @@ static int xenon_emmc_phy_config_tuning(struct sdhci_host *host) return 0; } -static void xenon_emmc_phy_disable_data_strobe(struct sdhci_host *host) +static void xenon_emmc_phy_disable_strobe(struct sdhci_host *host) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); u32 reg; - /* Disable SDHC Data Strobe */ + /* Disable both SDHC Data Strobe and Enhanced Strobe */ reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL); - reg &= ~XENON_ENABLE_DATA_STROBE; + reg &= ~(XENON_ENABLE_DATA_STROBE | XENON_ENABLE_RESP_STROBE); sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL); + + /* Clear Strobe line Pull down or Pull up */ + if (priv->phy_type == EMMC_5_0_PHY) { + reg = sdhci_readl(host, XENON_EMMC_5_0_PHY_PAD_CONTROL); + reg &= ~(XENON_EMMC5_FC_QSP_PD | XENON_EMMC5_FC_QSP_PU); + sdhci_writel(host, reg, XENON_EMMC_5_0_PHY_PAD_CONTROL); + } else { + reg = sdhci_readl(host, XENON_EMMC_PHY_PAD_CONTROL1); + reg &= ~(XENON_EMMC5_1_FC_QSP_PD | XENON_EMMC5_1_FC_QSP_PU); + sdhci_writel(host, reg, XENON_EMMC_PHY_PAD_CONTROL1); + } } -/* Set HS400 Data Strobe */ +/* Set HS400 Data Strobe and Enhanced Strobe */ static void xenon_emmc_phy_strobe_delay_adj(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -439,6 +452,15 @@ static void xenon_emmc_phy_strobe_delay_adj(struct sdhci_host *host) /* Enable SDHC Data Strobe */ reg = sdhci_readl(host, XENON_SLOT_EMMC_CTRL); reg |= XENON_ENABLE_DATA_STROBE; + /* + * Enable SDHC Enhanced Strobe if supported + * Xenon Enhanced Strobe should be enabled only when + * 1. card is in HS400 mode and + * 2. SDCLK is higher than 52MHz + * 3. 
DLL is enabled + */ + if (host->mmc->ios.enhanced_strobe) + reg |= XENON_ENABLE_RESP_STROBE; sdhci_writel(host, reg, XENON_SLOT_EMMC_CTRL); /* Set Data Strobe Pull down */ @@ -615,7 +637,7 @@ static void xenon_emmc_phy_set(struct sdhci_host *host, sdhci_writel(host, phy_regs->logic_timing_val, phy_regs->logic_timing_adj); else - xenon_emmc_phy_disable_data_strobe(host); + xenon_emmc_phy_disable_strobe(host); phy_init: xenon_emmc_phy_init(host); @@ -705,7 +727,7 @@ void xenon_soc_pad_ctrl(struct sdhci_host *host, /* * Setting PHY when card is working in High Speed Mode. - * HS400 set data strobe line. + * HS400 set Data Strobe and Enhanced Strobe if it is supported. * HS200/SDR104 set tuning config to prepare for tuning. */ static int xenon_hs_delay_adj(struct sdhci_host *host) diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index bc1781bb070b..0842bbc2d7ad 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -18,6 +18,8 @@ #include <linux/ktime.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include "sdhci-pltfm.h" #include "sdhci-xenon.h" @@ -210,8 +212,27 @@ static void xenon_set_uhs_signaling(struct sdhci_host *host, sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); } +static void xenon_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + u8 pwr = host->pwr; + + sdhci_set_power_noreg(host, mode, vdd); + + if (host->pwr == pwr) + return; + + if (host->pwr == 0) + vdd = 0; + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +} + static const struct sdhci_ops sdhci_xenon_ops = { .set_clock = sdhci_set_clock, + .set_power = xenon_set_power, .set_bus_width = sdhci_set_bus_width, .reset = xenon_reset, .set_uhs_signaling = xenon_set_uhs_signaling, @@ -311,7 +332,8 @@ static int xenon_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); - if (host->timing == MMC_TIMING_UHS_DDR50) + if (host->timing == MMC_TIMING_UHS_DDR50 || + host->timing == MMC_TIMING_MMC_DDR52) return 0; /* @@ -471,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev) if (err) goto free_pltfm; + priv->axi_clk = devm_clk_get(&pdev->dev, "axi"); + if (IS_ERR(priv->axi_clk)) { + err = PTR_ERR(priv->axi_clk); + if (err == -EPROBE_DEFER) + goto err_clk; + } else { + err = clk_prepare_enable(priv->axi_clk); + if (err) + goto err_clk; + } + err = mmc_of_parse(host->mmc); if (err) - goto err_clk; + goto err_clk_axi; sdhci_get_of_property(pdev); @@ -482,20 +515,33 @@ static int xenon_probe(struct platform_device *pdev) /* Xenon specific dt parse */ err = xenon_probe_dt(pdev); if (err) - goto err_clk; + goto err_clk_axi; err = xenon_sdhc_prepare(host); if (err) - goto err_clk; + goto err_clk_axi; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); err = sdhci_add_host(host); if (err) goto remove_sdhc; + pm_runtime_put_autosuspend(&pdev->dev); + return 0; remove_sdhc: + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); xenon_sdhc_unprepare(host); +err_clk_axi: + clk_disable_unprepare(priv->axi_clk); err_clk: clk_disable_unprepare(pltfm_host->clk); free_pltfm: @@ -507,11 +553,16 @@ static int xenon_remove(struct platform_device *pdev) { struct sdhci_host *host 
= platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); sdhci_remove_host(host, 0); xenon_sdhc_unprepare(host); - + clk_disable_unprepare(priv->axi_clk); clk_disable_unprepare(pltfm_host->clk); sdhci_pltfm_free(pdev); @@ -519,6 +570,84 @@ static int xenon_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int xenon_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = pm_runtime_force_suspend(dev); + + priv->restore_needed = true; + return ret; +} +#endif + +#ifdef CONFIG_PM +static int xenon_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; + + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + + clk_disable_unprepare(pltfm_host->clk); + /* + * Need to update the priv->clock here, or when runtime resume + * back, phy don't aware the clock change and won't adjust phy + * which will cause cmd err + */ + priv->clock = 0; + return 0; +} + +static int xenon_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) { + dev_err(dev, "can't enable mainck\n"); + return ret; + } + + if (priv->restore_needed) { + ret = xenon_sdhc_prepare(host); + if (ret) + goto out; + priv->restore_needed = false; + } + + ret = sdhci_runtime_resume_host(host); + if (ret) + goto out; + return 0; +out: + clk_disable_unprepare(pltfm_host->clk); + return ret; +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(xenon_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(xenon_runtime_suspend, + xenon_runtime_resume, + NULL) +}; + static const struct of_device_id sdhci_xenon_dt_ids[] = { { .compatible = "marvell,armada-ap806-sdhci",}, { .compatible = "marvell,armada-cp110-sdhci",}, @@ -531,7 +660,7 @@ static struct platform_driver sdhci_xenon_driver = { .driver = { .name = "xenon-sdhci", .of_match_table = sdhci_xenon_dt_ids, - .pm = &sdhci_pltfm_pmops, + .pm = &sdhci_xenon_dev_pm_ops, }, .probe = xenon_probe, .remove = xenon_remove, diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 73debb42dc2f..9994995c7c56 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h @@ -33,6 +33,7 @@ #define XENON_TUNING_STEP_DIVIDER BIT(6) #define XENON_SLOT_EMMC_CTRL 0x0130 +#define XENON_ENABLE_RESP_STROBE BIT(25) #define XENON_ENABLE_DATA_STROBE BIT(24) #define XENON_SLOT_RETUNING_REQ_CTRL 0x0144 @@ -82,6 +83,7 @@ struct xenon_priv { unsigned char bus_width; unsigned char timing; unsigned int clock; + struct clk *axi_clk; int phy_type; /* @@ -90,6 +92,7 @@ struct xenon_priv { */ void *phy_params; struct xenon_emmc_phy_regs *emmc_phy_regs; + bool restore_needed; }; int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); diff --git 
a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ecd0d4350e8a..2f14334e42df 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -897,8 +897,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_set_transfer_irqs(host); /* Set the DMA boundary value and block size */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, - data->blksz), SDHCI_BLOCK_SIZE); + sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), + SDHCI_BLOCK_SIZE); sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); } @@ -1173,24 +1173,35 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) } EXPORT_SYMBOL_GPL(sdhci_send_command); +static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) +{ + int i, reg; + + for (i = 0; i < 4; i++) { + reg = SDHCI_RESPONSE + (3 - i) * 4; + cmd->resp[i] = sdhci_readl(host, reg); + } + + if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) + return; + + /* CRC is stripped so we need to do some shifting */ + for (i = 0; i < 4; i++) { + cmd->resp[i] <<= 8; + if (i != 3) + cmd->resp[i] |= cmd->resp[i + 1] >> 24; + } +} + static void sdhci_finish_command(struct sdhci_host *host) { struct mmc_command *cmd = host->cmd; - int i; host->cmd = NULL; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { - /* CRC is stripped so we need to do some shifting. */ - for (i = 0;i < 4;i++) { - cmd->resp[i] = sdhci_readl(host, - SDHCI_RESPONSE + (3-i)*4) << 8; - if (i != 3) - cmd->resp[i] |= - sdhci_readb(host, - SDHCI_RESPONSE + (3-i)*4-1); - } + sdhci_read_rsp_136(host, cmd); } else { cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); } @@ -1544,10 +1555,9 @@ void sdhci_set_bus_width(struct sdhci_host *host, int width) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); if (width == MMC_BUS_WIDTH_8) { ctrl &= ~SDHCI_CTRL_4BITBUS; - if (host->version >= SDHCI_SPEC_300) - ctrl |= SDHCI_CTRL_8BITBUS; + ctrl |= SDHCI_CTRL_8BITBUS; } else { - if (host->version >= SDHCI_SPEC_300) + if (host->mmc->caps & MMC_CAP_8_BIT_DATA) ctrl &= ~SDHCI_CTRL_8BITBUS; if (width == MMC_BUS_WIDTH_4) ctrl |= SDHCI_CTRL_4BITBUS; @@ -1641,19 +1651,20 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - if ((ios->timing == MMC_TIMING_SD_HS || - ios->timing == MMC_TIMING_MMC_HS || - ios->timing == MMC_TIMING_MMC_HS400 || - ios->timing == MMC_TIMING_MMC_HS200 || - ios->timing == MMC_TIMING_MMC_DDR52 || - ios->timing == MMC_TIMING_UHS_SDR50 || - ios->timing == MMC_TIMING_UHS_SDR104 || - ios->timing == MMC_TIMING_UHS_DDR50 || - ios->timing == MMC_TIMING_UHS_SDR25) - && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) - ctrl |= SDHCI_CTRL_HISPD; - else - ctrl &= ~SDHCI_CTRL_HISPD; + if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { + if (ios->timing == MMC_TIMING_SD_HS || + ios->timing == MMC_TIMING_MMC_HS || + ios->timing == MMC_TIMING_MMC_HS400 || + ios->timing == MMC_TIMING_MMC_HS200 || + ios->timing == MMC_TIMING_MMC_DDR52 || + ios->timing == MMC_TIMING_UHS_SDR50 || + ios->timing == MMC_TIMING_UHS_SDR104 || + ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_UHS_SDR25) + ctrl |= SDHCI_CTRL_HISPD; + else + ctrl &= ~SDHCI_CTRL_HISPD; + } if (host->version >= SDHCI_SPEC_300) { u16 clk, ctrl_2; @@ -2037,6 +2048,7 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) struct mmc_command cmd = {}; struct mmc_request mrq = {}; unsigned long flags; + u32 b = host->sdma_boundary; spin_lock_irqsave(&host->lock, 
flags); @@ -2052,9 +2064,9 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) */ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && mmc->ios.bus_width == MMC_BUS_WIDTH_8) - sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE); + sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); else - sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE); + sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); /* * The tuning block is sent by the card to the host controller. @@ -2395,12 +2407,12 @@ static void sdhci_tasklet_finish(unsigned long param) ; } -static void sdhci_timeout_timer(unsigned long data) +static void sdhci_timeout_timer(struct timer_list *t) { struct sdhci_host *host; unsigned long flags; - host = (struct sdhci_host*)data; + host = from_timer(host, t, timer); spin_lock_irqsave(&host->lock, flags); @@ -2417,12 +2429,12 @@ static void sdhci_timeout_timer(unsigned long data) spin_unlock_irqrestore(&host->lock, flags); } -static void sdhci_timeout_data_timer(unsigned long data) +static void sdhci_timeout_data_timer(struct timer_list *t) { struct sdhci_host *host; unsigned long flags; - host = (struct sdhci_host *)data; + host = from_timer(host, t, data_timer); spin_lock_irqsave(&host->lock, flags); @@ -2502,7 +2514,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) sdhci_finish_command(host); } -#ifdef CONFIG_MMC_DEBUG static void sdhci_adma_show_error(struct sdhci_host *host) { void *desc = host->adma_table; @@ -2530,9 +2541,6 @@ static void sdhci_adma_show_error(struct sdhci_host *host) break; } } -#else -static void sdhci_adma_show_error(struct sdhci_host *host) { } -#endif static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) { @@ -2938,7 +2946,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) sdhci_init(host, 0); - if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) { + if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && + mmc->ios.power_mode != MMC_POWER_OFF) { /* Force clock and power re-program */ host->pwr = 0; host->clock = 0; @@ -2998,7 +3007,7 @@ void sdhci_cqe_enable(struct mmc_host *mmc) ctrl |= SDHCI_CTRL_ADMA32; sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 512), + sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), SDHCI_BLOCK_SIZE); /* Set maximum timeout */ @@ -3119,6 +3128,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, host->tuning_delay = -1; + host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; + return host; } @@ -3227,9 +3238,16 @@ int sdhci_setup_host(struct sdhci_host *host) * available. 
*/ ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) return ret; + DBG("Version: 0x%08x | Present: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_VERSION), + sdhci_readl(host, SDHCI_PRESENT_STATE)); + DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", + sdhci_readl(host, SDHCI_CAPABILITIES), + sdhci_readl(host, SDHCI_CAPABILITIES_1)); + sdhci_read_caps(host); override_timeout_clk = host->timeout_clk; @@ -3731,9 +3749,8 @@ int __sdhci_add_host(struct sdhci_host *host) tasklet_init(&host->finish_tasklet, sdhci_tasklet_finish, (unsigned long)host); - setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); - setup_timer(&host->data_timer, sdhci_timeout_data_timer, - (unsigned long)host); + timer_setup(&host->timer, sdhci_timeout_timer, 0); + timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); init_waitqueue_head(&host->buf_ready_int); @@ -3747,10 +3764,6 @@ int __sdhci_add_host(struct sdhci_host *host) goto untasklet; } -#ifdef CONFIG_MMC_DEBUG - sdhci_dumpregs(host); -#endif - ret = sdhci_led_register(host); if (ret) { pr_err("%s: Failed to register LED device: %d\n", diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 0469fa191493..54bc444c317f 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -435,6 +435,8 @@ struct sdhci_host { #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) /* Broken Clock divider zero in controller */ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) +/* Controller has CRC in 136 bit Command Response */ +#define SDHCI_QUIRK2_RSP_136_HAS_CRC (1<<16) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -541,6 +543,9 @@ struct sdhci_host { /* Delay (ms) between tuning commands */ int tuning_delay; + /* Host SDMA buffer boundary. */ + u32 sdma_boundary; + unsigned long private[0] ____cacheline_aligned; }; diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 111b66f5439b..04ca0d33a521 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/delay.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/clk.h> #include "sdhci-pltfm.h" @@ -47,6 +48,7 @@ struct f_sdhost_priv { struct clk *clk; u32 vendor_hs200; struct device *dev; + bool enable_cmd_dat_delay; }; static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) @@ -84,10 +86,19 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { + struct f_sdhost_priv *priv = sdhci_priv(host); + u32 ctl; + if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL); sdhci_reset(host, mask); + + if (priv->enable_cmd_dat_delay) { + ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL); + ctl |= F_SDH30_CMD_DAT_DELAY; + sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL); + } } static const struct sdhci_ops sdhci_f_sdh30_ops = { @@ -126,6 +137,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | SDHCI_QUIRK2_TUNING_WORK_AROUND; + priv->enable_cmd_dat_delay = device_property_read_bool(dev, + "fujitsu,cmd-dat-delay-select"); + ret = mmc_of_parse(host->mmc); if (ret) goto err; diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index 70cb00aa79a0..9e46039282d2 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -385,7 +385,7 @@ static int sdricoh_get_ro(struct mmc_host *mmc) return (status & 
STATUS_CARD_LOCKED); } -static struct mmc_host_ops sdricoh_ops = { +static const struct mmc_host_ops sdricoh_ops = { .request = sdricoh_request, .set_ios = sdricoh_set_ios, .get_ro = sdricoh_get_ro, diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 4062d6bef3c8..53fb18bb7bee 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1079,7 +1079,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->state = STATE_IDLE; } -static struct mmc_host_ops sh_mmcif_ops = { +static const struct mmc_host_ops sh_mmcif_ops = { .request = sh_mmcif_request, .set_ios = sh_mmcif_set_ios, .get_cd = mmc_gpio_get_cd, diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d6fa2214aaae..cc98355dbdb9 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -22,6 +22,7 @@ #include <linux/err.h> #include <linux/clk.h> +#include <linux/clk/sunxi-ng.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/spinlock.h> @@ -259,7 +260,11 @@ struct sunxi_mmc_cfg { /* Does DATA0 needs to be masked while the clock is updated */ bool mask_data0; + /* hardware only supports new timing mode */ bool needs_new_timings; + + /* hardware can switch between old and new timing modes */ + bool has_timings_switch; }; struct sunxi_mmc_host { @@ -293,6 +298,9 @@ struct sunxi_mmc_host { /* vqmmc */ bool vqmmc_enabled; + + /* timings */ + bool use_new_timings; }; static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) @@ -714,6 +722,11 @@ static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host, { int index; + /* clk controller delays not used under new timings mode */ + if (host->use_new_timings) + return 0; + + /* some old controllers don't support delays */ if (!host->cfg->clk_delays) return 0; @@ -747,7 +760,7 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, { struct mmc_host *mmc = host->mmc; long rate; - u32 rval, clock = ios->clock; + u32 rval, clock = ios->clock, div = 1; int ret; ret = sunxi_mmc_oclk_onoff(host, 0); @@ -760,10 +773,30 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, if (!ios->clock) return 0; - /* 8 bit DDR requires a higher module clock */ + /* + * Under the old timing mode, 8 bit DDR requires the module + * clock to be double the card clock. Under the new timing + * mode, all DDR modes require a doubled module clock. + * + * We currently only support the standard MMC DDR52 mode. + * This block should be updated once support for other DDR + * modes is added. 
+ */ if (ios->timing == MMC_TIMING_MMC_DDR52 && - ios->bus_width == MMC_BUS_WIDTH_8) + (host->use_new_timings || + ios->bus_width == MMC_BUS_WIDTH_8)) { + div = 2; clock <<= 1; + } + + if (host->use_new_timings && host->cfg->has_timings_switch) { + ret = sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + if (ret) { + dev_err(mmc_dev(mmc), + "error setting new timing mode\n"); + return ret; + } + } rate = clk_round_rate(host->clk_mmc, clock); if (rate < 0) { @@ -782,20 +815,23 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, return ret; } - /* clear internal divider */ + /* set internal divider */ rval = mmc_readl(host, REG_CLKCR); rval &= ~0xff; - /* set internal divider for 8 bit eMMC DDR, so card clock is right */ - if (ios->timing == MMC_TIMING_MMC_DDR52 && - ios->bus_width == MMC_BUS_WIDTH_8) { - rval |= 1; - rate >>= 1; - } + rval |= div - 1; mmc_writel(host, REG_CLKCR, rval); - if (host->cfg->needs_new_timings) - mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE); + /* update card clock rate to account for internal divider */ + rate /= div; + + if (host->use_new_timings) { + /* Don't touch the delay bits */ + rval = mmc_readl(host, REG_SD_NTSR); + rval |= SDXC_2X_TIMING_MODE; + mmc_writel(host, REG_SD_NTSR, rval); + } + /* sunxi_mmc_clk_set_phase expects the actual card clock rate */ ret = sunxi_mmc_clk_set_phase(host, ios, rate); if (ret) return ret; @@ -1044,7 +1080,7 @@ static int sunxi_mmc_card_busy(struct mmc_host *mmc) return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY); } -static struct mmc_host_ops sunxi_mmc_ops = { +static const struct mmc_host_ops sunxi_mmc_ops = { .request = sunxi_mmc_request, .set_ios = sunxi_mmc_set_ios, .get_ro = mmc_gpio_get_ro, @@ -1090,6 +1126,13 @@ static const struct sunxi_mmc_cfg sun7i_a20_cfg = { .can_calibrate = false, }; +static const struct sunxi_mmc_cfg sun8i_a83t_emmc_cfg = { + .idma_des_size_bits = 16, + .clk_delays = sunxi_mmc_clk_delays, + .can_calibrate = false, + .has_timings_switch = true, +}; + static const struct sunxi_mmc_cfg sun9i_a80_cfg = { .idma_des_size_bits = 16, .clk_delays = sun9i_mmc_clk_delays, @@ -1114,6 +1157,7 @@ static const struct of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun4i-a10-mmc", .data = &sun4i_a10_cfg }, { .compatible = "allwinner,sun5i-a13-mmc", .data = &sun5i_a13_cfg }, { .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-emmc", .data = &sun8i_a83t_emmc_cfg }, { .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg }, { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg }, { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg }, @@ -1131,11 +1175,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return -EINVAL; ret = mmc_regulator_get_supply(host->mmc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "Could not get vmmc supply\n"); + if (ret) return ret; - } host->reg_base = devm_ioremap_resource(&pdev->dev, platform_get_resource(pdev, IORESOURCE_MEM, 0)); @@ -1168,7 +1209,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, } } - host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "ahb"); if (PTR_ERR(host->reset) == -EPROBE_DEFER) return PTR_ERR(host->reset); @@ -1197,7 +1239,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, } if (!IS_ERR(host->reset)) { - ret = 
reset_control_deassert(host->reset); + ret = reset_control_reset(host->reset); if (ret) { dev_err(&pdev->dev, "reset err %d\n", ret); goto error_disable_clk_sample; @@ -1258,6 +1300,30 @@ static int sunxi_mmc_probe(struct platform_device *pdev) goto error_free_host; } + if (host->cfg->has_timings_switch) { + /* + * Supports both old and new timing modes. + * Try setting the clk to new timing mode. + */ + sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true); + + /* And check the result */ + ret = sunxi_ccu_get_mmc_timing_mode(host->clk_mmc); + if (ret < 0) { + /* + * For whatever reason we were not able to get + * the current active mode. Default to old mode. + */ + dev_warn(&pdev->dev, "MMC clk timing mode unknown\n"); + host->use_new_timings = false; + } else { + host->use_new_timings = !!ret; + } + } else if (host->cfg->needs_new_timings) { + /* Supports new timing mode only */ + host->use_new_timings = true; + } + mmc->ops = &sunxi_mmc_ops; mmc->max_blk_count = 8192; mmc->max_blk_size = 4096; @@ -1270,7 +1336,7 @@ static int sunxi_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; - if (host->cfg->clk_delays) + if (host->cfg->clk_delays || host->use_new_timings) mmc->caps |= MMC_CAP_1_8V_DDR; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 93c4b40df90a..a3d8380ab480 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -783,9 +783,9 @@ static void tifm_sd_end_cmd(unsigned long data) mmc_request_done(mmc, mrq); } -static void tifm_sd_abort(unsigned long data) +static void tifm_sd_abort(struct timer_list *t) { - struct tifm_sd *host = (struct tifm_sd*)data; + struct tifm_sd *host = from_timer(host, t, timer); pr_err("%s : card failed to respond for a long period of time " "(%x, %x)\n", @@ -968,7 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock) tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd, (unsigned long)host); - setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host); + timer_setup(&host->timer, tifm_sd_abort, 0); mmc->ops = &tifm_sd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 6ad6704175dc..3e6ff8921440 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -81,14 +81,14 @@ #define TMIO_STAT_CMD_BUSY BIT(30) #define TMIO_STAT_ILL_ACCESS BIT(31) +/* Definitions for values the CTL_SD_CARD_CLK_CTL register can take */ #define CLK_CTL_DIV_MASK 0xff #define CLK_CTL_SCLKEN BIT(8) +/* Definitions for values the CTL_SD_MEM_CARD_OPT register can take */ #define CARD_OPT_WIDTH8 BIT(13) #define CARD_OPT_WIDTH BIT(15) -#define TMIO_BBS 512 /* Boot block size */ - /* Definitions for values the CTL_SDIO_STATUS register can take */ #define TMIO_SDIO_STAT_IOIRQ 0x0001 #define TMIO_SDIO_STAT_EXPUB52 0x4000 @@ -97,6 +97,9 @@ #define TMIO_SDIO_SETBITS_MASK 0x0006 +/* Definitions for values the CTL_DMA_ENABLE register can take */ +#define DMA_ENABLE_DMASDRW BIT(1) + /* Define some IRQ masks */ /* This is the mask used at reset by the chip */ #define TMIO_MASK_ALL 0x837f031d @@ -122,6 +125,7 @@ struct tmio_mmc_dma_ops { struct tmio_mmc_data *pdata); void (*release)(struct tmio_mmc_host *host); void (*abort)(struct tmio_mmc_host *host); + void (*dataend)(struct tmio_mmc_host *host); }; struct tmio_mmc_host { @@ -151,6 +155,7 @@ struct tmio_mmc_host { struct dma_chan *chan_rx; struct dma_chan *chan_tx; struct completion 
dma_dataend; + struct tasklet_struct dma_complete; struct tasklet_struct dma_issue; struct scatterlist bounce_sg; u8 *bounce_buf; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 88a94355ac90..583bf3262df5 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -47,6 +47,7 @@ #include <linux/mmc/sdio.h> #include <linux/scatterlist.h> #include <linux/spinlock.h> +#include <linux/swiotlb.h> #include <linux/workqueue.h> #include "tmio_mmc.h" @@ -87,6 +88,12 @@ static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host) host->dma_ops->abort(host); } +static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host) +{ + if (host->dma_ops) + host->dma_ops->dataend(host); +} + void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) { host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ); @@ -123,50 +130,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host) #define CMDREQ_TIMEOUT 5000 -#ifdef CONFIG_MMC_DEBUG - -#define STATUS_TO_TEXT(a, status, i) \ - do { \ - if ((status) & TMIO_STAT_##a) { \ - if ((i)++) \ - printk(KERN_DEBUG " | "); \ - printk(KERN_DEBUG #a); \ - } \ - } while (0) - -static void pr_debug_status(u32 status) -{ - int i = 0; - - pr_debug("status: %08x = ", status); - STATUS_TO_TEXT(CARD_REMOVE, status, i); - STATUS_TO_TEXT(CARD_INSERT, status, i); - STATUS_TO_TEXT(SIGSTATE, status, i); - STATUS_TO_TEXT(WRPROTECT, status, i); - STATUS_TO_TEXT(CARD_REMOVE_A, status, i); - STATUS_TO_TEXT(CARD_INSERT_A, status, i); - STATUS_TO_TEXT(SIGSTATE_A, status, i); - STATUS_TO_TEXT(CMD_IDX_ERR, status, i); - STATUS_TO_TEXT(STOPBIT_ERR, status, i); - STATUS_TO_TEXT(ILL_FUNC, status, i); - STATUS_TO_TEXT(CMD_BUSY, status, i); - STATUS_TO_TEXT(CMDRESPEND, status, i); - STATUS_TO_TEXT(DATAEND, status, i); - STATUS_TO_TEXT(CRCFAIL, status, i); - STATUS_TO_TEXT(DATATIMEOUT, status, i); - STATUS_TO_TEXT(CMDTIMEOUT, status, i); - STATUS_TO_TEXT(RXOVERFLOW, status, i); - STATUS_TO_TEXT(TXUNDERRUN, status, i); - STATUS_TO_TEXT(RXRDY, status, i); - STATUS_TO_TEXT(TXRQ, status, i); - STATUS_TO_TEXT(ILL_ACCESS, status, i); - printk("\n"); -} - -#else -#define pr_debug_status(s) do { } while (0) -#endif - static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct tmio_mmc_host *host = mmc_priv(mmc); @@ -201,11 +164,14 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host) { sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10); + + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); - msleep(10); + usleep_range(10000, 11000); } } @@ -213,12 +179,15 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) { if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); - msleep(10); + usleep_range(10000, 11000); } sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 
5 : 10); + + /* HW engineers overrode docs: no sleep needed on R-Car2+ */ + if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) + usleep_range(10000, 11000); } static void tmio_mmc_set_clock(struct tmio_mmc_host *host, @@ -250,7 +219,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - msleep(10); + usleep_range(10000, 11000); tmio_mmc_clk_start(host); } @@ -261,11 +230,11 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host) sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); - msleep(10); + usleep_range(10000, 11000); sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); - msleep(10); + usleep_range(10000, 11000); if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) { sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); @@ -343,12 +312,6 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, int c = cmd->opcode; u32 irq_mask = TMIO_MASK_CMD; - /* CMD12 is handled by hardware */ - if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) { - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_STP); - return 0; - } - switch (mmc_resp_type(cmd)) { case MMC_RSP_NONE: c |= RESP_NONE; break; case MMC_RSP_R1: @@ -605,11 +568,11 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat) if (done) { tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); - complete(&host->dma_dataend); + tmio_mmc_dataend_dma(host); } } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); - complete(&host->dma_dataend); + tmio_mmc_dataend_dma(host); } else { tmio_mmc_do_data_irq(host); tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); @@ -756,9 +719,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; - pr_debug_status(status); - pr_debug_status(ireg); - /* Clear the status except the interrupt status */ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); @@ -1153,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) { struct tmio_mmc_data *pdata = host->pdata; struct mmc_host *mmc = host->mmc; + int err; - mmc_regulator_get_supply(mmc); + err = mmc_regulator_get_supply(mmc); + if (err) + return err; /* use ocr_mask if no regulator */ if (!mmc->ocr_avail) @@ -1251,11 +1214,23 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; mmc->caps2 |= pdata->capabilities2; - mmc->max_segs = 32; + mmc->max_segs = pdata->max_segs ? : 32; mmc->max_blk_size = 512; - mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) * - mmc->max_segs; + mmc->max_blk_count = pdata->max_blk_count ? : + (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + /* + * Since swiotlb has memory size limitation, this will calculate + * the maximum size locally (because we don't have any APIs for it now) + * and check the current max_req_size. And then, this will update + * the max_req_size if needed as a workaround. 
+ */ + if (swiotlb_max_segment()) { + unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE; + + if (mmc->max_req_size > max_size) + mmc->max_req_size = max_size; + } mmc->max_seg_size = mmc->max_req_size; _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || @@ -1327,23 +1302,24 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, pm_runtime_enable(&pdev->dev); ret = mmc_add_host(mmc); - if (ret < 0) { - tmio_mmc_host_remove(_host); - return ret; - } + if (ret) + goto remove_host; dev_pm_qos_expose_latency_limit(&pdev->dev, 100); if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0); - if (ret < 0) { - tmio_mmc_host_remove(_host); - return ret; - } + if (ret) + goto remove_host; + mmc_gpiod_request_cd_irq(mmc); } return 0; + +remove_host: + tmio_mmc_host_remove(_host); + return ret; } EXPORT_SYMBOL_GPL(tmio_mmc_host_probe); diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c index 553ef41bb806..dd961c54a6a9 100644 --- a/drivers/mmc/host/toshsd.c +++ b/drivers/mmc/host/toshsd.c @@ -550,7 +550,7 @@ static int toshsd_get_cd(struct mmc_host *mmc) return !!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0); } -static struct mmc_host_ops toshsd_ops = { +static const struct mmc_host_ops toshsd_ops = { .request = toshsd_request, .set_ios = toshsd_set_ios, .get_ro = toshsd_get_ro, diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 1bd5f1a18d4e..cdfeb15b6f05 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1185,7 +1185,7 @@ static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) return ret; } -static struct mmc_host_ops usdhi6_ops = { +static const struct mmc_host_ops usdhi6_ops = { .request = usdhi6_request, .set_ios = usdhi6_set_ios, .get_cd = usdhi6_get_cd, @@ -1757,7 +1757,7 @@ static int usdhi6_probe(struct platform_device *pdev) return -ENOMEM; ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto e_free_mmc; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 6380044c0628..32c4211506fc 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -323,7 +323,7 @@ struct via_crdr_mmc_host { /* some devices need a very long delay for power to stabilize */ #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 -static struct pci_device_id via_ids[] = { +static const struct pci_device_id via_ids[] = { {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, {0,} @@ -932,12 +932,12 @@ out: return result; } -static void via_sdc_timeout(unsigned long ulongdata) +static void via_sdc_timeout(struct timer_list *t) { struct via_crdr_mmc_host *sdhost; unsigned long flags; - sdhost = (struct via_crdr_mmc_host *)ulongdata; + sdhost = from_timer(sdhost, t, timer); spin_lock_irqsave(&sdhost->lock, flags); @@ -1036,9 +1036,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host) u32 lenreg; u32 status; - init_timer(&host->timer); - host->timer.data = (unsigned long)host; - host->timer.function = via_sdc_timeout; + timer_setup(&host->timer, via_sdc_timeout, 0); spin_lock_init(&host->lock); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index fbeea1a491a6..1fe68137a30f 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -266,7 +266,7 @@ MODULE_PARM_DESC(firmware_rom_wait_states, #define ELAN_VENDOR_ID 0x2201 #define VUB300_VENDOR_ID 0x0424 #define VUB300_PRODUCT_ID 0x012C -static 
struct usb_device_id vub300_table[] = { +static const struct usb_device_id vub300_table[] = { {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, {} /* Terminating entry */ @@ -741,9 +741,10 @@ static void vub300_deadwork_thread(struct work_struct *work) kref_put(&vub300->kref, vub300_delete); } -static void vub300_inactivity_timer_expired(unsigned long data) +static void vub300_inactivity_timer_expired(struct timer_list *t) { /* softirq */ - struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + struct vub300_mmc_host *vub300 = from_timer(vub300, t, + inactivity_timer); if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); } else if (vub300->cmd) { @@ -1180,9 +1181,10 @@ static void send_command(struct vub300_mmc_host *vub300) * timer callback runs in atomic mode * so it cannot call usb_kill_urb() */ -static void vub300_sg_timed_out(unsigned long data) +static void vub300_sg_timed_out(struct timer_list *t) { - struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + struct vub300_mmc_host *vub300 = from_timer(vub300, t, + sg_transfer_timer); vub300->usb_timed_out = 1; usb_sg_cancel(&vub300->sg_request); usb_unlink_urb(vub300->command_out_urb); @@ -1244,12 +1246,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, USB_RECIP_DEVICE, 0x0000, 0x0000, xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); - if (retval < 0) { - strncpy(vub300->vub_name, - "SDIO pseudocode download failed", - sizeof(vub300->vub_name)); - return; - } + if (retval < 0) + goto copy_error_message; } else { dev_err(&vub300->udev->dev, "not enough memory for xfer buffer to send" @@ -1291,12 +1289,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, USB_RECIP_DEVICE, 0x0000, 0x0000, xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); - if (retval < 0) { - strncpy(vub300->vub_name, - "SDIO pseudocode download failed", - sizeof(vub300->vub_name)); - return; - } + if (retval < 0) + goto copy_error_message; } else { dev_err(&vub300->udev->dev, "not enough memory for xfer buffer to send" @@ -1349,6 +1343,12 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, sizeof(vub300->vub_name)); return; } + + return; + +copy_error_message: + strncpy(vub300->vub_name, "SDIO pseudocode download failed", + sizeof(vub300->vub_name)); } /* @@ -2079,7 +2079,7 @@ static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); } -static struct mmc_host_ops vub300_mmc_ops = { +static const struct mmc_host_ops vub300_mmc_ops = { .request = vub300_mmc_request, .set_ios = vub300_mmc_set_ios, .get_ro = vub300_mmc_get_ro, @@ -2323,13 +2323,10 @@ static int vub300_probe(struct usb_interface *interface, INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); kref_init(&vub300->kref); - init_timer(&vub300->sg_transfer_timer); - vub300->sg_transfer_timer.data = (unsigned long)vub300; - vub300->sg_transfer_timer.function = vub300_sg_timed_out; + timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0); kref_get(&vub300->kref); - init_timer(&vub300->inactivity_timer); - vub300->inactivity_timer.data = (unsigned long)vub300; - vub300->inactivity_timer.function = vub300_inactivity_timer_expired; + timer_setup(&vub300->inactivity_timer, + vub300_inactivity_timer_expired, 0); vub300->inactivity_timer.expires = jiffies + HZ; add_timer(&vub300->inactivity_timer); if 
(vub300->card_present) diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 9668616faf16..f4233576153b 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -802,10 +802,8 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) break; default: -#ifdef CONFIG_MMC_DEBUG pr_warn("%s: Data command %d is not supported by this controller\n", mmc_hostname(host->mmc), cmd->opcode); -#endif cmd->error = -EINVAL; goto done; @@ -958,9 +956,9 @@ static const struct mmc_host_ops wbsd_ops = { * Helper function to reset detection ignore */ -static void wbsd_reset_ignore(unsigned long data) +static void wbsd_reset_ignore(struct timer_list *t) { - struct wbsd_host *host = (struct wbsd_host *)data; + struct wbsd_host *host = from_timer(host, t, ignore_timer); BUG_ON(host == NULL); @@ -1226,9 +1224,7 @@ static int wbsd_alloc_mmc(struct device *dev) /* * Set up timers */ - init_timer(&host->ignore_timer); - host->ignore_timer.data = (unsigned long)host; - host->ignore_timer.function = wbsd_reset_ignore; + timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0); /* * Maximum number of segments. Worst case is one sector per segment diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 21ebba88679c..fd30ac7da5e5 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -726,7 +726,7 @@ static int wmt_mci_get_cd(struct mmc_host *mmc) return !(cd ^ priv->cd_inverted); } -static struct mmc_host_ops wmt_mci_ops = { +static const struct mmc_host_ops wmt_mci_ops = { .request = wmt_mci_request, .set_ios = wmt_mci_set_ios, .get_ro = wmt_mci_get_ro, @@ -856,7 +856,9 @@ static int wmt_mci_probe(struct platform_device *pdev) goto fail5; } - clk_prepare_enable(priv->clk_sdmmc); + ret = clk_prepare_enable(priv->clk_sdmmc); + if (ret) + goto fail6; /* configure the controller to a known 'ready' state */ wmt_reset_hardware(mmc); @@ -866,6 +868,8 @@ static int wmt_mci_probe(struct platform_device *pdev) dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); return 0; +fail6: + clk_put(priv->clk_sdmmc); fail5: free_irq(dma_irq, priv); fail4: |