Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/Kconfig                                                | 2
-rw-r--r--  drivers/mmc/Makefile                                               | 1
-rw-r--r--  drivers/mmc/card/Kconfig                                           | 70
-rw-r--r--  drivers/mmc/card/Makefile                                          | 10
-rw-r--r--  drivers/mmc/card/block.h                                           | 1
-rw-r--r--  drivers/mmc/core/Kconfig                                           | 76
-rw-r--r--  drivers/mmc/core/Makefile                                          | 7
-rw-r--r--  drivers/mmc/core/block.c (renamed from drivers/mmc/card/block.c)   | 869
-rw-r--r--  drivers/mmc/core/block.h                                           | 9
-rw-r--r--  drivers/mmc/core/bus.c                                             | 2
-rw-r--r--  drivers/mmc/core/bus.h                                             | 16
-rw-r--r--  drivers/mmc/core/card.h                                            | 221
-rw-r--r--  drivers/mmc/core/core.c                                            | 207
-rw-r--r--  drivers/mmc/core/core.h                                            | 45
-rw-r--r--  drivers/mmc/core/debugfs.c                                         | 8
-rw-r--r--  drivers/mmc/core/host.c                                            | 24
-rw-r--r--  drivers/mmc/core/host.h                                            | 48
-rw-r--r--  drivers/mmc/core/mmc.c                                             | 193
-rw-r--r--  drivers/mmc/core/mmc_ops.c                                         | 261
-rw-r--r--  drivers/mmc/core/mmc_ops.h                                         | 21
-rw-r--r--  drivers/mmc/core/mmc_test.c (renamed from drivers/mmc/card/mmc_test.c) | 168
-rw-r--r--  drivers/mmc/core/pwrseq.h                                          | 6
-rw-r--r--  drivers/mmc/core/pwrseq_sd8787.c                                   | 117
-rw-r--r--  drivers/mmc/core/queue.c (renamed from drivers/mmc/card/queue.c)   | 343
-rw-r--r--  drivers/mmc/core/queue.h (renamed from drivers/mmc/card/queue.h)   | 42
-rw-r--r--  drivers/mmc/core/quirks.c                                          | 83
-rw-r--r--  drivers/mmc/core/quirks.h                                          | 148
-rw-r--r--  drivers/mmc/core/sd.c                                              | 31
-rw-r--r--  drivers/mmc/core/sd.h                                              | 5
-rw-r--r--  drivers/mmc/core/sd_ops.c                                          | 57
-rw-r--r--  drivers/mmc/core/sd_ops.h                                          | 9
-rw-r--r--  drivers/mmc/core/sdio.c                                            | 63
-rw-r--r--  drivers/mmc/core/sdio_bus.c                                        | 13
-rw-r--r--  drivers/mmc/core/sdio_bus.h                                        | 3
-rw-r--r--  drivers/mmc/core/sdio_cis.c                                        | 3
-rw-r--r--  drivers/mmc/core/sdio_cis.h                                        | 3
-rw-r--r--  drivers/mmc/core/sdio_io.c                                         | 2
-rw-r--r--  drivers/mmc/core/sdio_irq.c                                        | 15
-rw-r--r--  drivers/mmc/core/sdio_ops.c                                        | 10
-rw-r--r--  drivers/mmc/core/sdio_ops.h                                        | 5
-rw-r--r--  drivers/mmc/core/sdio_uart.c (renamed from drivers/mmc/card/sdio_uart.c) | 4
-rw-r--r--  drivers/mmc/core/slot-gpio.c                                       | 14
-rw-r--r--  drivers/mmc/core/slot-gpio.h                                       | 2
-rw-r--r--  drivers/mmc/host/Kconfig                                           | 32
-rw-r--r--  drivers/mmc/host/Makefile                                          | 3
-rw-r--r--  drivers/mmc/host/android-goldfish.c                                | 2
-rw-r--r--  drivers/mmc/host/davinci_mmc.c                                     | 131
-rw-r--r--  drivers/mmc/host/dw_mmc-exynos.c                                   | 53
-rw-r--r--  drivers/mmc/host/dw_mmc-k3.c                                       | 40
-rw-r--r--  drivers/mmc/host/dw_mmc-pci.c                                      | 30
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c                                    | 29
-rw-r--r--  drivers/mmc/host/dw_mmc-rockchip.c                                 | 43
-rw-r--r--  drivers/mmc/host/dw_mmc-zx.c                                       | 241
-rw-r--r--  drivers/mmc/host/dw_mmc-zx.h                                       | 31
-rw-r--r--  drivers/mmc/host/dw_mmc.c                                          | 229
-rw-r--r--  drivers/mmc/host/dw_mmc.h                                          | 270
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c                                      | 3
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c                                    | 857
-rw-r--r--  drivers/mmc/host/mmci.c                                            | 161
-rw-r--r--  drivers/mmc/host/mmci.h                                            | 74
-rw-r--r--  drivers/mmc/host/mmci_qcom_dml.c                                   | 2
-rw-r--r--  drivers/mmc/host/mtk-sd.c                                          | 16
-rw-r--r--  drivers/mmc/host/mxs-mmc.c                                         | 22
-rw-r--r--  drivers/mmc/host/omap.c                                            | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c                                      | 32
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c                                  | 5
-rw-r--r--  drivers/mmc/host/rtsx_usb_sdmmc.c                                  | 5
-rw-r--r--  drivers/mmc/host/s3cmci.c                                          | 16
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c                                      | 9
-rw-r--r--  drivers/mmc/host/sdhci-cadence.c                                   | 285
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c                                 | 1
-rw-r--r--  drivers/mmc/host/sdhci-esdhc.h                                     | 44
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c                                     | 46
-rw-r--r--  drivers/mmc/host/sdhci-msm.c                                       | 789
-rw-r--r--  drivers/mmc/host/sdhci-of-arasan.c                                 | 14
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c                                   | 31
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c                                  | 59
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c                                  | 197
-rw-r--r--  drivers/mmc/host/sdhci-pci.h                                       | 4
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.h                                     | 2
-rw-r--r--  drivers/mmc/host/sdhci-s3c-regs.h                                  | 87
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c                                       | 73
-rw-r--r--  drivers/mmc/host/sdhci.c                                           | 410
-rw-r--r--  drivers/mmc/host/sdhci.h                                           | 6
-rw-r--r--  drivers/mmc/host/sh_mmcif.c                                        | 28
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c                                  | 295
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c                                       | 129
-rw-r--r--  drivers/mmc/host/tmio_mmc.h                                        | 35
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c                                    | 180
-rw-r--r--  drivers/mmc/host/ushc.c                                            | 3
-rw-r--r--  drivers/mmc/host/via-sdmmc.c                                       | 1
-rw-r--r--  drivers/mmc/host/vub300.c                                          | 8
-rw-r--r--  drivers/mmc/host/wbsd.c                                            | 16
-rw-r--r--  drivers/mmc/host/wmt-sdmmc.c                                       | 1
94 files changed, 5861 insertions, 2458 deletions
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38efa65..7e803fc454d1 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -23,8 +23,6 @@ if MMC
source "drivers/mmc/core/Kconfig"
-source "drivers/mmc/card/Kconfig"
-
source "drivers/mmc/host/Kconfig"
endif # MMC
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 400756ec7c49..416b6d1c9ec6 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -5,5 +5,4 @@
subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
-obj-$(CONFIG_MMC) += card/
obj-$(subst m,y,$(CONFIG_MMC)) += host/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
deleted file mode 100644
index 5562308699bc..000000000000
--- a/drivers/mmc/card/Kconfig
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# MMC/SD card drivers
-#
-
-comment "MMC/SD/SDIO Card Drivers"
-
-config MMC_BLOCK
- tristate "MMC block device driver"
- depends on BLOCK
- default y
- help
- Say Y here to enable the MMC block device driver support.
- This provides a block device driver, which you can use to
- mount the filesystem. Almost everyone wishing MMC support
- should say Y or M here.
-
-config MMC_BLOCK_MINORS
- int "Number of minors per block device"
- depends on MMC_BLOCK
- range 4 256
- default 8
- help
- Number of minors per block device. One is needed for every
- partition on the disk (plus one for the whole disk).
-
- Number of total MMC minors available is 256, so your number
- of supported block devices will be limited to 256 divided
- by this number.
-
- Default is 8 to be backwards compatible with previous
- hardwired device numbering.
-
- If unsure, say 8 here.
-
-config MMC_BLOCK_BOUNCE
- bool "Use bounce buffer for simple hosts"
- depends on MMC_BLOCK
- default y
- help
- SD/MMC is a high latency protocol where it is crucial to
- send large requests in order to get high performance. Many
- controllers, however, are restricted to continuous memory
- (i.e. they can't do scatter-gather), something the kernel
- rarely can provide.
-
- Say Y here to help these restricted hosts by bouncing
- requests back and forth from a large buffer. You will get
- a big performance gain at the cost of up to 64 KiB of
- physical memory.
-
- If unsure, say Y here.
-
-config SDIO_UART
- tristate "SDIO UART/GPS class support"
- depends on TTY
- help
- SDIO function driver for SDIO cards that implements the UART
- class, as well as the GPS class which appears like a UART.
-
-config MMC_TEST
- tristate "MMC host test driver"
- help
- Development driver that performs a series of reads and writes
- to a memory card in order to expose certain well known bugs
- in host controllers. The tests are executed by writing to the
- "test" file in debugfs under each card. Note that whatever is
- on your card will be overwritten by these tests.
-
- This driver is only of interest to those developing or
- testing a host driver. Most people should say N here.
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
deleted file mode 100644
index c73b406a06cd..000000000000
--- a/drivers/mmc/card/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for MMC/SD card drivers
-#
-
-obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
-mmc_block-objs := block.o queue.o
-obj-$(CONFIG_MMC_TEST) += mmc_test.o
-
-obj-$(CONFIG_SDIO_UART) += sdio_uart.o
-
diff --git a/drivers/mmc/card/block.h b/drivers/mmc/card/block.h
deleted file mode 100644
index cdabb2ee74be..000000000000
--- a/drivers/mmc/card/block.h
+++ /dev/null
@@ -1 +0,0 @@
-int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 250f223aaa80..fc1ecdaaa9ca 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -12,6 +12,16 @@ config PWRSEQ_EMMC
This driver can also be built as a module. If so, the module
will be called pwrseq_emmc.
+config PWRSEQ_SD8787
+ tristate "HW reset support for SD8787 BT + Wifi module"
+ depends on OF && (MWIFIEX || BT_MRVL_SDIO)
+ help
+ This selects hardware reset support for the SD8787 BT + Wifi
+ module. By default this option is set to n.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_sd8787.
+
config PWRSEQ_SIMPLE
tristate "Simple HW reset support for MMC"
default y
@@ -22,3 +32,69 @@ config PWRSEQ_SIMPLE
This driver can also be built as a module. If so, the module
will be called pwrseq_simple.
+
+config MMC_BLOCK
+ tristate "MMC block device driver"
+ depends on BLOCK
+ default y
+ help
+ Say Y here to enable the MMC block device driver support.
+ This provides a block device driver, which you can use to
+ mount the filesystem. Almost everyone wishing MMC support
+ should say Y or M here.
+
+config MMC_BLOCK_MINORS
+ int "Number of minors per block device"
+ depends on MMC_BLOCK
+ range 4 256
+ default 8
+ help
+ Number of minors per block device. One is needed for every
+ partition on the disk (plus one for the whole disk).
+
+ Number of total MMC minors available is 256, so your number
+ of supported block devices will be limited to 256 divided
+ by this number.
+
+ Default is 8 to be backwards compatible with previous
+ hardwired device numbering.
+
+ If unsure, say 8 here.
+
+config MMC_BLOCK_BOUNCE
+ bool "Use bounce buffer for simple hosts"
+ depends on MMC_BLOCK
+ default y
+ help
+ SD/MMC is a high latency protocol where it is crucial to
+ send large requests in order to get high performance. Many
+ controllers, however, are restricted to continuous memory
+ (i.e. they can't do scatter-gather), something the kernel
+ rarely can provide.
+
+ Say Y here to help these restricted hosts by bouncing
+ requests back and forth from a large buffer. You will get
+ a big performance gain at the cost of up to 64 KiB of
+ physical memory.
+
+ If unsure, say Y here.
+
+config SDIO_UART
+ tristate "SDIO UART/GPS class support"
+ depends on TTY
+ help
+ SDIO function driver for SDIO cards that implements the UART
+ class, as well as the GPS class which appears like a UART.
+
+config MMC_TEST
+ tristate "MMC host test driver"
+ help
+ Development driver that performs a series of reads and writes
+ to a memory card in order to expose certain well known bugs
+ in host controllers. The tests are executed by writing to the
+ "test" file in debugfs under each card. Note that whatever is
+ on your card will be overwritten by these tests.
+
+ This driver is only of interest to those developing or
+ testing a host driver. Most people should say N here.
+
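[Editor's note] The MMC_BLOCK_MINORS help text above implies a simple limit: with 256 MMC minors in total, dividing by the per-device minor count gives the maximum number of mmcblk devices. A minimal C sketch of that arithmetic follows; it is illustrative only and not part of the patch. The helper name is hypothetical, while MAX_DEVICES, perdev_minors and max_devices mirror identifiers visible in block.c elsewhere in this diff.

#define MAX_DEVICES	256		/* total MMC minors available */

static unsigned int perdev_minors = 8;	/* CONFIG_MMC_BLOCK_MINORS default */
static unsigned int max_devices;

/* Hypothetical helper: derive the device limit from the minor split. */
static void mmc_blk_calc_device_limit(void)
{
	/* With the default of 8 minors per device: 256 / 8 = 32 devices. */
	max_devices = MAX_DEVICES / perdev_minors;
}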
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index f007151dfdc6..7e3ed1aeada2 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,8 +7,13 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- quirks.o slot-gpio.o
+ slot-gpio.o
mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_SD8787) += pwrseq_sd8787.o
obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+mmc_block-objs := block.o queue.o
+obj-$(CONFIG_MMC_TEST) += mmc_test.o
+obj-$(CONFIG_SDIO_UART) += sdio_uart.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/core/block.c
index 709a872ed484..ff3da960c473 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/core/block.c
@@ -43,10 +43,17 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "queue.h"
#include "block.h"
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+#include "mmc_ops.h"
+#include "quirks.h"
+#include "sd_ops.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
@@ -54,21 +61,12 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#define INAND_CMD38_ARG_EXT_CSD 113
-#define INAND_CMD38_ARG_ERASE 0x00
-#define INAND_CMD38_ARG_TRIM 0x01
-#define INAND_CMD38_ARG_SECERASE 0x80
-#define INAND_CMD38_ARG_SECTRIM1 0x81
-#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
(rq_data_dir(req) == WRITE))
-#define PACKED_CMD_VER 0x01
-#define PACKED_CMD_WR 0x02
-
static DEFINE_MUTEX(block_mutex);
/*
@@ -87,7 +85,6 @@ static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
-static DEFINE_SPINLOCK(mmc_blk_lock);
/*
* There is one mmc_blk_data per slot.
@@ -102,7 +99,6 @@ struct mmc_blk_data {
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
-#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
unsigned int usage;
unsigned int read_only;
@@ -126,12 +122,6 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
-enum {
- MMC_PACKED_NR_IDX = -1,
- MMC_PACKED_NR_ZERO,
- MMC_PACKED_NR_SINGLE,
-};
-
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
@@ -139,17 +129,6 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
-static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
-{
- struct mmc_packed *packed = mqrq->packed;
-
- mqrq->cmd_type = MMC_PACKED_NONE;
- packed->nr_entries = MMC_PACKED_NR_ZERO;
- packed->idx_failure = MMC_PACKED_NR_IDX;
- packed->retries = 0;
- packed->blocks = 0;
-}
-
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -178,11 +157,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
if (md->usage == 0) {
int devidx = mmc_get_devidx(md->disk);
blk_cleanup_queue(md->queue.queue);
-
- spin_lock(&mmc_blk_lock);
- ida_remove(&mmc_blk_ida, devidx);
- spin_unlock(&mmc_blk_lock);
-
+ ida_simple_remove(&mmc_blk_ida, devidx);
put_disk(md->disk);
kfree(md);
}
@@ -463,9 +438,9 @@ out:
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
- struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct mmc_request mrq = {};
struct scatterlist sg;
int err;
int is_rpmb = false;
@@ -783,15 +758,15 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
return 0;
}
-static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
int err;
u32 result;
__be32 *blocks;
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
@@ -801,9 +776,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err)
- return (u32)-1;
+ return err;
if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
- return (u32)-1;
+ return -EIO;
memset(&cmd, 0, sizeof(struct mmc_command));
@@ -823,7 +798,7 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
blocks = kmalloc(4, GFP_KERNEL);
if (!blocks)
- return (u32)-1;
+ return -ENOMEM;
sg_init_one(&sg, blocks, 4);
@@ -833,14 +808,16 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
kfree(blocks);
if (cmd.error || data.error)
- result = (u32)-1;
+ return -EIO;
- return result;
+ *written_blocks = result;
+
+ return 0;
}
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SEND_STATUS;
@@ -854,7 +831,7 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
}
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- bool hw_busy_detect, struct request *req, int *gen_err)
+ bool hw_busy_detect, struct request *req, bool *gen_err)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
@@ -871,7 +848,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
if (status & R1_ERROR) {
pr_err("%s: %s: error sending status cmd, status %#x\n",
req->rq_disk->disk_name, __func__, status);
- *gen_err = 1;
+ *gen_err = true;
}
/* We may rely on the host hw to handle busy detection.*/
@@ -902,10 +879,10 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
}
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, int *gen_err, u32 *stop_status)
+ struct request *req, bool *gen_err, u32 *stop_status)
{
struct mmc_host *host = card->host;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
bool use_r1b_resp = rq_data_dir(req) == WRITE;
@@ -940,7 +917,7 @@ static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
(*stop_status & R1_ERROR)) {
pr_err("%s: %s: general error sending stop command, resp %#x\n",
req->rq_disk->disk_name, __func__, *stop_status);
- *gen_err = 1;
+ *gen_err = true;
}
return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
@@ -1014,7 +991,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+ struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -1053,7 +1030,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
if ((status & R1_CARD_ECC_FAILED) ||
(brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
(brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
- *ecc_err = 1;
+ *ecc_err = true;
/* Flag General errors */
if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
@@ -1062,7 +1039,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, __func__,
brq->stop.resp[0], status);
- *gen_err = 1;
+ *gen_err = true;
}
/*
@@ -1085,7 +1062,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
}
if (stop_status & R1_CARD_ECC_FAILED)
- *ecc_err = 1;
+ *ecc_err = true;
}
/* Check for set block count errors */
@@ -1154,7 +1131,7 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
int mmc_access_rpmb(struct mmc_queue *mq)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
/*
* If this is a RPMB partition access, return ture
*/
@@ -1164,16 +1141,16 @@ int mmc_access_rpmb(struct mmc_queue *mq)
return false;
}
-static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_DISCARD;
if (!mmc_can_erase(card)) {
err = -EOPNOTSUPP;
- goto out;
+ goto fail;
}
from = blk_rq_pos(req);
@@ -1185,32 +1162,29 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
arg = MMC_TRIM_ARG;
else
arg = MMC_ERASE_ARG;
-retry:
- if (card->quirks & MMC_QUIRK_INAND_CMD38) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- INAND_CMD38_ARG_EXT_CSD,
- arg == MMC_TRIM_ARG ?
- INAND_CMD38_ARG_TRIM :
- INAND_CMD38_ARG_ERASE,
- 0);
- if (err)
- goto out;
- }
- err = mmc_erase(card, from, nr, arg);
-out:
- if (err == -EIO && !mmc_blk_reset(md, card->host, type))
- goto retry;
+ do {
+ err = 0;
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0);
+ }
+ if (!err)
+ err = mmc_erase(card, from, nr, arg);
+ } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
if (!err)
mmc_blk_reset_success(md, type);
+fail:
blk_end_request(req, err, blk_rq_bytes(req));
-
- return err ? 0 : 1;
}
-static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
@@ -1270,13 +1244,11 @@ out_retry:
mmc_blk_reset_success(md, type);
out:
blk_end_request(req, err, blk_rq_bytes(req));
-
- return err ? 0 : 1;
}
-static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
int ret = 0;
@@ -1285,8 +1257,6 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
ret = -EIO;
blk_end_request_all(req, ret);
-
- return ret ? 0 : 1;
}
/*
@@ -1320,15 +1290,16 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
-static int mmc_blk_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
+static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
{
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
- mmc_active);
+ areq);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
int need_retune = card->host->need_retune;
- int ecc_err = 0, gen_err = 0;
+ bool ecc_err = false;
+ bool gen_err = false;
/*
* sbc.error indicates a problem with the set block count
@@ -1378,7 +1349,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
req->rq_disk->disk_name, __func__,
brq->stop.resp[0]);
- gen_err = 1;
+ gen_err = true;
}
err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
@@ -1419,67 +1390,12 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
- if (mmc_packed_cmd(mq_mrq->cmd_type)) {
- if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
- return MMC_BLK_PARTIAL;
- else
- return MMC_BLK_SUCCESS;
- }
-
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
-static int mmc_blk_packed_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
- mmc_active);
- struct request *req = mq_rq->req;
- struct mmc_packed *packed = mq_rq->packed;
- int err, check, status;
- u8 *ext_csd;
-
- packed->retries--;
- check = mmc_blk_err_check(card, areq);
- err = get_card_status(card, &status, 0);
- if (err) {
- pr_err("%s: error %d sending status command\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_ABORT;
- }
-
- if (status & R1_EXCEPTION_EVENT) {
- err = mmc_get_ext_csd(card, &ext_csd);
- if (err) {
- pr_err("%s: error %d sending ext_csd\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_ABORT;
- }
-
- if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
- EXT_CSD_PACKED_FAILURE) &&
- (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
- EXT_CSD_PACKED_GENERIC_ERROR)) {
- if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
- EXT_CSD_PACKED_INDEXED_ERROR) {
- packed->idx_failure =
- ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
- check = MMC_BLK_PARTIAL;
- }
- pr_err("%s: packed cmd failed, nr %u, sectors %u, "
- "failure index: %d\n",
- req->rq_disk->disk_name, packed->nr_entries,
- packed->blocks, packed->idx_failure);
- }
- kfree(ext_csd);
- }
-
- return check;
-}
-
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1488,7 +1404,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
u32 readcmd, writecmd;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mqrq->req;
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
bool do_data_tag;
/*
@@ -1634,236 +1550,17 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->data.sg_len = i;
}
- mqrq->mmc_active.mrq = &brq->mrq;
- mqrq->mmc_active.err_check = mmc_blk_err_check;
-
- mmc_queue_bounce_pre(mqrq);
-}
-
-static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
- struct mmc_card *card)
-{
- unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
- unsigned int max_seg_sz = queue_max_segment_size(q);
- unsigned int len, nr_segs = 0;
-
- do {
- len = min(hdr_sz, max_seg_sz);
- hdr_sz -= len;
- nr_segs++;
- } while (hdr_sz);
-
- return nr_segs;
-}
-
-static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
-{
- struct request_queue *q = mq->queue;
- struct mmc_card *card = mq->card;
- struct request *cur = req, *next = NULL;
- struct mmc_blk_data *md = mq->data;
- struct mmc_queue_req *mqrq = mq->mqrq_cur;
- bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
- unsigned int req_sectors = 0, phys_segments = 0;
- unsigned int max_blk_count, max_phys_segs;
- bool put_back = true;
- u8 max_packed_rw = 0;
- u8 reqs = 0;
-
- /*
- * We don't need to check packed for any further
- * operation of packed stuff as we set MMC_PACKED_NONE
- * and return zero for reqs if geting null packed. Also
- * we clean the flag of MMC_BLK_PACKED_CMD to avoid doing
- * it again when removing blk req.
- */
- if (!mqrq->packed) {
- md->flags &= (~MMC_BLK_PACKED_CMD);
- goto no_packed;
- }
-
- if (!(md->flags & MMC_BLK_PACKED_CMD))
- goto no_packed;
-
- if ((rq_data_dir(cur) == WRITE) &&
- mmc_host_packed_wr(card->host))
- max_packed_rw = card->ext_csd.max_packed_writes;
-
- if (max_packed_rw == 0)
- goto no_packed;
-
- if (mmc_req_rel_wr(cur) &&
- (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
- goto no_packed;
-
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(cur), 8))
- goto no_packed;
-
- mmc_blk_clear_packed(mqrq);
-
- max_blk_count = min(card->host->max_blk_count,
- card->host->max_req_size >> 9);
- if (unlikely(max_blk_count > 0xffff))
- max_blk_count = 0xffff;
-
- max_phys_segs = queue_max_segments(q);
- req_sectors += blk_rq_sectors(cur);
- phys_segments += cur->nr_phys_segments;
-
- if (rq_data_dir(cur) == WRITE) {
- req_sectors += mmc_large_sector(card) ? 8 : 1;
- phys_segments += mmc_calc_packed_hdr_segs(q, card);
- }
-
- do {
- if (reqs >= max_packed_rw - 1) {
- put_back = false;
- break;
- }
-
- spin_lock_irq(q->queue_lock);
- next = blk_fetch_request(q);
- spin_unlock_irq(q->queue_lock);
- if (!next) {
- put_back = false;
- break;
- }
-
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(next), 8))
- break;
-
- if (req_op(next) == REQ_OP_DISCARD ||
- req_op(next) == REQ_OP_SECURE_ERASE ||
- req_op(next) == REQ_OP_FLUSH)
- break;
-
- if (rq_data_dir(cur) != rq_data_dir(next))
- break;
-
- if (mmc_req_rel_wr(next) &&
- (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
- break;
-
- req_sectors += blk_rq_sectors(next);
- if (req_sectors > max_blk_count)
- break;
-
- phys_segments += next->nr_phys_segments;
- if (phys_segments > max_phys_segs)
- break;
-
- list_add_tail(&next->queuelist, &mqrq->packed->list);
- cur = next;
- reqs++;
- } while (1);
-
- if (put_back) {
- spin_lock_irq(q->queue_lock);
- blk_requeue_request(q, next);
- spin_unlock_irq(q->queue_lock);
- }
-
- if (reqs > 0) {
- list_add(&req->queuelist, &mqrq->packed->list);
- mqrq->packed->nr_entries = ++reqs;
- mqrq->packed->retries = reqs;
- return reqs;
- }
-
-no_packed:
- mqrq->cmd_type = MMC_PACKED_NONE;
- return 0;
-}
-
-static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
- struct mmc_card *card,
- struct mmc_queue *mq)
-{
- struct mmc_blk_request *brq = &mqrq->brq;
- struct request *req = mqrq->req;
- struct request *prq;
- struct mmc_blk_data *md = mq->data;
- struct mmc_packed *packed = mqrq->packed;
- bool do_rel_wr, do_data_tag;
- __le32 *packed_cmd_hdr;
- u8 hdr_blocks;
- u8 i = 1;
-
- mqrq->cmd_type = MMC_PACKED_WRITE;
- packed->blocks = 0;
- packed->idx_failure = MMC_PACKED_NR_IDX;
-
- packed_cmd_hdr = packed->cmd_hdr;
- memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
- hdr_blocks = mmc_large_sector(card) ? 8 : 1;
-
- /*
- * Argument for each entry of packed group
- */
- list_for_each_entry(prq, &packed->list, queuelist) {
- do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
- do_data_tag = (card->ext_csd.data_tag_unit_size) &&
- (prq->cmd_flags & REQ_META) &&
- (rq_data_dir(prq) == WRITE) &&
- blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
- /* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] = cpu_to_le32(
- (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
- (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq));
- /* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
- mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
- packed->blocks += blk_rq_sectors(prq);
- i++;
- }
-
- memset(brq, 0, sizeof(struct mmc_blk_request));
- brq->mrq.cmd = &brq->cmd;
- brq->mrq.data = &brq->data;
- brq->mrq.sbc = &brq->sbc;
- brq->mrq.stop = &brq->stop;
-
- brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
- brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-
- brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
- brq->cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq->cmd.arg <<= 9;
- brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
- brq->data.blksz = 512;
- brq->data.blocks = packed->blocks + hdr_blocks;
- brq->data.flags = MMC_DATA_WRITE;
-
- brq->stop.opcode = MMC_STOP_TRANSMISSION;
- brq->stop.arg = 0;
- brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-
- mmc_set_data_timeout(&brq->data, card);
-
- brq->data.sg = mqrq->sg;
- brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
- mqrq->mmc_active.mrq = &brq->mrq;
- mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+ mqrq->areq.mrq = &brq->mrq;
+ mqrq->areq.err_check = mmc_blk_err_check;
mmc_queue_bounce_pre(mqrq);
}
-static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
- struct mmc_blk_request *brq, struct request *req,
- int ret)
+static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+ struct mmc_blk_request *brq, struct request *req,
+ bool old_req_pending)
{
- struct mmc_queue_req *mq_rq;
- mq_rq = container_of(brq, struct mmc_queue_req, brq);
+ bool req_pending;
/*
* If this is an SD card and we're writing, we can first
@@ -1875,136 +1572,104 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
*/
if (mmc_card_sd(card)) {
u32 blocks;
+ int err;
- blocks = mmc_sd_num_wr_blocks(card);
- if (blocks != (u32)-1) {
- ret = blk_end_request(req, 0, blocks << 9);
- }
+ err = mmc_sd_num_wr_blocks(card, &blocks);
+ if (err)
+ req_pending = old_req_pending;
+ else
+ req_pending = blk_end_request(req, 0, blocks << 9);
} else {
- if (!mmc_packed_cmd(mq_rq->cmd_type))
- ret = blk_end_request(req, 0, brq->data.bytes_xfered);
- }
- return ret;
-}
-
-static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
-{
- struct request *prq;
- struct mmc_packed *packed = mq_rq->packed;
- int idx = packed->idx_failure, i = 0;
- int ret = 0;
-
- while (!list_empty(&packed->list)) {
- prq = list_entry_rq(packed->list.next);
- if (idx == i) {
- /* retry from error index */
- packed->nr_entries -= idx;
- mq_rq->req = prq;
- ret = 1;
-
- if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
- list_del_init(&prq->queuelist);
- mmc_blk_clear_packed(mq_rq);
- }
- return ret;
- }
- list_del_init(&prq->queuelist);
- blk_end_request(prq, 0, blk_rq_bytes(prq));
- i++;
+ req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
}
-
- mmc_blk_clear_packed(mq_rq);
- return ret;
+ return req_pending;
}
-static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
{
- struct request *prq;
- struct mmc_packed *packed = mq_rq->packed;
-
- while (!list_empty(&packed->list)) {
- prq = list_entry_rq(packed->list.next);
- list_del_init(&prq->queuelist);
- blk_end_request(prq, -EIO, blk_rq_bytes(prq));
- }
-
- mmc_blk_clear_packed(mq_rq);
+ if (mmc_card_removed(card))
+ req->rq_flags |= RQF_QUIET;
+ while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
}
-static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
- struct mmc_queue_req *mq_rq)
+/**
+ * mmc_blk_rw_try_restart() - tries to restart the current async request
+ * @mq: the queue with the card and host to restart
+ * @req: a new request that want to be started after the current one
+ */
+static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
{
- struct request *prq;
- struct request_queue *q = mq->queue;
- struct mmc_packed *packed = mq_rq->packed;
-
- while (!list_empty(&packed->list)) {
- prq = list_entry_rq(packed->list.prev);
- if (prq->queuelist.prev != &packed->list) {
- list_del_init(&prq->queuelist);
- spin_lock_irq(q->queue_lock);
- blk_requeue_request(mq->queue, prq);
- spin_unlock_irq(q->queue_lock);
- } else {
- list_del_init(&prq->queuelist);
- }
- }
+ if (!req)
+ return;
- mmc_blk_clear_packed(mq_rq);
+ /*
+ * If the card was removed, just cancel everything and return.
+ */
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ blk_end_request_all(req, -EIO);
+ return;
+ }
+ /* Else proceed and try to restart the current async request */
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, mq->card, 0, mq);
+ mmc_start_areq(mq->card->host, &mq->mqrq_cur->areq, NULL);
}
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
{
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
- int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
+ struct mmc_blk_request *brq;
+ int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req = rqc;
- struct mmc_async_req *areq;
- const u8 packed_nr = 2;
- u8 reqs = 0;
+ struct request *old_req;
+ struct mmc_async_req *new_areq;
+ struct mmc_async_req *old_areq;
+ bool req_pending = true;
- if (!rqc && !mq->mqrq_prev->req)
- return 0;
-
- if (rqc)
- reqs = mmc_blk_prep_packed_list(mq, rqc);
+ if (!new_req && !mq->mqrq_prev->req)
+ return;
do {
- if (rqc) {
+ if (new_req) {
/*
* When 4KB native sector is enabled, only 8 blocks
* multiple read or write is allowed
*/
if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
+ !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- req->rq_disk->disk_name);
- mq_rq = mq->mqrq_cur;
- goto cmd_abort;
+ new_req->rq_disk->disk_name);
+ mmc_blk_rw_cmd_abort(card, new_req);
+ return;
}
- if (reqs >= packed_nr)
- mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
- card, mq);
- else
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
- areq = &mq->mqrq_cur->mmc_active;
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ new_areq = &mq->mqrq_cur->areq;
} else
- areq = NULL;
- areq = mmc_start_req(card->host, areq, (int *) &status);
- if (!areq) {
+ new_areq = NULL;
+
+ old_areq = mmc_start_areq(card->host, new_areq, &status);
+ if (!old_areq) {
+ /*
+ * We have just put the first request into the pipeline
+ * and there is nothing more to do until it is
+ * complete.
+ */
if (status == MMC_BLK_NEW_REQUEST)
- mq->flags |= MMC_QUEUE_NEW_REQUEST;
- return 0;
+ mq->new_request = true;
+ return;
}
- mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ /*
+ * An asynchronous request has been completed and we proceed
+ * to handle the result of it.
+ */
+ mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
brq = &mq_rq->brq;
- req = mq_rq->req;
- type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ old_req = mq_rq->req;
+ type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
switch (status) {
@@ -2015,33 +1680,33 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
*/
mmc_blk_reset_success(md, type);
- if (mmc_packed_cmd(mq_rq->cmd_type)) {
- ret = mmc_blk_end_packed_req(mq_rq);
- break;
- } else {
- ret = blk_end_request(req, 0,
- brq->data.bytes_xfered);
- }
-
+ req_pending = blk_end_request(old_req, 0,
+ brq->data.bytes_xfered);
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
* were returned by the host controller, it's a bug.
*/
- if (status == MMC_BLK_SUCCESS && ret) {
+ if (status == MMC_BLK_SUCCESS && req_pending) {
pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(req),
+ __func__, blk_rq_bytes(old_req),
brq->data.bytes_xfered);
- rqc = NULL;
- goto cmd_abort;
+ mmc_blk_rw_cmd_abort(card, old_req);
+ return;
}
break;
case MMC_BLK_CMD_ERR:
- ret = mmc_blk_cmd_err(md, card, brq, req, ret);
- if (mmc_blk_reset(md, card->host, type))
- goto cmd_abort;
- if (!ret)
- goto start_new_req;
+ req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
+ if (mmc_blk_reset(md, card->host, type)) {
+ if (req_pending)
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
+ if (!req_pending) {
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
break;
case MMC_BLK_RETRY:
retune_retry_done = brq->retune_retry_done;
@@ -2051,23 +1716,27 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
case MMC_BLK_ABORT:
if (!mmc_blk_reset(md, card->host, type))
break;
- goto cmd_abort;
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
case MMC_BLK_DATA_ERR: {
int err;
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV ||
- mmc_packed_cmd(mq_rq->cmd_type))
- goto cmd_abort;
+ if (err == -ENODEV) {
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
/* Fall through */
}
case MMC_BLK_ECC_ERR:
if (brq->data.blocks > 1) {
/* Redo read one sector at a time */
pr_warn("%s: retrying using single block read\n",
- req->rq_disk->disk_name);
+ old_req->rq_disk->disk_name);
disable_multi = 1;
break;
}
@@ -2076,82 +1745,44 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* time, so we only reach here after trying to
* read a single sector.
*/
- ret = blk_end_request(req, -EIO,
- brq->data.blksz);
- if (!ret)
- goto start_new_req;
+ req_pending = blk_end_request(old_req, -EIO,
+ brq->data.blksz);
+ if (!req_pending) {
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
+ }
break;
case MMC_BLK_NOMEDIUM:
- goto cmd_abort;
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
default:
pr_err("%s: Unhandled return value (%d)",
- req->rq_disk->disk_name, status);
- goto cmd_abort;
- }
-
- if (ret) {
- if (mmc_packed_cmd(mq_rq->cmd_type)) {
- if (!mq_rq->packed->retries)
- goto cmd_abort;
- mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
- mmc_start_req(card->host,
- &mq_rq->mmc_active, NULL);
- } else {
-
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_req(card->host,
- &mq_rq->mmc_active, NULL);
- }
- mq_rq->brq.retune_retry_done = retune_retry_done;
+ old_req->rq_disk->disk_name, status);
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_try_restart(mq, new_req);
+ return;
}
- } while (ret);
-
- return 1;
- cmd_abort:
- if (mmc_packed_cmd(mq_rq->cmd_type)) {
- mmc_blk_abort_packed_req(mq_rq);
- } else {
- if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
- while (ret)
- ret = blk_end_request(req, -EIO,
- blk_rq_cur_bytes(req));
- }
-
- start_new_req:
- if (rqc) {
- if (mmc_card_removed(card)) {
- rqc->cmd_flags |= REQ_QUIET;
- blk_end_request_all(rqc, -EIO);
- } else {
+ if (req_pending) {
/*
- * If current request is packed, it needs to put back.
+ * In case of a incomplete request
+ * prepare it again and resend.
*/
- if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
- mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
-
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
- mmc_start_req(card->host,
- &mq->mqrq_cur->mmc_active, NULL);
+ mmc_blk_rw_rq_prep(mq_rq, card,
+ disable_multi, mq);
+ mmc_start_areq(card->host,
+ &mq_rq->areq, NULL);
+ mq_rq->brq.retune_retry_done = retune_retry_done;
}
- }
-
- return 0;
+ } while (req_pending);
}
-int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
- struct mmc_blk_data *md = mq->data;
+ struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- struct mmc_host *host = card->host;
- unsigned long flags;
bool req_is_special = mmc_req_is_special(req);
if (req && !mq->mqrq_prev->req)
@@ -2163,37 +1794,32 @@ int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (req) {
blk_end_request_all(req, -EIO);
}
- ret = 0;
goto out;
}
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ mq->new_request = false;
if (req && req_op(req) == REQ_OP_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- ret = mmc_blk_issue_discard_rq(mq, req);
+ mmc_blk_issue_discard_rq(mq, req);
} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
/* complete ongoing async transfer before issuing secure erase*/
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ mmc_blk_issue_secdiscard_rq(mq, req);
} else if (req && req_op(req) == REQ_OP_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- ret = mmc_blk_issue_flush(mq, req);
+ mmc_blk_issue_flush(mq, req);
} else {
- if (!req && host->areq) {
- spin_lock_irqsave(&host->context_info.lock, flags);
- host->context_info.is_waiting_last_req = true;
- spin_unlock_irqrestore(&host->context_info.lock, flags);
- }
- ret = mmc_blk_issue_rw_rq(mq, req);
+ mmc_blk_issue_rw_rq(mq, req);
+ card->host->context_info.is_waiting_last_req = false;
}
out:
- if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
+ if ((!req && !mq->new_request) || req_is_special)
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
@@ -2201,7 +1827,6 @@ out:
* the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
*/
mmc_put_card(card);
- return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2220,23 +1845,9 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
-again:
- if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
- return ERR_PTR(-ENOMEM);
-
- spin_lock(&mmc_blk_lock);
- ret = ida_get_new(&mmc_blk_ida, &devidx);
- spin_unlock(&mmc_blk_lock);
-
- if (ret == -EAGAIN)
- goto again;
- else if (ret)
- return ERR_PTR(ret);
-
- if (devidx >= max_devices) {
- ret = -ENOSPC;
- goto out;
- }
+ devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return ERR_PTR(devidx);
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
@@ -2266,7 +1877,7 @@ again:
if (ret)
goto err_putdisk;
- md->queue.data = md;
+ md->queue.blkdata = md;
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx * perdev_minors;
@@ -2318,14 +1929,6 @@ again:
blk_queue_write_cache(md->queue.queue, true, true);
}
- if (mmc_card_mmc(card) &&
- (area_type == MMC_BLK_DATA_AREA_MAIN) &&
- (md->flags & MMC_BLK_CMD23) &&
- card->ext_csd.packed_event_en) {
- if (!mmc_packed_init(&md->queue, card))
- md->flags |= MMC_BLK_PACKED_CMD;
- }
-
return md;
err_putdisk:
@@ -2333,9 +1936,7 @@ again:
err_kfree:
kfree(md);
out:
- spin_lock(&mmc_blk_lock);
- ida_remove(&mmc_blk_ida, devidx);
- spin_unlock(&mmc_blk_lock);
+ ida_simple_remove(&mmc_blk_ida, devidx);
return ERR_PTR(ret);
}
@@ -2429,8 +2030,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
*/
card = md->queue.card;
mmc_cleanup_queue(&md->queue);
- if (md->flags & MMC_BLK_PACKED_CMD)
- mmc_packed_clean(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2502,80 +2101,6 @@ force_ro_fail:
return ret;
}
-static const struct mmc_fixup blk_fixups[] =
-{
- MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
- MMC_QUIRK_INAND_CMD38),
-
- /*
- * Some MMC cards experience performance degradation with CMD23
- * instead of CMD12-bounded multiblock transfers. For now we'll
- * black list what's bad...
- * - Certain Toshiba cards.
- *
- * N.B. This doesn't affect SD cards.
- */
- MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_BLK_NO_CMD23),
-
- /*
- * Some MMC cards need longer data read timeout than indicated in CSD.
- */
- MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
- MMC_QUIRK_LONG_READ_TIME),
- MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_LONG_READ_TIME),
-
- /*
- * On these Samsung MoviNAND parts, performing secure erase or
- * secure trim can result in unrecoverable corruption due to a
- * firmware bug.
- */
- MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
- MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-
- /*
- * On Some Kingston eMMCs, performing trim can result in
- * unrecoverable data conrruption occasionally due to a firmware bug.
- */
- MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_TRIM_BROKEN),
- MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
- MMC_QUIRK_TRIM_BROKEN),
-
- END_FIXUP
-};
-
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md, *part_md;
@@ -2587,7 +2112,7 @@ static int mmc_blk_probe(struct mmc_card *card)
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV;
- mmc_fixup_device(card, blk_fixups);
+ mmc_fixup_device(card, mmc_blk_fixups);
md = mmc_blk_alloc(card);
if (IS_ERR(md))
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
new file mode 100644
index 000000000000..860ca7c8df86
--- /dev/null
+++ b/drivers/mmc/core/block.h
@@ -0,0 +1,9 @@
+#ifndef _MMC_CORE_BLOCK_H
+#define _MMC_CORE_BLOCK_H
+
+struct mmc_queue;
+struct request;
+
+void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+
+#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c64266f5a399..301246513a37 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -23,6 +23,8 @@
#include <linux/mmc/host.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "sdio_cis.h"
#include "bus.h"
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 00a19710b6b4..72b0ef03f10a 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -11,6 +11,11 @@
#ifndef _MMC_CORE_BUS_H
#define _MMC_CORE_BUS_H
+#include <linux/device.h>
+
+struct mmc_host;
+struct mmc_card;
+
#define MMC_DEV_ATTR(name, fmt, args...) \
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
@@ -27,5 +32,14 @@ void mmc_remove_card(struct mmc_card *card);
int mmc_register_bus(void);
void mmc_unregister_bus(void);
-#endif
+struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *card);
+ void (*remove)(struct mmc_card *card);
+ void (*shutdown)(struct mmc_card *card);
+};
+int mmc_register_driver(struct mmc_driver *drv);
+void mmc_unregister_driver(struct mmc_driver *drv);
+
+#endif
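[Editor's note] The struct mmc_driver and mmc_register_driver()/mmc_unregister_driver() declarations moved into this private header are consumed by card-level drivers such as the block driver. The sketch below shows the expected registration pattern; it is not part of the patch, and the .remove/.shutdown callbacks and driver name are hypothetical placeholders (only mmc_blk_probe appears in this diff).

static struct mmc_driver mmc_example_driver = {
	.drv = {
		.name = "mmcblk",		/* illustrative name */
	},
	.probe = mmc_blk_probe,			/* shown in block.c above */
	.remove = mmc_example_remove,		/* hypothetical callback */
	.shutdown = mmc_example_shutdown,	/* hypothetical callback */
};

/* Registered from the driver's module init and torn down on exit: */
/*	mmc_register_driver(&mmc_example_driver);	*/
/*	mmc_unregister_driver(&mmc_example_driver);	*/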
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
new file mode 100644
index 000000000000..f06cd91964ce
--- /dev/null
+++ b/drivers/mmc/core/card.h
@@ -0,0 +1,221 @@
+/*
+ * Private header for the mmc subsystem
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _MMC_CORE_CARD_H
+#define _MMC_CORE_CARD_H
+
+#include <linux/mmc/card.h>
+
+#define mmc_card_name(c) ((c)->cid.prod_name)
+#define mmc_card_id(c) (dev_name(&(c)->dev))
+#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
+
+/* Card states */
+#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
+#define MMC_STATE_READONLY (1<<1) /* card is read-only */
+#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */
+#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
+#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
+#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */
+#define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */
+
+#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
+#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
+#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
+#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
+#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
+#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+
+#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
+#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
+#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
+#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
+#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
+#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
+#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least some of these bugs we need a work-around.
+ */
+struct mmc_fixup {
+ /* CID-specific fields. */
+ const char *name;
+
+ /* Valid revision range */
+ u64 rev_start, rev_end;
+
+ unsigned int manfid;
+ unsigned short oemid;
+
+ /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
+ u16 cis_vendor, cis_device;
+
+ /* for MMC cards */
+ unsigned int ext_csd_rev;
+
+ void (*vendor_fixup)(struct mmc_card *card, int data);
+ int data;
+};
+
+#define CID_MANFID_ANY (-1u)
+#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_NAME_ANY (NULL)
+
+#define EXT_CSD_REV_ANY (-1u)
+
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_KINGSTON 0x70
+#define CID_MANFID_HYNIX 0x90
+
+#define END_FIXUP { NULL }
+
+#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _cis_vendor, _cis_device, \
+ _fixup, _data, _ext_csd_rev) \
+ { \
+ .name = (_name), \
+ .manfid = (_manfid), \
+ .oemid = (_oemid), \
+ .rev_start = (_rev_start), \
+ .rev_end = (_rev_end), \
+ .cis_vendor = (_cis_vendor), \
+ .cis_device = (_cis_device), \
+ .vendor_fixup = (_fixup), \
+ .data = (_data), \
+ .ext_csd_rev = (_ext_csd_rev), \
+ }
+
+#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _fixup, _data, _ext_csd_rev) \
+ _FIXUP_EXT(_name, _manfid, \
+ _oemid, _rev_start, _rev_end, \
+ SDIO_ANY_ID, SDIO_ANY_ID, \
+ _fixup, _data, _ext_csd_rev) \
+
+#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ EXT_CSD_REV_ANY)
+
+#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \
+ _ext_csd_rev) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ _ext_csd_rev)
+
+#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
+ CID_OEMID_ANY, 0, -1ull, \
+ _vendor, _device, \
+ _fixup, _data, EXT_CSD_REV_ANY) \
+
+#define cid_rev(hwrev, fwrev, year, month) \
+ (((u64) hwrev) << 40 | \
+ ((u64) fwrev) << 32 | \
+ ((u64) year) << 16 | \
+ ((u64) month))
+
+#define cid_rev_card(card) \
+ cid_rev(card->cid.hwrev, \
+ card->cid.fwrev, \
+ card->cid.year, \
+ card->cid.month)
+
+/*
+ * Unconditionally quirk add/remove.
+ */
+static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+ card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for MMC products.
+ */
+static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for SD products.
+ */
+static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks &= ~data;
+}
+
+static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LENIENT_FN0;
+}
+
+static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
+}
+
+static inline int mmc_card_disable_cd(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_DISABLE_CD;
+}
+
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
+static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
+}
+
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
+static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
+}
+
+static inline int mmc_card_broken_hpi(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_HPI;
+}
+
+#endif
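[Editor's note] The fixup macros and quirk helpers defined in card.h are used as tables of struct mmc_fixup entries, terminated by END_FIXUP and applied with mmc_fixup_device() during probe (block.c now applies mmc_blk_fixups this way). Below is a minimal sketch, not part of the patch, reusing one entry from the table removed from block.c earlier in this diff; the table name is illustrative.

static const struct mmc_fixup example_fixups[] = {
	/* Certain Toshiba eMMC parts degrade with CMD23-bounded transfers. */
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	END_FIXUP
};

/* Applied from a card driver's probe path: */
/*	mmc_fixup_device(card, example_fixups);	*/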
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2553d903a82b..926e0fde07d7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -40,6 +40,7 @@
#include <trace/events/mmc.h>
#include "core.h"
+#include "card.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
@@ -306,16 +307,16 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->sbc->mrq = mrq;
}
if (mrq->data) {
- BUG_ON(mrq->data->blksz > host->max_blk_size);
- BUG_ON(mrq->data->blocks > host->max_blk_count);
- BUG_ON(mrq->data->blocks * mrq->data->blksz >
- host->max_req_size);
-
+ if (mrq->data->blksz > host->max_blk_size ||
+ mrq->data->blocks > host->max_blk_count ||
+ mrq->data->blocks * mrq->data->blksz > host->max_req_size)
+ return -EINVAL;
#ifdef CONFIG_MMC_DEBUG
sz = 0;
for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
sz += sg->length;
- BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
+ if (sz != mrq->data->blocks * mrq->data->blksz)
+ return -EINVAL;
#endif
mrq->cmd->data = mrq->data;
@@ -349,8 +350,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
int timeout;
bool use_busy_signal;
- BUG_ON(!card);
-
if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
return;
@@ -380,7 +379,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
mmc_retune_hold(card->host);
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout,
+ EXT_CSD_BKOPS_START, 1, timeout, 0,
use_busy_signal, true, false);
if (err) {
pr_warn("%s: Error %d starting bkops\n",
@@ -497,32 +496,27 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
*
* Returns enum mmc_blk_status after checking errors.
*/
-static int mmc_wait_for_data_req_done(struct mmc_host *host,
- struct mmc_request *mrq,
- struct mmc_async_req *next_req)
+static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
{
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
- int err;
- unsigned long flags;
+ enum mmc_blk_status status;
while (1) {
wait_event_interruptible(context_info->wait,
(context_info->is_done_rcv ||
context_info->is_new_req));
- spin_lock_irqsave(&context_info->lock, flags);
- context_info->is_waiting_last_req = false;
- spin_unlock_irqrestore(&context_info->lock, flags);
+
if (context_info->is_done_rcv) {
context_info->is_done_rcv = false;
- context_info->is_new_req = false;
cmd = mrq->cmd;
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card)) {
- err = host->areq->err_check(host->card,
- host->areq);
- break; /* return err */
+ status = host->areq->err_check(host->card,
+ host->areq);
+ break; /* return status */
} else {
mmc_retune_recheck(host);
pr_info("%s: req failed (CMD%u): %d, retrying...\n",
@@ -533,14 +527,12 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
__mmc_start_request(host, mrq);
continue; /* wait for done/new event again */
}
- } else if (context_info->is_new_req) {
- context_info->is_new_req = false;
- if (!next_req)
- return MMC_BLK_NEW_REQUEST;
}
+
+ return MMC_BLK_NEW_REQUEST;
}
mmc_retune_release(host);
- return err;
+ return status;
}
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
@@ -611,18 +603,15 @@ EXPORT_SYMBOL(mmc_is_req_done);
* mmc_pre_req - Prepare for a new request
* @host: MMC host to prepare command
* @mrq: MMC request to prepare for
- * @is_first_req: true if there is no previous started request
- * that may run in parellel to this call, otherwise false
*
* mmc_pre_req() is called in prior to mmc_start_req() to let
* host prepare for the new request. Preparation of a request may be
* performed while another request is running on the host.
*/
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
- bool is_first_req)
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
{
if (host->ops->pre_req)
- host->ops->pre_req(host, mrq, is_first_req);
+ host->ops->pre_req(host, mrq);
}
/**
@@ -642,10 +631,41 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
}
/**
- * mmc_start_req - start a non-blocking request
+ * mmc_finalize_areq() - finalize an asynchronous request
+ * @host: MMC host to finalize any ongoing request on
+ *
+ * Returns the status of the ongoing asynchronous request, or
+ * MMC_BLK_SUCCESS if no request was in progress.
+ */
+static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
+{
+ enum mmc_blk_status status;
+
+ if (!host->areq)
+ return MMC_BLK_SUCCESS;
+
+ status = mmc_wait_for_data_req_done(host, host->areq->mrq);
+ if (status == MMC_BLK_NEW_REQUEST)
+ return status;
+
+ /*
+ * Check BKOPS urgency for each R1 response
+ */
+ if (host->card && mmc_card_mmc(host->card) &&
+ ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
+ (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
+ (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
+ mmc_start_bkops(host->card, true);
+ }
+
+ return status;
+}
+
+/**
+ * mmc_start_areq - start an asynchronous request
* @host: MMC host to start command
- * @areq: async request to start
- * @error: out parameter returns 0 for success, otherwise non zero
+ * @areq: asynchronous request to start
+ * @ret_stat: out parameter for status
*
* Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
@@ -657,68 +677,50 @@ static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
* return the completed request. If there is no ongoing request, NULL
* is returned without waiting. NULL is not an error condition.
*/
-struct mmc_async_req *mmc_start_req(struct mmc_host *host,
- struct mmc_async_req *areq, int *error)
+struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
+ struct mmc_async_req *areq,
+ enum mmc_blk_status *ret_stat)
{
- int err = 0;
+ enum mmc_blk_status status;
int start_err = 0;
struct mmc_async_req *data = host->areq;
/* Prepare a new request */
if (areq)
- mmc_pre_req(host, areq->mrq, !host->areq);
-
- if (host->areq) {
- err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
- if (err == MMC_BLK_NEW_REQUEST) {
- if (error)
- *error = err;
- /*
- * The previous request was not completed,
- * nothing to return
- */
- return NULL;
- }
- /*
- * Check BKOPS urgency for each R1 response
- */
- if (host->card && mmc_card_mmc(host->card) &&
- ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
- (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
- (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
-
- /* Cancel the prepared request */
- if (areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
+ mmc_pre_req(host, areq->mrq);
- mmc_start_bkops(host->card, true);
+ /* Finalize previous request */
+ status = mmc_finalize_areq(host);
- /* prepare the request again */
- if (areq)
- mmc_pre_req(host, areq->mrq, !host->areq);
- }
+ /* The previous request is still going on... */
+ if (status == MMC_BLK_NEW_REQUEST) {
+ if (ret_stat)
+ *ret_stat = status;
+ return NULL;
}
- if (!err && areq)
+ /* Fine so far, start the new request! */
+ if (status == MMC_BLK_SUCCESS && areq)
start_err = __mmc_start_data_req(host, areq->mrq);
+ /* Postprocess the old request at this point */
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
- /* Cancel a prepared request if it was not started. */
- if ((err || start_err) && areq)
+ /* Cancel a prepared request if it was not started. */
+ if ((status != MMC_BLK_SUCCESS || start_err) && areq)
mmc_post_req(host, areq->mrq, -EINVAL);
- if (err)
+ if (status != MMC_BLK_SUCCESS)
host->areq = NULL;
else
host->areq = areq;
- if (error)
- *error = err;
+ if (ret_stat)
+ *ret_stat = status;
return data;
}
-EXPORT_SYMBOL(mmc_start_req);
+EXPORT_SYMBOL(mmc_start_areq);
/**
* mmc_wait_for_req - start a request and wait for completion
@@ -754,8 +756,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
u32 status;
unsigned long prg_wait;
- BUG_ON(!card);
-
if (!card->ext_csd.hpi_en) {
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
return 1;
@@ -820,7 +820,7 @@ EXPORT_SYMBOL(mmc_interrupt_hpi);
*/
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
- struct mmc_request mrq = {NULL};
+ struct mmc_request mrq = {};
WARN_ON(!host->claimed);
@@ -850,7 +850,6 @@ int mmc_stop_bkops(struct mmc_card *card)
{
int err = 0;
- BUG_ON(!card);
err = mmc_interrupt_hpi(card);
/*
@@ -1644,7 +1643,7 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
return ocr;
}
-int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
int err = 0;
int old_signal_voltage = host->ios.signal_voltage;
@@ -1660,21 +1659,12 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
+int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err = 0;
u32 clock;
- BUG_ON(!host);
-
- /*
- * Send CMD11 only if the request is to switch the card to
- * 1.8V signalling.
- */
- if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- return __mmc_set_signal_voltage(host, signal_voltage);
-
/*
* If we cannot switch voltages, return failure so the caller
* can continue without UHS mode
@@ -1713,7 +1703,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
host->ios.clock = 0;
mmc_set_ios(host);
- if (__mmc_set_signal_voltage(host, signal_voltage)) {
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
@@ -1822,11 +1812,11 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
mmc_set_initial_state(host);
/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
- if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
+ if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
- else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
+ else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
- else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
+ else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
/*
@@ -1884,9 +1874,7 @@ void mmc_power_cycle(struct mmc_host *host, u32 ocr)
*/
static void __mmc_release_bus(struct mmc_host *host)
{
- BUG_ON(!host);
- BUG_ON(host->bus_refs);
- BUG_ON(!host->bus_dead);
+ WARN_ON(!host->bus_dead);
host->bus_ops = NULL;
}
@@ -1926,15 +1914,12 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
unsigned long flags;
- BUG_ON(!host);
- BUG_ON(!ops);
-
WARN_ON(!host->claimed);
spin_lock_irqsave(&host->lock, flags);
- BUG_ON(host->bus_ops);
- BUG_ON(host->bus_refs);
+ WARN_ON(host->bus_ops);
+ WARN_ON(host->bus_refs);
host->bus_ops = ops;
host->bus_refs = 1;
@@ -1950,8 +1935,6 @@ void mmc_detach_bus(struct mmc_host *host)
{
unsigned long flags;
- BUG_ON(!host);
-
WARN_ON(!host->claimed);
WARN_ON(!host->bus_ops);
@@ -2152,7 +2135,7 @@ static unsigned int mmc_erase_timeout(struct mmc_card *card,
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
unsigned int to, unsigned int arg)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
bool use_r1b_resp = false;
unsigned long timeout;
@@ -2574,7 +2557,7 @@ EXPORT_SYMBOL(mmc_calc_max_discard);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
mmc_card_hs400(card) || mmc_card_hs400es(card))
@@ -2590,7 +2573,7 @@ EXPORT_SYMBOL(mmc_set_blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SET_BLOCK_COUNT;
cmd.arg = blockcount & 0x0000FFFF;
@@ -2824,12 +2807,11 @@ void mmc_start_host(struct mmc_host *host)
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
- mmc_claim_host(host);
- if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
- mmc_power_off(host);
- else
+ if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
+ mmc_claim_host(host);
mmc_power_up(host, host->ocr_avail);
- mmc_release_host(host);
+ mmc_release_host(host);
+ }
mmc_gpiod_request_cd_irq(host);
_mmc_detect_change(host, 0, false);
@@ -2865,8 +2847,6 @@ void mmc_stop_host(struct mmc_host *host)
}
mmc_bus_put(host);
- BUG_ON(host->card);
-
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
@@ -3019,7 +2999,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
*/
void mmc_init_context_info(struct mmc_host *host)
{
- spin_lock_init(&host->context_info.lock);
host->context_info.is_new_req = false;
host->context_info.is_done_rcv = false;
host->context_info.is_waiting_last_req = false;
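
A rough sketch of how a caller drives the renamed asynchronous API after this
change (simplified; next_areq and complete_request() are placeholders, not the
actual block layer code):

	enum mmc_blk_status status;
	struct mmc_async_req *prev;

	prev = mmc_start_areq(host, next_areq, &status);
	if (status == MMC_BLK_NEW_REQUEST)
		return;				/* previous request still in flight */
	if (prev)
		complete_request(prev, status);	/* hand the finished request back */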
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 0fa86a2afc26..55f543fd37c4 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -12,6 +12,11 @@
#define _MMC_CORE_CORE_H
#include <linux/delay.h>
+#include <linux/sched.h>
+
+struct mmc_host;
+struct mmc_card;
+struct mmc_request;
#define MMC_CMD_RETRIES 3
@@ -43,8 +48,8 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
-int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr);
-int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -69,6 +74,7 @@ void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
int _mmc_detect_card_removed(struct mmc_host *host);
+int mmc_detect_card_removed(struct mmc_host *host);
int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
@@ -98,5 +104,38 @@ static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
#endif
-#endif
+void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
+bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg);
+int mmc_can_erase(struct mmc_card *card);
+int mmc_can_trim(struct mmc_card *card);
+int mmc_can_discard(struct mmc_card *card);
+int mmc_can_sanitize(struct mmc_card *card);
+int mmc_can_secure_erase_trim(struct mmc_card *card);
+int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+ unsigned int nr);
+unsigned int mmc_calc_max_discard(struct mmc_card *card);
+
+int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
+int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write);
+
+int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+void mmc_release_host(struct mmc_host *host);
+void mmc_get_card(struct mmc_card *card);
+void mmc_put_card(struct mmc_card *card);
+
+/**
+ * mmc_claim_host - exclusively claim a host
+ * @host: mmc host to claim
+ *
+ * Claim a host for a set of operations.
+ */
+static inline void mmc_claim_host(struct mmc_host *host)
+{
+ __mmc_claim_host(host, NULL);
+}
+#endif
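
A short sketch of the exclusive-access pattern these newly-exposed helpers
support (simplified; real callers also check and propagate the error):

	mmc_get_card(card);		/* runtime-resume the card and claim its host */
	err = mmc_set_blocklen(card, 512);
	mmc_put_card(card);		/* release the host and drop the PM reference */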
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index c8451ce557ae..a1fba5732d66 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -20,6 +20,8 @@
#include <linux/mmc/host.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "mmc_ops.h"
#ifdef CONFIG_FAIL_MMC_REQUEST
@@ -321,7 +323,11 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
for (i = 0; i < 512; i++)
n += sprintf(buf + n, "%02x", ext_csd[i]);
n += sprintf(buf + n, "\n");
- BUG_ON(n != EXT_CSD_STR_LEN);
+
+ if (n != EXT_CSD_STR_LEN) {
+ err = -EINVAL;
+ goto out_free;
+ }
filp->private_data = buf;
kfree(ext_csd);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 98f25ffb4258..3f8c85d5aa09 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -34,14 +34,11 @@
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
static DEFINE_IDA(mmc_host_ida);
-static DEFINE_SPINLOCK(mmc_host_lock);
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
- spin_lock(&mmc_host_lock);
- ida_remove(&mmc_host_ida, host->index);
- spin_unlock(&mmc_host_lock);
+ ida_simple_remove(&mmc_host_ida, host->index);
kfree(host);
}
@@ -301,6 +298,8 @@ int mmc_of_parse(struct mmc_host *host)
if (of_property_read_bool(np, "wakeup-source") ||
of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+ if (of_property_read_bool(np, "mmc-ddr-3_3v"))
+ host->caps |= MMC_CAP_3_3V_DDR;
if (of_property_read_bool(np, "mmc-ddr-1_8v"))
host->caps |= MMC_CAP_1_8V_DDR;
if (of_property_read_bool(np, "mmc-ddr-1_2v"))
@@ -354,22 +353,13 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
-again:
- if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) {
+ err = ida_simple_get(&mmc_host_ida, 0, 0, GFP_KERNEL);
+ if (err < 0) {
kfree(host);
return NULL;
}
- spin_lock(&mmc_host_lock);
- err = ida_get_new(&mmc_host_ida, &host->index);
- spin_unlock(&mmc_host_lock);
-
- if (err == -EAGAIN) {
- goto again;
- } else if (err) {
- kfree(host);
- return NULL;
- }
+ host->index = err;
dev_set_name(&host->class_dev, "mmc%d", host->index);
@@ -381,6 +371,8 @@ again:
if (mmc_gpio_alloc(host)) {
put_device(&host->class_dev);
+ ida_simple_remove(&mmc_host_ida, host->index);
+ kfree(host);
return NULL;
}
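
The IDA conversion above pairs ida_simple_get() at allocation with
ida_simple_remove() in both the error path and the class-device release. A
generic sketch of that pairing (illustration only, not mmc code):

	static DEFINE_IDA(my_ida);		/* placeholder IDA, for illustration */

	int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;			/* allocation failed, nothing to undo */
	/* ... use id ... */
	ida_simple_remove(&my_ida, id);		/* paired with every successful get */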
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 992bf5397633..fb6a76a03833 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,6 +10,7 @@
*/
#ifndef _MMC_CORE_HOST_H
#define _MMC_CORE_HOST_H
+
#include <linux/mmc/host.h>
int mmc_register_host_class(void);
@@ -20,6 +21,53 @@ void mmc_retune_disable(struct mmc_host *host);
void mmc_retune_hold(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
+void mmc_retune_pause(struct mmc_host *host);
+void mmc_retune_unpause(struct mmc_host *host);
+
+static inline void mmc_retune_recheck(struct mmc_host *host)
+{
+ if (host->hold_retune <= 1)
+ host->retune_now = 1;
+}
+
+static inline int mmc_host_cmd23(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_CMD23;
+}
+
+static inline int mmc_boot_partition_access(struct mmc_host *host)
+{
+ return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
+}
+
+static inline int mmc_host_uhs(struct mmc_host *host)
+{
+ return host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50);
+}
+
+static inline bool mmc_card_hs200(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS200;
+}
+
+static inline bool mmc_card_ddr52(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_DDR52;
+}
+
+static inline bool mmc_card_hs400(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS400;
+}
+
+static inline bool mmc_card_hs400es(struct mmc_card *card)
+{
+ return card->host->ios.enhanced_strobe;
+}
+
#endif
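
With the timing helpers moved here, core code can branch on the negotiated mode
without touching host->ios directly. A hedged sketch of the kind of check this
enables (mmc_hs200_tuning() names the core's HS200 tuning step, shown only for
illustration):

	if (mmc_card_hs400es(card))
		return 0;		/* enhanced strobe needs no HS200 tuning */
	if (mmc_card_hs200(card))
		err = mmc_hs200_tuning(card);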
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index df19777068a6..b502601df228 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -21,9 +21,11 @@
#include <linux/mmc/mmc.h>
#include "core.h"
+#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "quirks.h"
#include "sd_ops.h"
#define DEFAULT_CMD6_TIMEOUT_MS 500
@@ -47,17 +49,6 @@ static const unsigned int tacc_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
-static const struct mmc_fixup mmc_ext_csd_fixups[] = {
- /*
- * Certain Hynix eMMC 4.41 cards might get broken when HPI feature
- * is used so disable the HPI feature for such buggy cards.
- */
- MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
- 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
-
- END_FIXUP
-};
-
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
@@ -212,7 +203,7 @@ static void mmc_select_card_type(struct mmc_card *card)
avail_type |= EXT_CSD_CARD_TYPE_HS_52;
}
- if (caps & MMC_CAP_1_8V_DDR &&
+ if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
@@ -307,6 +298,18 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
}
}
+static void mmc_part_add(struct mmc_card *card, unsigned int size,
+ unsigned int part_cfg, char *name, int idx, bool ro,
+ int area_type)
+{
+ card->part[card->nr_parts].size = size;
+ card->part[card->nr_parts].part_cfg = part_cfg;
+ sprintf(card->part[card->nr_parts].name, name, idx);
+ card->part[card->nr_parts].force_ro = ro;
+ card->part[card->nr_parts].area_type = area_type;
+ card->nr_parts++;
+}
+
static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
@@ -530,8 +533,14 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
EXT_CSD_MANUAL_BKOPS_MASK);
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
- if (!card->ext_csd.man_bkops_en)
- pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
+ if (card->ext_csd.man_bkops_en)
+ pr_debug("%s: MAN_BKOPS_EN bit is set\n",
+ mmc_hostname(card->host));
+ card->ext_csd.auto_bkops_en =
+ (ext_csd[EXT_CSD_BKOPS_EN] &
+ EXT_CSD_AUTO_BKOPS_MASK);
+ if (card->ext_csd.auto_bkops_en)
+ pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
mmc_hostname(card->host));
}
@@ -617,6 +626,30 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.ffu_capable =
(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+
+ card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
+ card->ext_csd.device_life_time_est_typ_a =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
+ card->ext_csd.device_life_time_est_typ_b =
+ ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
+ }
+
+ /* eMMC v5.1 or later */
+ if (card->ext_csd.rev >= 8) {
+ card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
+ EXT_CSD_CMDQ_SUPPORTED;
+ card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
+ EXT_CSD_CMDQ_DEPTH_MASK) + 1;
+ /* Exclude inefficiently small queue depths */
+ if (card->ext_csd.cmdq_depth <= 2) {
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ }
+ if (card->ext_csd.cmdq_support) {
+ pr_debug("%s: Command Queue supported depth %u\n",
+ mmc_hostname(card->host),
+ card->ext_csd.cmdq_depth);
+ }
}
out:
return err;
@@ -746,6 +779,10 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
+ card->ext_csd.device_life_time_est_typ_a,
+ card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
@@ -799,6 +836,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_pre_eol_info.attr,
+ &dev_attr_life_time.attr,
&dev_attr_serial.attr,
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
@@ -1003,19 +1042,6 @@ static int mmc_select_bus_width(struct mmc_card *card)
return err;
}
-/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
-{
- u32 status;
- int err;
-
- err = mmc_send_status(card, &status);
- if (err)
- return err;
-
- return mmc_switch_status_error(card->host, status);
-}
-
/*
* Switch to the high-speed mode
*/
@@ -1025,13 +1051,8 @@ static int mmc_select_hs(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
- card->ext_csd.generic_cmd6_time,
- true, false, true);
- if (!err) {
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
- err = mmc_switch_status(card);
- }
-
+ card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
+ true, true, true);
if (err)
pr_warn("%s: switch to high-speed failed, err:%d\n",
mmc_hostname(card->host), err);
@@ -1058,10 +1079,12 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BUS_WIDTH,
- ext_csd_bits,
- card->ext_csd.generic_cmd6_time);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits,
+ card->ext_csd.generic_cmd6_time,
+ MMC_TIMING_MMC_DDR52,
+ true, true, true);
if (err) {
pr_err("%s: switch to bus width %d ddr failed\n",
mmc_hostname(host), 1 << bus_width);
@@ -1093,19 +1116,19 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
*
* WARNING: eMMC rules are NOT the same as SD DDR
*/
- err = -EINVAL;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ if (!err)
+ return 0;
+ }
- if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
+ host->caps & MMC_CAP_1_8V_DDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* make sure vccq is 3.3v after switching disaster */
if (err)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
-
- if (!err)
- mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
return err;
}
@@ -1128,7 +1151,7 @@ static int mmc_select_hs400(struct mmc_card *card)
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err) {
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
@@ -1163,7 +1186,7 @@ static int mmc_select_hs400(struct mmc_card *card)
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err) {
pr_err("%s: switch to hs400 failed, err:%d\n",
@@ -1206,7 +1229,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
/* Switch HS400 to HS DDR */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
- val, card->ext_csd.generic_cmd6_time,
+ val, card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err)
goto out_err;
@@ -1220,7 +1243,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
/* Switch HS DDR to HS */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
- true, false, true);
+ 0, true, false, true);
if (err)
goto out_err;
@@ -1234,14 +1257,19 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
- val, card->ext_csd.generic_cmd6_time,
+ val, card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- err = mmc_switch_status(card);
+ /*
+ * For HS200, CRC errors are not a reliable way to know the switch
+ * failed. If there really is a problem, we would expect tuning will
+ * fail and the result ends up the same.
+ */
+ err = __mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1267,10 +1295,10 @@ static int mmc_select_hs400es(struct mmc_card *card)
}
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If fails try again during next card power cycle */
if (err)
@@ -1281,16 +1309,23 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
/* Switch card to HS mode */
- err = mmc_select_hs(card);
- if (err)
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time, 0,
+ true, false, true);
+ if (err) {
+ pr_err("%s: switch to hs for hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
goto out_err;
+ }
- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
-
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
err = mmc_switch_status(card);
if (err)
goto out_err;
+ mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1308,7 +1343,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err) {
pr_err("%s: switch to hs400es failed, err:%d\n",
@@ -1369,10 +1404,10 @@ static int mmc_select_hs200(struct mmc_card *card)
old_signal_voltage = host->ios.signal_voltage;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
- err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If fails try again during next card power cycle */
if (err)
@@ -1390,14 +1425,20 @@ static int mmc_select_hs200(struct mmc_card *card)
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
- card->ext_csd.generic_cmd6_time,
+ card->ext_csd.generic_cmd6_time, 0,
true, false, true);
if (err)
goto err;
old_timing = host->ios.timing;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- err = mmc_switch_status(card);
+ /*
+ * For HS200, CRC errors are not a reliable way to know the
+ * switch failed. If there really is a problem, we would expect
+	 * tuning to fail and the result to end up the same.
+ */
+ err = __mmc_switch_status(card, false);
+
/*
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
@@ -1408,7 +1449,7 @@ static int mmc_select_hs200(struct mmc_card *card)
err:
if (err) {
/* fall back to the old signal voltage, if fails report error */
- if (__mmc_set_signal_voltage(host, old_signal_voltage))
+ if (mmc_set_signal_voltage(host, old_signal_voltage))
err = -EIO;
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
@@ -1480,7 +1521,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
u32 cid[4];
u32 rocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
/* Set correct bus mode for MMC before attempting init */
@@ -1690,10 +1730,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_hs400(card);
if (err)
goto free_card;
- } else if (mmc_card_hs(card)) {
+ } else if (!mmc_card_hs400es(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (err > 0) {
+ if (err > 0 && mmc_card_hs(card)) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
@@ -1789,7 +1829,7 @@ static int mmc_can_sleep(struct mmc_card *card)
static int mmc_sleep(struct mmc_host *host)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
struct mmc_card *card = host->card;
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
int err;
@@ -1854,7 +1894,7 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout, true, false, false);
+ notify_type, timeout, 0, true, false, false);
if (err)
pr_err("%s: Power Off Notification timed out, %u\n",
mmc_hostname(card->host), timeout);
@@ -1870,9 +1910,6 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
*/
static void mmc_remove(struct mmc_host *host)
{
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_remove_card(host->card);
host->card = NULL;
}
@@ -1892,9 +1929,6 @@ static void mmc_detect(struct mmc_host *host)
{
int err;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_get_card(host->card);
/*
@@ -1920,9 +1954,6 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
EXT_CSD_POWER_OFF_LONG;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
@@ -1979,9 +2010,6 @@ static int _mmc_resume(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (!mmc_card_suspended(host->card))
@@ -2114,7 +2142,6 @@ int mmc_attach_mmc(struct mmc_host *host)
int err;
u32 ocr, rocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
/* Set correct bus mode for MMC before attempting attach */
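
The recurring call-site change in this file is the extra 'timing' argument to
__mmc_switch(). A sketch of the one-call pattern it enables for the high-speed
switch (matching the mmc_select_hs() hunk above):

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   EXT_CSD_TIMING_HS, card->ext_csd.generic_cmd6_time,
			   MMC_TIMING_MMC_HS, true, true, true);
	/* __mmc_switch() now sets the host timing and polls CMD13 itself,
	 * reverting to the old timing if the status check fails. */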
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index ad6e9798e949..fe80f26d6971 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -54,21 +54,15 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
-static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
- bool ignore_crc)
+int mmc_send_status(struct mmc_card *card, u32 *status)
{
int err;
- struct mmc_command cmd = {0};
-
- BUG_ON(!card);
- BUG_ON(!card->host);
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- if (ignore_crc)
- cmd.flags &= ~MMC_RSP_CRC;
err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
if (err)
@@ -83,16 +77,9 @@ static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
return 0;
}
-int mmc_send_status(struct mmc_card *card, u32 *status)
-{
- return __mmc_send_status(card, status, false);
-}
-
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
- struct mmc_command cmd = {0};
-
- BUG_ON(!host);
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SELECT_CARD;
@@ -109,7 +96,6 @@ static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
int mmc_select_card(struct mmc_card *card)
{
- BUG_ON(!card);
return _mmc_select_card(card->host, card);
}
@@ -129,7 +115,7 @@ int mmc_deselect_cards(struct mmc_host *host)
*/
int mmc_set_dsr(struct mmc_host *host)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SET_DSR;
@@ -142,7 +128,7 @@ int mmc_set_dsr(struct mmc_host *host)
int mmc_go_idle(struct mmc_host *host)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
/*
* Non-SPI hosts need to prevent chipselect going active during
@@ -178,11 +164,9 @@ int mmc_go_idle(struct mmc_host *host)
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int i, err = 0;
- BUG_ON(!host);
-
cmd.opcode = MMC_SEND_OP_COND;
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
@@ -219,10 +203,7 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
int err;
- struct mmc_command cmd = {0};
-
- BUG_ON(!host);
- BUG_ON(!cid);
+ struct mmc_command cmd = {};
cmd.opcode = MMC_ALL_SEND_CID;
cmd.arg = 0;
@@ -239,10 +220,7 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
int mmc_set_relative_addr(struct mmc_card *card)
{
- struct mmc_command cmd = {0};
-
- BUG_ON(!card);
- BUG_ON(!card->host);
+ struct mmc_command cmd = {};
cmd.opcode = MMC_SET_RELATIVE_ADDR;
cmd.arg = card->rca << 16;
@@ -255,10 +233,7 @@ static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
int err;
- struct mmc_command cmd = {0};
-
- BUG_ON(!host);
- BUG_ON(!cxd);
+ struct mmc_command cmd = {};
cmd.opcode = opcode;
cmd.arg = arg;
@@ -281,9 +256,9 @@ static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
u32 opcode, void *buf, unsigned len)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
mrq.cmd = &cmd;
@@ -412,7 +387,7 @@ EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SPI_READ_OCR;
@@ -427,7 +402,7 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SPI_CRC_ON_OFF;
@@ -440,7 +415,7 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
return err;
}
-int mmc_switch_status_error(struct mmc_host *host, u32 status)
+static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
if (mmc_host_is_spi(host)) {
if (status & R1_SPI_ILLEGAL_COMMAND)
@@ -455,6 +430,85 @@ int mmc_switch_status_error(struct mmc_host *host, u32 status)
return 0;
}
+/* Caller must hold re-tuning */
+int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
+{
+ u32 status;
+ int err;
+
+ err = mmc_send_status(card, &status);
+ if (!crc_err_fatal && err == -EILSEQ)
+ return 0;
+ if (err)
+ return err;
+
+ return mmc_switch_status_error(card->host, status);
+}
+
+int mmc_switch_status(struct mmc_card *card)
+{
+ return __mmc_switch_status(card, true);
+}
+
+static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ bool send_status, bool retry_crc_err)
+{
+ struct mmc_host *host = card->host;
+ int err;
+ unsigned long timeout;
+ u32 status = 0;
+ bool expired = false;
+ bool busy = false;
+
+ /* We have an unspecified cmd timeout, use the fallback value. */
+ if (!timeout_ms)
+ timeout_ms = MMC_OPS_TIMEOUT_MS;
+
+ /*
+	 * If we are not allowed to poll with CMD13 and the host is not
+	 * capable of polling via ->card_busy(), rely on waiting out the
+	 * stated timeout instead.
+ */
+ if (!send_status && !host->ops->card_busy) {
+ mmc_delay(timeout_ms);
+ return 0;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ /*
+ * Due to the possibility of being preempted while polling,
+ * check the expiration time first.
+ */
+ expired = time_after(jiffies, timeout);
+
+ if (host->ops->card_busy) {
+ busy = host->ops->card_busy(host);
+ } else {
+ err = mmc_send_status(card, &status);
+ if (retry_crc_err && err == -EILSEQ) {
+ busy = true;
+ } else if (err) {
+ return err;
+ } else {
+ err = mmc_switch_status_error(host, status);
+ if (err)
+ return err;
+ busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
+ }
+ }
+
+ /* Timeout if the device still remains busy. */
+ if (expired && busy) {
+ pr_err("%s: Card stuck being busy! %s\n",
+ mmc_hostname(host), __func__);
+ return -ETIMEDOUT;
+ }
+ } while (busy);
+
+ return 0;
+}
+
/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
@@ -463,24 +517,22 @@ int mmc_switch_status_error(struct mmc_host *host, u32 status)
* @value: value to program into EXT_CSD register
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
+ * @timing: new timing to change to
* @use_busy_signal: use the busy signal as response type
* @send_status: send status cmd to poll for busy
- * @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
+ *	@retry_crc_err: retry on CRC errors when polling with CMD13 for busy
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms, bool use_busy_signal, bool send_status,
- bool ignore_crc)
+ unsigned int timeout_ms, unsigned char timing,
+ bool use_busy_signal, bool send_status, bool retry_crc_err)
{
struct mmc_host *host = card->host;
int err;
- struct mmc_command cmd = {0};
- unsigned long timeout;
- u32 status = 0;
+ struct mmc_command cmd = {};
bool use_r1b_resp = use_busy_signal;
- bool expired = false;
- bool busy = false;
+ unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
@@ -522,62 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
goto out;
- /*
- * CRC errors shall only be ignored in cases were CMD13 is used to poll
- * to detect busy completion.
- */
- if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- ignore_crc = false;
-
- /* We have an unspecified cmd timeout, use the fallback value. */
- if (!timeout_ms)
- timeout_ms = MMC_OPS_TIMEOUT_MS;
-
- /* Must check status to be sure of no errors. */
- timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
- do {
- /*
- * Due to the possibility of being preempted after
- * sending the status command, check the expiration
- * time first.
- */
- expired = time_after(jiffies, timeout);
- if (send_status) {
- err = __mmc_send_status(card, &status, ignore_crc);
- if (err)
- goto out;
- }
- if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- break;
- if (host->ops->card_busy) {
- if (!host->ops->card_busy(host))
- break;
- busy = true;
- }
- if (mmc_host_is_spi(host))
- break;
+	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
+ if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
+ mmc_host_is_spi(host))
+ goto out_tim;
- /*
- * We are not allowed to issue a status command and the host
- * does'nt support MMC_CAP_WAIT_WHILE_BUSY, then we can only
- * rely on waiting for the stated timeout to be sufficient.
- */
- if (!send_status && !host->ops->card_busy) {
- mmc_delay(timeout_ms);
- goto out;
- }
+ /* Let's try to poll to find out when the command is completed. */
+ err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+ if (err)
+ goto out;
- /* Timeout if the device never leaves the program state. */
- if (expired &&
- (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) {
- pr_err("%s: Card stuck in programming state! %s\n",
- mmc_hostname(host), __func__);
- err = -ETIMEDOUT;
- goto out;
- }
- } while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy);
+out_tim:
+	/* Switch to the new timing before checking the switch status. */
+ if (timing)
+ mmc_set_timing(host, timing);
- err = mmc_switch_status_error(host, status);
+ if (send_status) {
+ err = mmc_switch_status(card);
+ if (err && timing)
+ mmc_set_timing(host, old_timing);
+ }
out:
mmc_retune_release(host);
@@ -587,16 +603,16 @@ out:
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
- return __mmc_switch(card, set, index, value, timeout_ms, true, true,
- false);
+ return __mmc_switch(card, set, index, value, timeout_ms, 0,
+ true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
struct mmc_ios *ios = &host->ios;
const u8 *tuning_block_pattern;
@@ -661,13 +677,38 @@ out:
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
+int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+{
+ struct mmc_command cmd = {};
+
+ /*
+	 * The eMMC specification allows CMD12 to stop a tuning command, but
+	 * the SD specification does not, so do nothing unless it is eMMC.
+ */
+ if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ return 0;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ /*
+ * For drivers that override R1 to R1b, set an arbitrary timeout based
+ * on the tuning timeout i.e. 150ms.
+ */
+ cmd.busy_timeout = 150;
+
+ return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
u8 *data_buf;
u8 *test_buf;
@@ -761,7 +802,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
unsigned int opcode;
int err;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index f1b8e81aaa28..74beea8a9c7e 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -12,6 +12,11 @@
#ifndef _MMC_MMC_OPS_H
#define _MMC_MMC_OPS_H
+#include <linux/types.h>
+
+struct mmc_host;
+struct mmc_card;
+
int mmc_select_card(struct mmc_card *card);
int mmc_deselect_cards(struct mmc_host *host);
int mmc_set_dsr(struct mmc_host *host);
@@ -26,11 +31,21 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
+int mmc_interrupt_hpi(struct mmc_card *card);
int mmc_can_ext_csd(struct mmc_card *card);
-int mmc_switch_status_error(struct mmc_host *host, u32 status);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+int mmc_switch_status(struct mmc_card *card);
+int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
- unsigned int timeout_ms, bool use_busy_signal, bool send_status,
- bool ignore_crc);
+ unsigned int timeout_ms, unsigned char timing,
+ bool use_busy_signal, bool send_status, bool retry_crc_err);
+int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms);
+int mmc_stop_bkops(struct mmc_card *card);
+int mmc_read_bkops_status(struct mmc_card *card);
+void mmc_start_bkops(struct mmc_card *card, bool from_exception);
+int mmc_can_reset(struct mmc_card *card);
+int mmc_flush_cache(struct mmc_card *card);
#endif
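
A condensed sketch of the busy-detection strategy that mmc_poll_for_busy() now
centralizes (simplified, not the exact control flow; CMD13 errors are handled
more carefully in the real code):

	static bool card_is_busy(struct mmc_host *host, struct mmc_card *card)
	{
		u32 status;

		if (host->ops->card_busy)		/* host can sample DAT0 */
			return host->ops->card_busy(host);

		if (mmc_send_status(card, &status))	/* otherwise poll with CMD13 */
			return false;			/* error is treated as "not busy" in this sketch */

		return R1_CURRENT_STATE(status) == R1_STATE_PRG;
	}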
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 3678220964fe..f99ac3123fd2 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/mmc_test.c
- *
* Copyright 2007-2008 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
@@ -24,6 +22,11 @@
#include <linux/seq_file.h>
#include <linux/module.h>
+#include "core.h"
+#include "card.h"
+#include "host.h"
+#include "bus.h"
+
#define RESULT_OK 0
#define RESULT_FAIL 1
#define RESULT_UNSUP_HOST 2
@@ -214,7 +217,8 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
- BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
+ return;
if (blocks > 1) {
mrq->cmd->opcode = write ?
@@ -261,7 +265,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
int ret, busy;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
busy = 0;
do {
@@ -278,8 +282,7 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- pr_info("%s: Warning: Host did not "
- "wait for busy state to end.\n",
+ pr_info("%s: Warning: Host did not wait for busy state to end.\n",
mmc_hostname(test->card->host));
}
} while (mmc_test_busy(&cmd));
@@ -293,10 +296,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
u8 *buffer, unsigned addr, unsigned blksz, int write)
{
- struct mmc_request mrq = {0};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
struct scatterlist sg;
@@ -358,12 +361,11 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
if (max_segs > max_page_cnt)
max_segs = max_page_cnt;
- mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return NULL;
- mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
- GFP_KERNEL);
+ mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
if (!mem->arr)
goto out_free;
@@ -547,7 +549,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
if (!test->gr)
return;
- tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
+ tr = kmalloc(sizeof(*tr), GFP_KERNEL);
if (!tr)
return;
@@ -642,11 +644,11 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
if (write)
memset(test->buffer, 0xDF, 512);
else {
- for (i = 0;i < 512;i++)
+ for (i = 0; i < 512; i++)
test->buffer[i] = i;
}
- for (i = 0;i < BUFFER_SIZE / 512;i++) {
+ for (i = 0; i < BUFFER_SIZE / 512; i++) {
ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
@@ -675,7 +677,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
memset(test->buffer, 0, 512);
- for (i = 0;i < BUFFER_SIZE / 512;i++) {
+ for (i = 0; i < BUFFER_SIZE / 512; i++) {
ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
@@ -694,7 +696,8 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
struct mmc_request *mrq, int write)
{
- BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return;
if (mrq->data->blocks > 1) {
mrq->cmd->opcode = write ?
@@ -714,7 +717,8 @@ static int mmc_test_check_result(struct mmc_test_card *test,
{
int ret;
- BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return -EINVAL;
ret = 0;
@@ -736,15 +740,28 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
-static int mmc_test_check_result_async(struct mmc_card *card,
+static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
struct mmc_async_req *areq)
{
struct mmc_test_async_req *test_async =
container_of(areq, struct mmc_test_async_req, areq);
+ int ret;
mmc_test_wait_busy(test_async->test);
- return mmc_test_check_result(test_async->test, areq->mrq);
+ /*
+	 * FIXME: this used to simply cast a regular error code, either a
+	 * kernel -ERRORCODE or a local test framework RESULT_* code, into an
+	 * enum mmc_blk_status and return it as the result of the check.
+	 * Instead, convert it to something reasonable by returning either
+	 * MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
+	 * If possible, a more fine-grained error code should be returned.
+ */
+ ret = mmc_test_check_result(test_async->test, areq->mrq);
+ if (ret)
+ return MMC_BLK_CMD_ERR;
+
+ return MMC_BLK_SUCCESS;
}
/*
@@ -755,7 +772,8 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
{
int ret;
- BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+ if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+ return -EINVAL;
ret = 0;
@@ -817,8 +835,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
struct mmc_async_req *done_areq;
struct mmc_async_req *cur_areq = &test_areq[0].areq;
struct mmc_async_req *other_areq = &test_areq[1].areq;
+ enum mmc_blk_status status;
int i;
- int ret;
+ int ret = RESULT_OK;
test_areq[0].test = test;
test_areq[1].test = test;
@@ -834,10 +853,12 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
for (i = 0; i < count; i++) {
mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
blocks, blksz, write);
- done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+ done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
- if (ret || (!done_areq && i > 0))
+ if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
+ ret = RESULT_FAIL;
goto err;
+ }
if (done_areq) {
if (done_areq->mrq == &mrq2)
@@ -851,7 +872,9 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
dev_addr += blocks;
}
- done_areq = mmc_start_req(test->card->host, NULL, &ret);
+ done_areq = mmc_start_areq(test->card->host, NULL, &status);
+ if (status != MMC_BLK_SUCCESS)
+ ret = RESULT_FAIL;
return ret;
err:
@@ -865,10 +888,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
unsigned blocks, unsigned blksz, int write)
{
- struct mmc_request mrq = {0};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
mrq.cmd = &cmd;
mrq.data = &data;
@@ -890,10 +913,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
static int mmc_test_broken_transfer(struct mmc_test_card *test,
unsigned blocks, unsigned blksz, int write)
{
- struct mmc_request mrq = {0};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
struct scatterlist sg;
@@ -926,7 +949,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
unsigned long flags;
if (write) {
- for (i = 0;i < blocks * blksz;i++)
+ for (i = 0; i < blocks * blksz; i++)
test->scratch[i] = i;
} else {
memset(test->scratch, 0, BUFFER_SIZE);
@@ -960,7 +983,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
memset(test->buffer, 0, sectors * 512);
- for (i = 0;i < sectors;i++) {
+ for (i = 0; i < sectors; i++) {
ret = mmc_test_buffer_transfer(test,
test->buffer + i * 512,
dev_addr + i, 512, 0);
@@ -968,12 +991,12 @@ static int mmc_test_transfer(struct mmc_test_card *test,
return ret;
}
- for (i = 0;i < blocks * blksz;i++) {
+ for (i = 0; i < blocks * blksz; i++) {
if (test->buffer[i] != (u8)i)
return RESULT_FAIL;
}
- for (;i < sectors * 512;i++) {
+ for (; i < sectors * 512; i++) {
if (test->buffer[i] != 0xDF)
return RESULT_FAIL;
}
@@ -981,7 +1004,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
local_irq_save(flags);
sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
local_irq_restore(flags);
- for (i = 0;i < blocks * blksz;i++) {
+ for (i = 0; i < blocks * blksz; i++) {
if (test->scratch[i] != (u8)i)
return RESULT_FAIL;
}
@@ -1066,7 +1089,7 @@ static int mmc_test_multi_write(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, size);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
static int mmc_test_multi_read(struct mmc_test_card *test)
@@ -1087,7 +1110,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, size);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
@@ -1098,7 +1121,7 @@ static int mmc_test_pow2_write(struct mmc_test_card *test)
if (!test->card->csd.write_partial)
return RESULT_UNSUP_CARD;
- for (i = 1; i < 512;i <<= 1) {
+ for (i = 1; i < 512; i <<= 1) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
if (ret)
@@ -1116,7 +1139,7 @@ static int mmc_test_pow2_read(struct mmc_test_card *test)
if (!test->card->csd.read_partial)
return RESULT_UNSUP_CARD;
- for (i = 1; i < 512;i <<= 1) {
+ for (i = 1; i < 512; i <<= 1) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
if (ret)
@@ -1134,7 +1157,7 @@ static int mmc_test_weird_write(struct mmc_test_card *test)
if (!test->card->csd.write_partial)
return RESULT_UNSUP_CARD;
- for (i = 3; i < 512;i += 7) {
+ for (i = 3; i < 512; i += 7) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
if (ret)
@@ -1152,7 +1175,7 @@ static int mmc_test_weird_read(struct mmc_test_card *test)
if (!test->card->csd.read_partial)
return RESULT_UNSUP_CARD;
- for (i = 3; i < 512;i += 7) {
+ for (i = 3; i < 512; i += 7) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
if (ret)
@@ -1211,7 +1234,7 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test)
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, size);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
if (ret)
return ret;
}
@@ -1238,7 +1261,7 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, size);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
if (ret)
return ret;
}
@@ -1337,7 +1360,7 @@ static int mmc_test_multi_write_high(struct mmc_test_card *test)
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
static int mmc_test_multi_read_high(struct mmc_test_card *test)
@@ -1359,7 +1382,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
- return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
#else
@@ -1513,7 +1536,7 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
/*
* Initialize an area for testing large transfers. The test area is set to the
- * middle of the card because cards may have different charateristics at the
+ * middle of the card because cards may have different characteristics at the
* front (for FAT file system optimization). Optionally, the area is erased
* (if the card supports it) which may improve write performance. Optionally,
* the area is filled with data for subsequent read tests.
@@ -1559,7 +1582,7 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
if (!t->mem)
return -ENOMEM;
- t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+ t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
if (!t->sg) {
ret = -ENOMEM;
goto out_free;
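The conversion above replaces an open-coded size multiplication with kmalloc_array() and ties the element size to the pointer via sizeof(*t->sg), so the byte count cannot silently overflow and the type stays in sync if the member ever changes. A minimal userspace sketch of the overflow-checked idiom (the helper below is a simplified stand-in, not the kernel implementation):

/* Simplified userspace model of an overflow-checked array allocation. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *alloc_array(size_t n, size_t size)
{
	/* Refuse counts whose byte total would overflow size_t. */
	if (size && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	struct { int page; int length; } *sg;

	/* Element count first, element size second, as in the hunk above. */
	sg = alloc_array(128, sizeof(*sg));
	if (!sg)
		return 1;
	printf("allocated %zu bytes\n", 128 * sizeof(*sg));
	free(sg);
	return 0;
}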
@@ -2127,7 +2150,7 @@ static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
int i;
for (i = 0 ; i < rw->len && ret == 0; i++) {
- ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+ ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
rw->sg_len[i]);
if (ret)
break;
@@ -2351,6 +2374,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
+ enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
@@ -2378,9 +2402,11 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_req(host, &test_areq.areq, &ret);
- if (ret)
+ mmc_start_areq(host, &test_areq.areq, &blkstat);
+ if (blkstat != MMC_BLK_SUCCESS) {
+ ret = RESULT_FAIL;
goto out_free;
+ }
} else {
mmc_wait_for_req(host, mrq);
}
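The hunks above move the test from mmc_start_req(), which reported errors through an int pointer, to mmc_start_areq(), which reports an enum mmc_blk_status: the caller starts the ongoing data request, polls the card with CMD13, and later calls the same function with a NULL request to wait for the first one to finish. A userspace sketch of that start/drain pairing (the types and names below are illustrative, not the kernel API):

/* Passing a request starts it; passing NULL drains the previous one. */
#include <stdio.h>
#include <stddef.h>

enum blk_status { BLK_SUCCESS, BLK_FAIL };

struct areq { const char *name; };

static struct areq *pending;

static void start_areq(struct areq *next, enum blk_status *status)
{
	if (pending)
		printf("completing %s\n", pending->name);  /* wait for previous */
	*status = BLK_SUCCESS;
	pending = next;                                    /* queue the new one */
	if (next)
		printf("started %s\n", next->name);
}

int main(void)
{
	struct areq req = { "ongoing transfer" };
	enum blk_status st;

	start_areq(&req, &st);        /* kick off the data request */
	if (st != BLK_SUCCESS)
		return 1;

	/* ... CMD13 polling would happen here, as in the test ... */

	start_areq(NULL, &st);        /* drain: wait for the data request */
	return st == BLK_SUCCESS ? 0 : 1;
}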
@@ -2413,10 +2439,13 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
/* Wait for data request to complete */
- if (use_areq)
- mmc_start_req(host, NULL, &ret);
- else
+ if (use_areq) {
+ mmc_start_areq(host, NULL, &blkstat);
+ if (blkstat != MMC_BLK_SUCCESS)
+ ret = RESULT_FAIL;
+ } else {
mmc_wait_for_req_done(test->card->host, mrq);
+ }
/*
* For cap_cmd_during_tfr request, upper layer must send stop if
@@ -2928,7 +2957,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_claim_host(test->card->host);
- for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
struct mmc_test_general_result *gr;
if (testcase && ((i + 1) != testcase))
@@ -2941,16 +2970,14 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (mmc_test_cases[i].prepare) {
ret = mmc_test_cases[i].prepare(test);
if (ret) {
- pr_info("%s: Result: Prepare "
- "stage failed! (%d)\n",
+ pr_info("%s: Result: Prepare stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
continue;
}
}
- gr = kzalloc(sizeof(struct mmc_test_general_result),
- GFP_KERNEL);
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
if (gr) {
INIT_LIST_HEAD(&gr->tr_lst);
@@ -2979,13 +3006,11 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_HOST:
- pr_info("%s: Result: UNSUPPORTED "
- "(by host)\n",
+ pr_info("%s: Result: UNSUPPORTED (by host)\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_CARD:
- pr_info("%s: Result: UNSUPPORTED "
- "(by card)\n",
+ pr_info("%s: Result: UNSUPPORTED (by card)\n",
mmc_hostname(test->card->host));
break;
default:
@@ -3000,8 +3025,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (mmc_test_cases[i].cleanup) {
ret = mmc_test_cases[i].cleanup(test);
if (ret) {
- pr_info("%s: Warning: Cleanup "
- "stage failed! (%d)\n",
+ pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
}
@@ -3087,7 +3111,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
if (ret)
return ret;
- test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
+ test = kzalloc(sizeof(*test), GFP_KERNEL);
if (!test)
return -ENOMEM;
@@ -3137,9 +3161,9 @@ static int mtf_testlist_show(struct seq_file *sf, void *data)
mutex_lock(&mmc_test_lock);
- seq_printf(sf, "0:\tRun all tests\n");
+ seq_puts(sf, "0:\tRun all tests\n");
for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
- seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+ seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
mutex_unlock(&mmc_test_lock);
@@ -3192,7 +3216,7 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
return -ENODEV;
}
- df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
+ df = kmalloc(sizeof(*df), GFP_KERNEL);
if (!df) {
debugfs_remove(file);
dev_err(&card->dev,
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index d69e751f148b..39c911aa6ebb 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,7 +8,11 @@
#ifndef _MMC_CORE_PWRSEQ_H
#define _MMC_CORE_PWRSEQ_H
-#include <linux/mmc/host.h>
+#include <linux/types.h>
+
+struct mmc_host;
+struct device;
+struct module;
struct mmc_pwrseq_ops {
void (*pre_power_on)(struct mmc_host *host);
diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
new file mode 100644
index 000000000000..1a21e14458d3
--- /dev/null
+++ b/drivers/mmc/core/pwrseq_sd8787.c
@@ -0,0 +1,117 @@
+/*
+ * pwrseq_sd8787.c - power sequence support for Marvell SD8787 BT + Wifi chip
+ *
+ * Copyright (C) 2016 Matt Ranostay <matt@ranostay.consulting>
+ *
+ * Based on the original work pwrseq_simple.c
+ * Copyright (C) 2014 Linaro Ltd
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_sd8787 {
+ struct mmc_pwrseq pwrseq;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *pwrdn_gpio;
+};
+
+#define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
+
+static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+
+ msleep(300);
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
+}
+
+static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+}
+
+static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
+ .pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
+ .power_off = mmc_pwrseq_sd8787_power_off,
+};
+
+static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
+ { .compatible = "mmc-pwrseq-sd8787",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
+
+static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq;
+ struct device *dev = &pdev->dev;
+
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
+ pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->pwrdn_gpio))
+ return PTR_ERR(pwrseq->pwrdn_gpio);
+
+ pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
+
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = platform_get_drvdata(pdev);
+
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static struct platform_driver mmc_pwrseq_sd8787_driver = {
+ .probe = mmc_pwrseq_sd8787_probe,
+ .remove = mmc_pwrseq_sd8787_remove,
+ .driver = {
+ .name = "pwrseq_sd8787",
+ .of_match_table = mmc_pwrseq_sd8787_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_sd8787_driver);
+MODULE_LICENSE("GPL v2");
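The driver keeps its GPIO descriptors next to the embedded struct mmc_pwrseq and recovers them inside the callbacks with the to_pwrseq_sd8787() container_of() accessor. A userspace illustration of that embed-and-recover pattern (the structures below are simplified stand-ins for the kernel types):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pwrseq { const char *name; };

struct pwrseq_sd8787 {
	struct pwrseq pwrseq;	/* generic part handed to the core */
	int reset_gpio;		/* driver-private state */
};

static void pre_power_on(struct pwrseq *p)
{
	struct pwrseq_sd8787 *priv =
		container_of(p, struct pwrseq_sd8787, pwrseq);

	printf("asserting reset gpio %d for %s\n", priv->reset_gpio, p->name);
}

int main(void)
{
	struct pwrseq_sd8787 dev = {
		.pwrseq = { .name = "sd8787" },
		.reset_gpio = 17,
	};

	/* The core only sees &dev.pwrseq; the callback recovers dev. */
	pre_power_on(&dev.pwrseq);
	return 0;
}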
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/core/queue.c
index 8037f73a109a..493eb10ce580 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/mmc/card/queue.c
- *
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
*
@@ -22,6 +20,8 @@
#include "queue.h"
#include "block.h"
+#include "core.h"
+#include "card.h"
#define MMC_QUEUE_BOUNCESZ 65536
@@ -32,19 +32,10 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- /*
- * We only like normal block requests and discards.
- */
- if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
- req_op(req) != REQ_OP_SECURE_ERASE) {
- blk_dump_rq_flags(req, "MMC bad request");
- return BLKPREP_KILL;
- }
-
if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
- req->cmd_flags |= REQ_DONTPREP;
+ req->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}
@@ -53,6 +44,7 @@ static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
+ struct mmc_context_info *cntx = &mq->card->host->context_info;
current->flags |= PF_MEMALLOC;
@@ -63,6 +55,19 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
+ mq->asleep = false;
+ cntx->is_waiting_last_req = false;
+ cntx->is_new_req = false;
+ if (!req) {
+ /*
+ * Dispatch queue is empty so set flags for
+ * mmc_request_fn() to wake us up.
+ */
+ if (mq->mqrq_prev->req)
+ cntx->is_waiting_last_req = true;
+ else
+ mq->asleep = true;
+ }
mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
@@ -72,8 +77,8 @@ static int mmc_queue_thread(void *d)
set_current_state(TASK_RUNNING);
mmc_blk_issue_rq(mq, req);
cond_resched();
- if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (mq->new_request) {
+ mq->new_request = false;
continue; /* fetch again */
}
@@ -115,31 +120,24 @@ static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
- unsigned long flags;
struct mmc_context_info *cntx;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
- req->cmd_flags |= REQ_QUIET;
+ req->rq_flags |= RQF_QUIET;
__blk_end_request_all(req, -EIO);
}
return;
}
cntx = &mq->card->host->context_info;
- if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
- /*
- * New MMC request arrived when MMC thread may be
- * blocked on the previous request to be complete
- * with no current request fetched
- */
- spin_lock_irqsave(&cntx->lock, flags);
- if (cntx->is_waiting_last_req) {
- cntx->is_new_req = true;
- wake_up_interruptible(&cntx->wait);
- }
- spin_unlock_irqrestore(&cntx->lock, flags);
- } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+
+ if (cntx->is_waiting_last_req) {
+ cntx->is_new_req = true;
+ wake_up_interruptible(&cntx->wait);
+ }
+
+ if (mq->asleep)
wake_up_process(mq->thread);
}
@@ -147,7 +145,7 @@ static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
struct scatterlist *sg;
- sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
if (!sg)
*err = -ENOMEM;
else {
@@ -179,6 +177,82 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
+ unsigned int bouncesz)
+{
+ int i;
+
+ for (i = 0; i < mq->qdepth; i++) {
+ mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mq->mqrq[i].bounce_buf)
+ goto out_err;
+ }
+
+ return true;
+
+out_err:
+ while (--i >= 0) {
+ kfree(mq->mqrq[i].bounce_buf);
+ mq->mqrq[i].bounce_buf = NULL;
+ }
+ pr_warn("%s: unable to allocate bounce buffers\n",
+ mmc_card_name(mq->card));
+ return false;
+}
+
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+ unsigned int bouncesz)
+{
+ int i, ret;
+
+ for (i = 0; i < mq->qdepth; i++) {
+ mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ return ret;
+
+ mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+{
+ int i, ret;
+
+ for (i = 0; i < mq->qdepth; i++) {
+ mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+ kfree(mqrq->bounce_sg);
+ mqrq->bounce_sg = NULL;
+
+ kfree(mqrq->sg);
+ mqrq->sg = NULL;
+
+ kfree(mqrq->bounce_buf);
+ mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+ int i;
+
+ for (i = 0; i < mq->qdepth; i++)
+ mmc_queue_req_free_bufs(&mq->mqrq[i]);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -193,9 +267,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
- int ret;
- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+ bool bounce = false;
+ int ret = -ENOMEM;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -205,8 +278,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (!mq->queue)
return -ENOMEM;
- mq->mqrq_cur = mqrq_cur;
- mq->mqrq_prev = mqrq_prev;
+ mq->qdepth = 2;
+ mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
+ GFP_KERNEL);
+ if (!mq->mqrq)
+ goto blk_cleanup;
+ mq->mqrq_cur = &mq->mqrq[0];
+ mq->mqrq_prev = &mq->mqrq[1];
mq->queue->queuedata = mq;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -228,63 +306,29 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (bouncesz > (host->max_blk_count * 512))
bouncesz = host->max_blk_count * 512;
- if (bouncesz > 512) {
- mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mqrq_cur->bounce_buf) {
- pr_warn("%s: unable to allocate bounce cur buffer\n",
- mmc_card_name(card));
- } else {
- mqrq_prev->bounce_buf =
- kmalloc(bouncesz, GFP_KERNEL);
- if (!mqrq_prev->bounce_buf) {
- pr_warn("%s: unable to allocate bounce prev buffer\n",
- mmc_card_name(card));
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
- }
- }
- }
-
- if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
+ if (bouncesz > 512 &&
+ mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mqrq_cur->sg = mmc_alloc_sg(1, &ret);
- if (ret)
- goto cleanup_queue;
-
- mqrq_cur->bounce_sg =
- mmc_alloc_sg(bouncesz / 512, &ret);
- if (ret)
- goto cleanup_queue;
-
- mqrq_prev->sg = mmc_alloc_sg(1, &ret);
- if (ret)
- goto cleanup_queue;
-
- mqrq_prev->bounce_sg =
- mmc_alloc_sg(bouncesz / 512, &ret);
+ ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
if (ret)
goto cleanup_queue;
+ bounce = true;
}
}
#endif
- if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+ if (!bounce) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
- if (ret)
- goto cleanup_queue;
-
-
- mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ ret = mmc_queue_alloc_sgs(mq, host->max_segs);
if (ret)
goto cleanup_queue;
}
@@ -296,27 +340,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
- goto free_bounce_sg;
+ goto cleanup_queue;
}
return 0;
- free_bounce_sg:
- kfree(mqrq_cur->bounce_sg);
- mqrq_cur->bounce_sg = NULL;
- kfree(mqrq_prev->bounce_sg);
- mqrq_prev->bounce_sg = NULL;
cleanup_queue:
- kfree(mqrq_cur->sg);
- mqrq_cur->sg = NULL;
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
-
- kfree(mqrq_prev->sg);
- mqrq_prev->sg = NULL;
- kfree(mqrq_prev->bounce_buf);
- mqrq_prev->bounce_buf = NULL;
-
+ mmc_queue_reqs_free_bufs(mq);
+ kfree(mq->mqrq);
+ mq->mqrq = NULL;
+blk_cleanup:
blk_cleanup_queue(mq->queue);
return ret;
}
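mmc_init_queue() now allocates the qdepth request slots as one kcalloc'd array, points mqrq_cur and mqrq_prev into it, and unwinds every error path through the single mmc_queue_reqs_free_bufs() helper. A stripped-down userspace model of that allocate-array-then-unified-cleanup shape (slot contents are placeholders):

#include <stdio.h>
#include <stdlib.h>

struct slot { void *sg; void *bounce; };

struct queue {
	int qdepth;
	struct slot *slots;
};

static void free_slot_bufs(struct queue *q)
{
	for (int i = 0; i < q->qdepth; i++) {
		free(q->slots[i].sg);
		q->slots[i].sg = NULL;
		free(q->slots[i].bounce);
		q->slots[i].bounce = NULL;
	}
}

static int init_queue(struct queue *q)
{
	q->qdepth = 2;
	q->slots = calloc(q->qdepth, sizeof(*q->slots));
	if (!q->slots)
		return -1;

	for (int i = 0; i < q->qdepth; i++) {
		q->slots[i].sg = malloc(64);
		if (!q->slots[i].sg)
			goto cleanup;
	}
	return 0;

cleanup:
	free_slot_bufs(q);	/* safe even for partially filled slots */
	free(q->slots);
	q->slots = NULL;
	return -1;
}

int main(void)
{
	struct queue q;

	if (init_queue(&q))
		return 1;
	free_slot_bufs(&q);
	free(q.slots);
	return 0;
}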
@@ -325,8 +358,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
- struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
- struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -340,71 +371,14 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- kfree(mqrq_cur->bounce_sg);
- mqrq_cur->bounce_sg = NULL;
-
- kfree(mqrq_cur->sg);
- mqrq_cur->sg = NULL;
-
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
-
- kfree(mqrq_prev->bounce_sg);
- mqrq_prev->bounce_sg = NULL;
-
- kfree(mqrq_prev->sg);
- mqrq_prev->sg = NULL;
-
- kfree(mqrq_prev->bounce_buf);
- mqrq_prev->bounce_buf = NULL;
+ mmc_queue_reqs_free_bufs(mq);
+ kfree(mq->mqrq);
+ mq->mqrq = NULL;
mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
-int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
-{
- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
- int ret = 0;
-
-
- mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
- if (!mqrq_cur->packed) {
- pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
- mmc_card_name(card));
- ret = -ENOMEM;
- goto out;
- }
-
- mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
- if (!mqrq_prev->packed) {
- pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
- mmc_card_name(card));
- kfree(mqrq_cur->packed);
- mqrq_cur->packed = NULL;
- ret = -ENOMEM;
- goto out;
- }
-
- INIT_LIST_HEAD(&mqrq_cur->packed->list);
- INIT_LIST_HEAD(&mqrq_prev->packed->list);
-
-out:
- return ret;
-}
-
-void mmc_packed_clean(struct mmc_queue *mq)
-{
- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
-
- kfree(mqrq_cur->packed);
- mqrq_cur->packed = NULL;
- kfree(mqrq_prev->packed);
- mqrq_prev->packed = NULL;
-}
-
/**
* mmc_queue_suspend - suspend a MMC request queue
* @mq: MMC queue to suspend
@@ -418,8 +392,8 @@ void mmc_queue_suspend(struct mmc_queue *mq)
struct request_queue *q = mq->queue;
unsigned long flags;
- if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
- mq->flags |= MMC_QUEUE_SUSPENDED;
+ if (!mq->suspended) {
+ mq->suspended = true;
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
@@ -438,8 +412,8 @@ void mmc_queue_resume(struct mmc_queue *mq)
struct request_queue *q = mq->queue;
unsigned long flags;
- if (mq->flags & MMC_QUEUE_SUSPENDED) {
- mq->flags &= ~MMC_QUEUE_SUSPENDED;
+ if (mq->suspended) {
+ mq->suspended = false;
up(&mq->thread_sem);
@@ -449,41 +423,6 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
-static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
- struct mmc_packed *packed,
- struct scatterlist *sg,
- enum mmc_packed_type cmd_type)
-{
- struct scatterlist *__sg = sg;
- unsigned int sg_len = 0;
- struct request *req;
-
- if (mmc_packed_wr(cmd_type)) {
- unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
- unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
- unsigned int len, remain, offset = 0;
- u8 *buf = (u8 *)packed->cmd_hdr;
-
- remain = hdr_sz;
- do {
- len = min(remain, max_seg_sz);
- sg_set_buf(__sg, buf + offset, len);
- offset += len;
- remain -= len;
- sg_unmark_end(__sg++);
- sg_len++;
- } while (remain);
- }
-
- list_for_each_entry(req, &packed->list, queuelist) {
- sg_len += blk_rq_map_sg(mq->queue, req, __sg);
- __sg = sg + (sg_len - 1);
- sg_unmark_end(__sg++);
- }
- sg_mark_end(sg + (sg_len - 1));
- return sg_len;
-}
-
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
@@ -492,26 +431,12 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
- enum mmc_packed_type cmd_type;
int i;
- cmd_type = mqrq->cmd_type;
-
- if (!mqrq->bounce_buf) {
- if (mmc_packed_cmd(cmd_type))
- return mmc_queue_packed_map_sg(mq, mqrq->packed,
- mqrq->sg, cmd_type);
- else
- return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
- }
-
- BUG_ON(!mqrq->bounce_sg);
+ if (!mqrq->bounce_buf)
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
- if (mmc_packed_cmd(cmd_type))
- sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
- mqrq->bounce_sg, cmd_type);
- else
- sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/core/queue.h
index 342f1e3f301e..e298f100101b 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -1,6 +1,11 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+
static inline bool mmc_req_is_special(struct request *req)
{
return req &&
@@ -9,8 +14,8 @@ static inline bool mmc_req_is_special(struct request *req)
req_op(req) == REQ_OP_SECURE_ERASE);
}
-struct request;
struct task_struct;
+struct mmc_blk_data;
struct mmc_blk_request {
struct mmc_request mrq;
@@ -21,23 +26,6 @@ struct mmc_blk_request {
int retune_retry_done;
};
-enum mmc_packed_type {
- MMC_PACKED_NONE = 0,
- MMC_PACKED_WRITE,
-};
-
-#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE)
-#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE)
-
-struct mmc_packed {
- struct list_head list;
- __le32 cmd_hdr[1024];
- unsigned int blocks;
- u8 nr_entries;
- u8 retries;
- s16 idx_failure;
-};
-
struct mmc_queue_req {
struct request *req;
struct mmc_blk_request brq;
@@ -45,23 +33,22 @@ struct mmc_queue_req {
char *bounce_buf;
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
- struct mmc_async_req mmc_active;
- enum mmc_packed_type cmd_type;
- struct mmc_packed *packed;
+ struct mmc_async_req areq;
};
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
- unsigned int flags;
-#define MMC_QUEUE_SUSPENDED (1 << 0)
-#define MMC_QUEUE_NEW_REQUEST (1 << 1)
- void *data;
+ bool new_request;
+ bool suspended;
+ bool asleep;
+ struct mmc_blk_data *blkdata;
struct request_queue *queue;
- struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq;
struct mmc_queue_req *mqrq_cur;
struct mmc_queue_req *mqrq_prev;
+ int qdepth;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -75,9 +62,6 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);
-extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
-extern void mmc_packed_clean(struct mmc_queue *);
-
extern int mmc_access_rpmb(struct mmc_queue *);
#endif
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
deleted file mode 100644
index ca9cade317c7..000000000000
--- a/drivers/mmc/core/quirks.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * This file contains work-arounds for many known SD/MMC
- * and SDIO hardware bugs.
- *
- * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
- * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
- * Inspired from pci fixup code:
- * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_ids.h>
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
-#ifndef SDIO_VENDOR_ID_STE
-#define SDIO_VENDOR_ID_STE 0x0020
-#endif
-
-#ifndef SDIO_DEVICE_ID_STE_CW1200
-#define SDIO_DEVICE_ID_STE_CW1200 0x2280
-#endif
-
-#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0
-#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
-#endif
-
-static const struct mmc_fixup mmc_fixup_methods[] = {
- SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
- add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
-
- SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
- add_quirk, MMC_QUIRK_DISABLE_CD),
-
- SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
- add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
-
- SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
- add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
-
- END_FIXUP
-};
-
-void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
-{
- const struct mmc_fixup *f;
- u64 rev = cid_rev_card(card);
-
- /* Non-core specific workarounds. */
- if (!table)
- table = mmc_fixup_methods;
-
- for (f = table; f->vendor_fixup; f++) {
- if ((f->manfid == CID_MANFID_ANY ||
- f->manfid == card->cid.manfid) &&
- (f->oemid == CID_OEMID_ANY ||
- f->oemid == card->cid.oemid) &&
- (f->name == CID_NAME_ANY ||
- !strncmp(f->name, card->cid.prod_name,
- sizeof(card->cid.prod_name))) &&
- (f->cis_vendor == card->cis.vendor ||
- f->cis_vendor == (u16) SDIO_ANY_ID) &&
- (f->cis_device == card->cis.device ||
- f->cis_device == (u16) SDIO_ANY_ID) &&
- (f->ext_csd_rev == EXT_CSD_REV_ANY ||
- f->ext_csd_rev == card->ext_csd.rev) &&
- rev >= f->rev_start && rev <= f->rev_end) {
- dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
- f->vendor_fixup(card, f->data);
- }
- }
-}
-EXPORT_SYMBOL(mmc_fixup_device);
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
new file mode 100644
index 000000000000..fb725934fa21
--- /dev/null
+++ b/drivers/mmc/core/quirks.h
@@ -0,0 +1,148 @@
+/*
+ * This file contains work-arounds for many known SD/MMC
+ * and SDIO hardware bugs.
+ *
+ * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/mmc/sdio_ids.h>
+
+#include "card.h"
+
+static const struct mmc_fixup mmc_blk_fixups[] = {
+#define INAND_CMD38_ARG_EXT_CSD 113
+#define INAND_CMD38_ARG_ERASE 0x00
+#define INAND_CMD38_ARG_TRIM 0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+ /* CMD38 argument is passed through EXT_CSD[113] */
+ MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+
+ /*
+ * Some MMC cards experience performance degradation with CMD23
+ * instead of CMD12-bounded multiblock transfers. For now we'll
+ * blacklist what's bad...
+ * - Certain Toshiba cards.
+ *
+ * N.B. This doesn't affect SD cards.
+ */
+ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some MMC cards need longer data read timeout than indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
+ /*
+ * On some Kingston eMMCs, performing trim can occasionally result in
+ * unrecoverable data corruption due to a firmware bug.
+ */
+ MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+ MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
+ END_FIXUP
+};
+
+static const struct mmc_fixup mmc_ext_csd_fixups[] = {
+ /*
+ * Certain Hynix eMMC 4.41 cards might get broken when the HPI
+ * feature is used, so disable the HPI feature for such buggy cards.
+ */
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
+ 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+
+ END_FIXUP
+};
+
+static const struct mmc_fixup sdio_fixup_methods[] = {
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
+ add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
+ add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+
+ END_FIXUP
+};
+
+static inline void mmc_fixup_device(struct mmc_card *card,
+ const struct mmc_fixup *table)
+{
+ const struct mmc_fixup *f;
+ u64 rev = cid_rev_card(card);
+
+ for (f = table; f->vendor_fixup; f++) {
+ if ((f->manfid == CID_MANFID_ANY ||
+ f->manfid == card->cid.manfid) &&
+ (f->oemid == CID_OEMID_ANY ||
+ f->oemid == card->cid.oemid) &&
+ (f->name == CID_NAME_ANY ||
+ !strncmp(f->name, card->cid.prod_name,
+ sizeof(card->cid.prod_name))) &&
+ (f->cis_vendor == card->cis.vendor ||
+ f->cis_vendor == (u16) SDIO_ANY_ID) &&
+ (f->cis_device == card->cis.device ||
+ f->cis_device == (u16) SDIO_ANY_ID) &&
+ (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+ f->ext_csd_rev == card->ext_csd.rev) &&
+ rev >= f->rev_start && rev <= f->rev_end) {
+ dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+ }
+ }
+}
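mmc_fixup_device() treats CID_MANFID_ANY, CID_OEMID_ANY and CID_NAME_ANY as wildcards and calls vendor_fixup() for every table entry that matches the card, so one card can pick up several quirks. A simplified userspace model of the wildcard match (the manufacturer id below is illustrative):

#include <stdio.h>
#include <string.h>

#define MANFID_ANY	-1
#define NAME_ANY	NULL

struct card { int manfid; const char *name; unsigned int quirks; };

struct fixup {
	const char *name;
	int manfid;
	void (*apply)(struct card *c, unsigned int data);
	unsigned int data;
};

static void add_quirk(struct card *c, unsigned int data)
{
	c->quirks |= data;
}

static const struct fixup table[] = {
	{ "V10008", 0x70 /* illustrative manfid */, add_quirk, 1u << 0 },
	{ NAME_ANY, MANFID_ANY, NULL, 0 },	/* END_FIXUP */
};

static void fixup_card(struct card *c, const struct fixup *t)
{
	for (const struct fixup *f = t; f->apply; f++) {
		if ((f->manfid == MANFID_ANY || f->manfid == c->manfid) &&
		    (f->name == NAME_ANY || !strcmp(f->name, c->name)))
			f->apply(c, f->data);
	}
}

int main(void)
{
	struct card c = { .manfid = 0x70, .name = "V10008", .quirks = 0 };

	fixup_card(&c, table);
	printf("quirks = %#x\n", c.quirks);
	return 0;
}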
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 73c762a28dfe..89531b48ae84 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -22,6 +22,8 @@
#include <linux/mmc/sd.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd.h"
@@ -223,6 +225,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
+ u32 *raw_ssr;
int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -231,14 +234,21 @@ static int mmc_read_ssr(struct mmc_card *card)
return 0;
}
- if (mmc_app_sd_status(card, card->raw_ssr)) {
+ raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
+ if (!raw_ssr)
+ return -ENOMEM;
+
+ if (mmc_app_sd_status(card, raw_ssr)) {
pr_warn("%s: problem reading SD Status register\n",
mmc_hostname(card->host));
+ kfree(raw_ssr);
return 0;
}
for (i = 0; i < 16; i++)
- card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
+ card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
+
+ kfree(raw_ssr);
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
@@ -778,8 +788,7 @@ try_again:
*/
if (!mmc_host_is_spi(host) && rocr &&
((*rocr & 0x41000000) == 0x41000000)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- pocr);
+ err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
goto try_again;
@@ -927,7 +936,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
u32 cid[4];
u32 rocr = 0;
- BUG_ON(!host);
WARN_ON(!host->claimed);
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
@@ -1043,9 +1051,6 @@ free_card:
*/
static void mmc_sd_remove(struct mmc_host *host)
{
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_remove_card(host->card);
host->card = NULL;
}
@@ -1065,9 +1070,6 @@ static void mmc_sd_detect(struct mmc_host *host)
{
int err;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_get_card(host->card);
/*
@@ -1091,9 +1093,6 @@ static int _mmc_sd_suspend(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
@@ -1136,9 +1135,6 @@ static int _mmc_sd_resume(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
if (!mmc_card_suspended(host->card))
@@ -1221,7 +1217,6 @@ int mmc_attach_sd(struct mmc_host *host)
int err;
u32 ocr, rocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
err = mmc_send_app_op_cond(host, 0, &ocr);
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index aab824a9a7f3..1ada9808c329 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -1,10 +1,13 @@
#ifndef _MMC_CORE_SD_H
#define _MMC_CORE_SD_H
-#include <linux/mmc/card.h>
+#include <linux/types.h>
extern struct device_type sd_type;
+struct mmc_host;
+struct mmc_card;
+
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
void mmc_decode_cid(struct mmc_card *card);
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 16b774c18e75..9d5824a37586 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -25,10 +25,10 @@
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
- BUG_ON(!host);
- BUG_ON(card && (card->host != host));
+ if (WARN_ON(card && card->host != host))
+ return -EINVAL;
cmd.opcode = MMC_APP_CMD;
@@ -68,12 +68,12 @@ EXPORT_SYMBOL_GPL(mmc_app_cmd);
int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
struct mmc_command *cmd, int retries)
{
- struct mmc_request mrq = {NULL};
+ struct mmc_request mrq = {};
int i, err;
- BUG_ON(!cmd);
- BUG_ON(retries < 0);
+ if (retries < 0)
+ retries = MMC_CMD_RETRIES;
err = -EIO;
@@ -120,10 +120,7 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
- struct mmc_command cmd = {0};
-
- BUG_ON(!card);
- BUG_ON(!card->host);
+ struct mmc_command cmd = {};
cmd.opcode = SD_APP_SET_BUS_WIDTH;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -144,11 +141,9 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int i, err = 0;
- BUG_ON(!host);
-
cmd.opcode = SD_APP_OP_COND;
if (mmc_host_is_spi(host))
cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
@@ -190,7 +185,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
static const u8 test_pattern = 0xAA;
u8 result_pattern;
@@ -222,10 +217,7 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
{
int err;
- struct mmc_command cmd = {0};
-
- BUG_ON(!host);
- BUG_ON(!rca);
+ struct mmc_command cmd = {};
cmd.opcode = SD_SEND_RELATIVE_ADDR;
cmd.arg = 0;
@@ -243,16 +235,12 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
{
int err;
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
void *data_buf;
- BUG_ON(!card);
- BUG_ON(!card->host);
- BUG_ON(!scr);
-
/* NOTE: caller guarantees scr is heap-allocated */
err = mmc_app_cmd(card->host, card);
@@ -302,14 +290,11 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
- BUG_ON(!card);
- BUG_ON(!card->host);
-
/* NOTE: caller guarantees resp is heap-allocated */
mode = !!mode;
@@ -347,15 +332,11 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
int err;
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg;
- BUG_ON(!card);
- BUG_ON(!card->host);
- BUG_ON(!ssr);
-
/* NOTE: caller guarantees ssr is heap-allocated */
err = mmc_app_cmd(card->host, card);
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index ffc2305d905f..784f8e6b6baa 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -12,6 +12,12 @@
#ifndef _MMC_SD_OPS_H
#define _MMC_SD_OPS_H
+#include <linux/types.h>
+
+struct mmc_card;
+struct mmc_host;
+struct mmc_command;
+
int mmc_app_set_bus_width(struct mmc_card *card, int width);
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
@@ -20,6 +26,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr);
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp);
int mmc_app_sd_status(struct mmc_card *card, void *ssr);
+int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
+int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
+ struct mmc_command *cmd, int retries);
#endif
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd44ba8116d1..fae732c870a9 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -20,7 +20,10 @@
#include <linux/mmc/sdio_ids.h>
#include "core.h"
+#include "card.h"
+#include "host.h"
#include "bus.h"
+#include "quirks.h"
#include "sd.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
@@ -63,7 +66,8 @@ static int sdio_init_func(struct mmc_card *card, unsigned int fn)
int ret;
struct sdio_func *func;
- BUG_ON(fn > SDIO_MAX_FUNCS);
+ if (WARN_ON(fn > SDIO_MAX_FUNCS))
+ return -EINVAL;
func = sdio_alloc_func(card);
if (IS_ERR(func))
@@ -540,6 +544,15 @@ out:
return err;
}
+static void mmc_sdio_resend_if_cond(struct mmc_host *host,
+ struct mmc_card *card)
+{
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+ mmc_remove_card(card);
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -555,7 +568,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
u32 rocr = 0;
u32 ocr_card = ocr;
- BUG_ON(!host);
WARN_ON(!host->claimed);
/* to query card if 1.8V signalling is supported */
@@ -624,24 +636,21 @@ try_again:
* to switch to 1.8V signaling level. No 1.8v signalling if
* UHS mode is not enabled to maintain compatibility and some
* systems that claim 1.8v signalling in fact do not support
- * it.
+ * it. Per SDIO spec v3, section 3.1.2, if the voltage is already
+ * 1.8v, the card sets S18A to 0 in the R4 response. So it will
+ * fail the rocr & R4_18V_PRESENT check, but we still need to try
+ * to init the UHS card. sdio_read_cccr will take over this task
+ * to determine which speed mode should work.
*/
if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- ocr_card);
+ err = mmc_set_uhs_voltage(host, ocr_card);
if (err == -EAGAIN) {
- sdio_reset(host);
- mmc_go_idle(host);
- mmc_send_if_cond(host, host->ocr_avail);
- mmc_remove_card(card);
+ mmc_sdio_resend_if_cond(host, card);
retries--;
goto try_again;
} else if (err) {
ocr &= ~R4_18V_PRESENT;
}
- err = 0;
- } else {
- ocr &= ~R4_18V_PRESENT;
}
/*
@@ -698,11 +707,20 @@ try_again:
}
/*
- * Read the common registers.
+ * Read the common registers. Note that we should try to
+ * validate whether UHS would work or not.
*/
err = sdio_read_cccr(card, ocr);
- if (err)
- goto remove;
+ if (err) {
+ mmc_sdio_resend_if_cond(host, card);
+ if (ocr & R4_18V_PRESENT) {
+ /* Retry init sequence, but without R4_18V_PRESENT. */
+ retries = 0;
+ goto try_again;
+ } else {
+ goto remove;
+ }
+ }
/*
* Read the common CIS tuples.
@@ -721,7 +739,7 @@ try_again:
card = oldcard;
}
card->ocr = ocr_card;
- mmc_fixup_device(card, NULL);
+ mmc_fixup_device(card, sdio_fixup_methods);
if (card->type == MMC_TYPE_SD_COMBO) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
@@ -791,9 +809,6 @@ static void mmc_sdio_remove(struct mmc_host *host)
{
int i;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
for (i = 0;i < host->card->sdio_funcs;i++) {
if (host->card->sdio_func[i]) {
sdio_remove_func(host->card->sdio_func[i]);
@@ -820,9 +835,6 @@ static void mmc_sdio_detect(struct mmc_host *host)
{
int err;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
err = pm_runtime_get_sync(&host->card->dev);
@@ -916,9 +928,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
{
int err = 0;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
/* Basic card reinitialization. */
mmc_claim_host(host);
@@ -970,9 +979,6 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
- BUG_ON(!host);
- BUG_ON(!host->card);
-
mmc_claim_host(host);
/*
@@ -1063,7 +1069,6 @@ int mmc_attach_sdio(struct mmc_host *host)
u32 ocr, rocr;
struct mmc_card *card;
- BUG_ON(!host);
WARN_ON(!host->claimed);
err = mmc_send_io_op_cond(host, 0, &ocr);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 86f5b3223aae..2b32b88949ba 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include "core.h"
+#include "card.h"
#include "sdio_cis.h"
#include "sdio_bus.h"
@@ -266,7 +267,7 @@ static void sdio_release_func(struct device *dev)
sdio_free_func_cis(func);
kfree(func->info);
-
+ kfree(func->tmpbuf);
kfree(func);
}
@@ -281,6 +282,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
if (!func)
return ERR_PTR(-ENOMEM);
+ /*
+ * allocate buffer separately to make sure it's properly aligned for
+ * DMA usage (incl. 64 bit DMA)
+ */
+ func->tmpbuf = kmalloc(4, GFP_KERNEL);
+ if (!func->tmpbuf) {
+ kfree(func);
+ return ERR_PTR(-ENOMEM);
+ }
+
func->card = card;
device_initialize(&func->dev);
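The 4-byte scratch buffer becomes a separate kmalloc() instead of an array inside struct sdio_func, so it starts on the allocator's alignment boundary and can safely be handed to DMA engines, including 64-bit ones; an array embedded in a larger structure sits at whatever offset its siblings dictate. A userspace sketch of the difference (offsets and sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

struct func {
	char enabled;
	char num;
	unsigned char tmpbuf_embedded[4];	/* offset set by its siblings */
	unsigned char *tmpbuf;			/* dedicated allocation */
};

int main(void)
{
	struct func f;

	f.tmpbuf = malloc(4);
	if (!f.tmpbuf)
		return 1;

	printf("embedded buffer offset in struct: %zu\n",
	       offsetof(struct func, tmpbuf_embedded));
	printf("separate buffer address %% 16: %zu\n",
	       (size_t)((uintptr_t)f.tmpbuf % 16));

	free(f.tmpbuf);
	return 0;
}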
diff --git a/drivers/mmc/core/sdio_bus.h b/drivers/mmc/core/sdio_bus.h
index 567a76821ba7..b69a2540a076 100644
--- a/drivers/mmc/core/sdio_bus.h
+++ b/drivers/mmc/core/sdio_bus.h
@@ -11,6 +11,9 @@
#ifndef _MMC_CORE_SDIO_BUS_H
#define _MMC_CORE_SDIO_BUS_H
+struct mmc_card;
+struct sdio_func;
+
struct sdio_func *sdio_alloc_func(struct mmc_card *card);
int sdio_add_func(struct sdio_func *func);
void sdio_remove_func(struct sdio_func *func);
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index dcb3dee59fa5..f8c372839d24 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -262,7 +262,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
else
prev = &card->tuples;
- BUG_ON(*prev);
+ if (*prev)
+ return -EINVAL;
do {
unsigned char tpl_code, tpl_link;
diff --git a/drivers/mmc/core/sdio_cis.h b/drivers/mmc/core/sdio_cis.h
index 4d903c2e425e..16aa563faa00 100644
--- a/drivers/mmc/core/sdio_cis.h
+++ b/drivers/mmc/core/sdio_cis.h
@@ -14,6 +14,9 @@
#ifndef _MMC_SDIO_CIS_H
#define _MMC_SDIO_CIS_H
+struct mmc_card;
+struct sdio_func;
+
int sdio_read_common_cis(struct mmc_card *card);
void sdio_free_common_cis(struct mmc_card *card);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 406e5f037e32..74195d772f5a 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -16,6 +16,8 @@
#include <linux/mmc/sdio_func.h>
#include "sdio_ops.h"
+#include "core.h"
+#include "card.h"
/**
* sdio_claim_host - exclusively claim a bus for a certain SDIO function
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 91bbbfb29f3f..6d4b72080d51 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <uapi/linux/sched/types.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
@@ -27,6 +28,8 @@
#include <linux/mmc/sdio_func.h>
#include "sdio_ops.h"
+#include "core.h"
+#include "card.h"
static int process_sdio_pending_irqs(struct mmc_host *host)
{
@@ -214,7 +217,9 @@ static int sdio_card_irq_put(struct mmc_card *card)
struct mmc_host *host = card->host;
WARN_ON(!host->claimed);
- BUG_ON(host->sdio_irqs < 1);
+
+ if (host->sdio_irqs < 1)
+ return -EINVAL;
if (!--host->sdio_irqs) {
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
@@ -261,8 +266,8 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
int ret;
unsigned char reg;
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return -EINVAL;
pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));
@@ -304,8 +309,8 @@ int sdio_release_irq(struct sdio_func *func)
int ret;
unsigned char reg;
- BUG_ON(!func);
- BUG_ON(!func->card);
+ if (!func)
+ return -EINVAL;
pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 90fe5545c677..3c0d3ab4324c 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -21,7 +21,7 @@
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int i, err = 0;
cmd.opcode = SD_IO_SEND_OP_COND;
@@ -66,7 +66,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
int err;
if (fn > 7)
@@ -118,9 +118,9 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
{
- struct mmc_request mrq = {NULL};
- struct mmc_command cmd = {0};
- struct mmc_data data = {0};
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
struct scatterlist sg, *sg_ptr;
struct sg_table sgtable;
unsigned int nents, left_size, i;
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index 5660c7f459e9..bed8a8377fec 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -12,14 +12,19 @@
#ifndef _MMC_SDIO_OPS_H
#define _MMC_SDIO_OPS_H
+#include <linux/types.h>
#include <linux/mmc/sdio.h>
+struct mmc_host;
+struct mmc_card;
+
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
unsigned addr, u8 in, u8* out);
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);
+unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
static inline bool mmc_is_io_op(u32 opcode)
{
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 5af6fb9a9ce2..d3c91f412b69 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/card/sdio_uart.c - SDIO UART/GPS driver
+ * SDIO UART/GPS driver
*
* Based on drivers/serial/8250.c and drivers/serial/serial_core.c
* by Russell King.
@@ -135,8 +135,6 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port)
{
struct sdio_func *func;
- BUG_ON(sdio_uart_table[port->index] != port);
-
spin_lock(&sdio_uart_table_lock);
sdio_uart_table[port->index] = NULL;
spin_unlock(&sdio_uart_table_lock);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 27117ba47073..a8450a8701e4 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -235,9 +235,6 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
struct gpio_desc *desc;
int ret;
- if (!con_id)
- con_id = ctx->cd_label;
-
desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -258,6 +255,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
}
EXPORT_SYMBOL(mmc_gpiod_request_cd);
+bool mmc_can_gpio_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return ctx->cd_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_cd);
+
/**
* mmc_gpiod_request_ro - request a gpio descriptor for write protection
* @host: mmc host
@@ -281,9 +286,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
struct gpio_desc *desc;
int ret;
- if (!con_id)
- con_id = ctx->ro_label;
-
desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
if (IS_ERR(desc))
return PTR_ERR(desc);
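The new mmc_can_gpio_cd() export lets a host driver ask whether card detect is wired to a GPIO descriptor. A hedged sketch of how a caller might use it to choose between GPIO detection and polling (the host structure and capability flag below are illustrative, not taken from this patch):

#include <stdio.h>
#include <stdbool.h>

#define CAP_NEEDS_POLL	(1u << 0)

struct host { void *cd_gpio; unsigned int caps; };

static bool can_gpio_cd(struct host *h)
{
	return h->cd_gpio ? true : false;	/* mirrors the new helper */
}

int main(void)
{
	struct host h = { .cd_gpio = NULL, .caps = 0 };

	if (!can_gpio_cd(&h))
		h.caps |= CAP_NEEDS_POLL;	/* no CD line, poll for cards */

	printf("caps = %#x\n", h.caps);
	return 0;
}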
diff --git a/drivers/mmc/core/slot-gpio.h b/drivers/mmc/core/slot-gpio.h
index 8c1854dc5d58..a06fd843f025 100644
--- a/drivers/mmc/core/slot-gpio.h
+++ b/drivers/mmc/core/slot-gpio.h
@@ -8,6 +8,8 @@
#ifndef _MMC_CORE_SLOTGPIO_H
#define _MMC_CORE_SLOTGPIO_H
+struct mmc_host;
+
int mmc_gpio_alloc(struct mmc_host *host);
#endif
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 5274f503a39a..f08691a58d7e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -135,7 +135,6 @@ config MMC_SDHCI_OF_AT91
tristate "SDHCI OF support for the Atmel SDMMC controller"
depends on MMC_SDHCI_PLTFM
depends on OF
- select MMC_SDHCI_IO_ACCESSORS
help
This selects the Atmel SDMMC driver
@@ -144,6 +143,7 @@ config MMC_SDHCI_OF_ESDHC
depends on MMC_SDHCI_PLTFM
depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
select MMC_SDHCI_IO_ACCESSORS
+ select FSL_GUTS
help
This selects the Freescale eSDHC controller support.
@@ -165,6 +165,17 @@ config MMC_SDHCI_OF_HLWD
If unsure, say N.
+config MMC_SDHCI_CADENCE
+ tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Cadence SD/SDIO/eMMC driver.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_CNS3XXX
tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
depends on ARCH_CNS3XXX
@@ -322,6 +333,16 @@ config MMC_SDHCI_IPROC
If unsure, say N.
+config MMC_MESON_GX
+ tristate "Amlogic S905/GX* SD/MMC Host Controller support"
+ depends on ARCH_MESON && MMC
+ help
+ This selects support for the Amlogic SD/MMC Host Controller
+ found on the S905/GX* family of SoCs. This controller is
+ MMC 5.1 compliant and supports SD, eMMC and SDIO interfaces.
+
+ If you have a controller with this interface, say Y here.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
@@ -662,6 +683,15 @@ config MMC_DW_ROCKCHIP
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on RK3066, RK3188 and RK3288 SoC's.
+config MMC_DW_ZX
+ tristate "ZTE specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW && ARCH_ZX
+ select MMC_DW_PLTFM
+ help
+ This selects support for ZTE SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on the ZX296718 SoC.
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on HAS_DMA
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e2bdaaf43184..6d548c4ee2fa 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -48,11 +48,13 @@ obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
+obj-$(CONFIG_MMC_DW_ZX) += dw_mmc-zx.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
+obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
@@ -62,6 +64,7 @@ obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_CADENCE) += sdhci-cadence.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index dca5518b0139..590a8a4522be 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -49,7 +49,7 @@
#include <asm/types.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 8fa478c3b0db..1e2600da105f 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -35,6 +35,8 @@
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/interrupt.h>
#include <linux/platform_data/mmc-davinci.h>
@@ -1029,9 +1031,10 @@ static int mmc_davinci_get_cd(struct mmc_host *mmc)
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *config = pdev->dev.platform_data;
- if (!config || !config->get_cd)
- return -ENOSYS;
- return config->get_cd(pdev->id);
+ if (config && config->get_cd)
+ return config->get_cd(pdev->id);
+
+ return mmc_gpio_get_cd(mmc);
}
static int mmc_davinci_get_ro(struct mmc_host *mmc)
@@ -1039,9 +1042,10 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *config = pdev->dev.platform_data;
- if (!config || !config->get_ro)
- return -ENOSYS;
- return config->get_ro(pdev->id);
+ if (config && config->get_ro)
+ return config->get_ro(pdev->id);
+
+ return mmc_gpio_get_ro(mmc);
}
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1159,49 +1163,53 @@ static const struct of_device_id davinci_mmc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
-static struct davinci_mmc_config
- *mmc_parse_pdata(struct platform_device *pdev)
+static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
{
- struct device_node *np;
+ struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *pdata = pdev->dev.platform_data;
- const struct of_device_id *match =
- of_match_device(davinci_mmc_dt_ids, &pdev->dev);
- u32 data;
-
- np = pdev->dev.of_node;
- if (!np)
- return pdata;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
- goto nodata;
- }
+ struct mmc_davinci_host *host;
+ int ret;
- if (match)
- pdev->id_entry = match->data;
+ if (!pdata)
+ return -EINVAL;
- if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
- dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");
+ host = mmc_priv(mmc);
+ if (!host)
+ return -EINVAL;
- of_property_read_u32(np, "bus-width", &data);
- switch (data) {
- case 1:
- case 4:
- case 8:
- pdata->wires = data;
- break;
- default:
- pdata->wires = 1;
- dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
- }
-nodata:
- return pdata;
+ if (pdata && pdata->nr_sg)
+ host->nr_sg = pdata->nr_sg - 1;
+
+ if (pdata && (pdata->wires == 4 || pdata->wires == 0))
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (pdata && (pdata->wires == 8))
+ mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
+
+ mmc->f_min = 312500;
+ mmc->f_max = 25000000;
+ if (pdata && pdata->max_freq)
+ mmc->f_max = pdata->max_freq;
+ if (pdata && pdata->caps)
+ mmc->caps |= pdata->caps;
+
+ /* Register a CD GPIO; if there is none, enable polling */
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ else if (ret)
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ return 0;
}
static int __init davinci_mmcsd_probe(struct platform_device *pdev)
{
- struct davinci_mmc_config *pdata = NULL;
+ const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
@@ -1209,12 +1217,6 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
size_t mem_size;
const struct platform_device_id *id_entry;
- pdata = mmc_parse_pdata(pdev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "Couldn't get platform data\n");
- return -ENOENT;
- }
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -ENODEV;
@@ -1253,14 +1255,28 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->mmc_input_clk = clk_get_rate(host->clk);
- init_mmcsd_host(host);
-
- if (pdata->nr_sg)
- host->nr_sg = pdata->nr_sg - 1;
+ match = of_match_device(davinci_mmc_dt_ids, &pdev->dev);
+ if (match) {
+ pdev->id_entry = match->data;
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "could not parse of data: %d\n", ret);
+ goto parse_fail;
+ }
+ } else {
+ ret = mmc_davinci_parse_pdata(mmc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "could not parse platform data: %d\n", ret);
+ goto parse_fail;
+ }
+ }
if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
host->nr_sg = MAX_NR_SG;
+ init_mmcsd_host(host);
+
host->use_dma = use_dma;
host->mmc_irq = irq;
host->sdio_irq = platform_get_irq(pdev, 1);
@@ -1273,27 +1289,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->use_dma = 0;
}
- /* REVISIT: someday, support IRQ-driven card detection. */
- mmc->caps |= MMC_CAP_NEEDS_POLL;
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- if (pdata && (pdata->wires == 4 || pdata->wires == 0))
- mmc->caps |= MMC_CAP_4_BIT_DATA;
-
- if (pdata && (pdata->wires == 8))
- mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
-
id_entry = platform_get_device_id(pdev);
if (id_entry)
host->version = id_entry->driver_data;
mmc->ops = &mmc_davinci_ops;
- mmc->f_min = 312500;
- mmc->f_max = 25000000;
- if (pdata && pdata->max_freq)
- mmc->f_max = pdata->max_freq;
- if (pdata && pdata->caps)
- mmc->caps |= pdata->caps;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* With no iommu coalescing pages, each phys_seg is a hw_seg.
@@ -1354,6 +1356,7 @@ mmc_add_host_fail:
mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
davinci_release_dma_channels(host);
+parse_fail:
dma_probe_defer:
clk_disable_unprepare(host->clk);
clk_prepare_enable_fail:
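The davinci conversion above splits probing into a DT path (mmc_of_parse()) and a legacy platform-data path (mmc_davinci_parse_pdata()), and replaces the old -ENOSYS returns with the generic slot-gpio helpers. Below is a minimal sketch of the card-detect part of that pattern, using the same mmc_gpiod_request_cd() call as the patch; the helper name example_setup_cd() is only for illustration.

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

/* Request a CD GPIO described by firmware; fall back to polling if absent. */
static int example_setup_cd(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
	if (ret == -EPROBE_DEFER)
		return ret;			/* GPIO provider not ready yet */
	if (ret)
		mmc->caps |= MMC_CAP_NEEDS_POLL; /* no CD line: poll the card */

	return 0;
}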
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 7ab3d749b5ae..25691cca1881 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -13,10 +13,10 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "dw_mmc.h"
@@ -161,20 +161,13 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_exynos_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_exynos_resume(struct device *dev)
+#ifdef CONFIG_PM
+static int dw_mci_exynos_runtime_resume(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
dw_mci_exynos_config_smu(host);
- return dw_mci_resume(host);
+ return dw_mci_runtime_resume(dev);
}
/**
@@ -211,10 +204,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
return 0;
}
#else
-#define dw_mci_exynos_suspend NULL
-#define dw_mci_exynos_resume NULL
#define dw_mci_exynos_resume_noirq NULL
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
@@ -524,14 +515,42 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
+ int ret;
match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
drv_data = match->data;
- return dw_mci_pltfm_register(pdev, drv_data);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = dw_mci_pltfm_register(pdev, drv_data);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mci_exynos_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return dw_mci_pltfm_remove(pdev);
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_exynos_runtime_resume,
+ NULL)
.resume_noirq = dw_mci_exynos_resume_noirq,
.thaw_noirq = dw_mci_exynos_resume_noirq,
.restore_noirq = dw_mci_exynos_resume_noirq,
@@ -539,7 +558,7 @@ static const struct dev_pm_ops dw_mci_exynos_pmops = {
static struct platform_driver dw_mci_exynos_pltfm_driver = {
.probe = dw_mci_exynos_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove = dw_mci_exynos_remove,
.driver = {
.name = "dwmmc_exynos",
.of_match_table = dw_mci_exynos_match,
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 624789496dce..e38fb0020bb1 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -11,10 +11,10 @@
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -162,35 +162,13 @@ static int dw_mci_k3_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_k3_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
- int ret;
-
- ret = dw_mci_suspend(host);
- if (!ret)
- clk_disable_unprepare(host->ciu_clk);
-
- return ret;
-}
-
-static int dw_mci_k3_resume(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(host->ciu_clk);
- if (ret) {
- dev_err(host->dev, "failed to enable ciu clock\n");
- return ret;
- }
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume);
+static const struct dev_pm_ops dw_mci_k3_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
static struct platform_driver dw_mci_k3_pltfm_driver = {
.probe = dw_mci_k3_probe,
@@ -198,7 +176,7 @@ static struct platform_driver dw_mci_k3_pltfm_driver = {
.driver = {
.name = "dwmmc_k3",
.of_match_table = dw_mci_k3_match,
- .pm = &dw_mci_k3_pmops,
+ .pm = &dw_mci_k3_dev_pm_ops,
},
};
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index 4c69fbd29811..ab8713297edb 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -14,10 +14,10 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
-#include <linux/mmc/dw_mmc.h>
#include "dw_mmc.h"
#define PCI_BAR_NO 2
@@ -79,25 +79,13 @@ static void dw_mci_pci_remove(struct pci_dev *pdev)
dw_mci_remove(host);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_mci *host = pci_get_drvdata(pdev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_mci *host = pci_get_drvdata(pdev);
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume);
+static const struct dev_pm_ops dw_mci_pci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
static const struct pci_device_id dw_mci_pci_id[] = {
{ PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) },
@@ -111,7 +99,7 @@ static struct pci_driver dw_mci_pci_driver = {
.probe = dw_mci_pci_probe,
.remove = dw_mci_pci_remove,
.driver = {
- .pm = &dw_mci_pci_pmops
+ .pm = &dw_mci_pci_dev_pm_ops,
},
};
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index dbbc4303bdd0..58c13e21bd5a 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -16,10 +16,10 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -58,26 +58,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
-#ifdef CONFIG_PM_SLEEP
-/*
- * TODO: we should probably disable the clock to the card in the suspend path.
- */
-static int dw_mci_pltfm_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_pltfm_resume(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
+const struct dev_pm_ops dw_mci_pltfm_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
static const struct of_device_id dw_mci_pltfm_match[] = {
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 25eae359a5ea..372fb6e948c1 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -11,8 +11,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/of_address.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "dw_mmc.h"
@@ -325,6 +326,7 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
+ int ret;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -332,16 +334,49 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node);
drv_data = match->data;
- return dw_mci_pltfm_register(pdev, drv_data);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ ret = dw_mci_pltfm_register(pdev, drv_data);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
+
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
}
+static int dw_mci_rockchip_remove(struct platform_device *pdev)
+{
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return dw_mci_pltfm_remove(pdev);
+}
+
+static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
+
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.probe = dw_mci_rockchip_probe,
- .remove = dw_mci_pltfm_remove,
+ .remove = dw_mci_rockchip_remove,
.driver = {
.name = "dwmmc_rockchip",
.of_match_table = dw_mci_rockchip_match,
- .pm = &dw_mci_pltfm_pmops,
+ .pm = &dw_mci_rockchip_dev_pm_ops,
},
};
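All of the dw_mmc glue drivers touched above (exynos, k3, pci, pltfm, rockchip) drop their open-coded suspend/resume handlers and route system sleep through runtime PM via pm_runtime_force_suspend()/pm_runtime_force_resume(), with dw_mci_runtime_suspend()/dw_mci_runtime_resume() as the runtime callbacks. The sketch below shows the probe-time bracketing used by the exynos and rockchip variants; the function name example_probe_with_rpm() is illustrative only.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"

/* Keep the device active while registering; undo in reverse order on error. */
static int example_probe_with_rpm(struct platform_device *pdev,
				  const struct dw_mci_drv_data *drv_data)
{
	int ret;

	pm_runtime_get_noresume(&pdev->dev);	/* hold a usage count over probe */
	pm_runtime_set_active(&pdev->dev);	/* hardware is already powered */
	pm_runtime_enable(&pdev->dev);

	ret = dw_mci_pltfm_register(pdev, drv_data);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
	}

	return ret;
}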
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
new file mode 100644
index 000000000000..d38e94ae2b85
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-zx.c
@@ -0,0 +1,241 @@
+/*
+ * ZX Specific Extensions for Synopsys DW Multimedia Card Interface driver
+ *
+ * Copyright (C) 2016, Linaro Ltd.
+ * Copyright (C) 2016, ZTE Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+#include "dw_mmc-zx.h"
+
+struct dw_mci_zx_priv_data {
+ struct regmap *sysc_base;
+};
+
+enum delay_type {
+ DELAY_TYPE_READ, /* read dqs delay */
+ DELAY_TYPE_CLK, /* clk sample delay */
+};
+
+static int dw_mci_zx_emmc_set_delay(struct dw_mci *host, unsigned int delay,
+ enum delay_type dflag)
+{
+ struct dw_mci_zx_priv_data *priv = host->priv;
+ struct regmap *sysc_base = priv->sysc_base;
+ unsigned int clksel;
+ unsigned int loop = 1000;
+ int ret;
+
+ if (!sysc_base)
+ return -EINVAL;
+
+ ret = regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0,
+ PARA_HALF_CLK_MODE | PARA_DLL_BYPASS_MODE |
+ PARA_PHASE_DET_SEL_MASK |
+ PARA_DLL_LOCK_NUM_MASK |
+ DLL_REG_SET | PARA_DLL_START_MASK,
+ PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4));
+ if (ret)
+ return ret;
+
+ ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG1, &clksel);
+ if (ret)
+ return ret;
+
+ if (dflag == DELAY_TYPE_CLK) {
+ clksel &= ~CLK_SAMP_DELAY_MASK;
+ clksel |= CLK_SAMP_DELAY(delay);
+ } else {
+ clksel &= ~READ_DQS_DELAY_MASK;
+ clksel |= READ_DQS_DELAY(delay);
+ }
+
+ regmap_write(sysc_base, LB_AON_EMMC_CFG_REG1, clksel);
+ regmap_update_bits(sysc_base, LB_AON_EMMC_CFG_REG0,
+ PARA_DLL_START_MASK | PARA_DLL_LOCK_NUM_MASK |
+ DLL_REG_SET,
+ PARA_DLL_START(4) | PARA_DLL_LOCK_NUM(4) |
+ DLL_REG_SET);
+
+ do {
+ ret = regmap_read(sysc_base, LB_AON_EMMC_CFG_REG2, &clksel);
+ if (ret)
+ return ret;
+
+ } while (--loop && !(clksel & ZX_DLL_LOCKED));
+
+ if (!loop) {
+ dev_err(host->dev, "Error: %s DLL lock failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int dw_mci_zx_emmc_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ struct dw_mci *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ int ret, len = 0, start = 0, end = 0, delay, best = 0;
+
+ for (delay = 1; delay < 128; delay++) {
+ ret = dw_mci_zx_emmc_set_delay(host, delay, DELAY_TYPE_CLK);
+ if (!ret && mmc_send_tuning(mmc, opcode, NULL)) {
+ if (start >= 0) {
+ end = delay - 1;
+ /* check and update longest good range */
+ if ((end - start) > len) {
+ best = (start + end) >> 1;
+ len = end - start;
+ }
+ }
+ start = -1;
+ end = 0;
+ continue;
+ }
+ if (start < 0)
+ start = delay;
+ }
+
+ if (start >= 0) {
+ end = delay - 1;
+ if ((end - start) > len) {
+ best = (start + end) >> 1;
+ len = end - start;
+ }
+ }
+ if (best < 0)
+ return -EIO;
+
+ dev_info(host->dev, "%s best range: start %d end %d\n", __func__,
+ start, end);
+ return dw_mci_zx_emmc_set_delay(host, best, DELAY_TYPE_CLK);
+}
+
+static int dw_mci_zx_prepare_hs400_tuning(struct dw_mci *host,
+ struct mmc_ios *ios)
+{
+ int ret;
+
+ /* configure the phase shift as 90 degrees */
+ ret = dw_mci_zx_emmc_set_delay(host, 32, DELAY_TYPE_READ);
+ if (ret < 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int dw_mci_zx_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ struct dw_mci *host = slot->host;
+
+ if (host->verid == 0x290a) /* only for emmc */
+ return dw_mci_zx_emmc_execute_tuning(slot, opcode);
+ /* TODO: Add 0x210a dedicated tuning for sd/sdio */
+
+ return 0;
+}
+
+static int dw_mci_zx_parse_dt(struct dw_mci *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct device_node *node;
+ struct dw_mci_zx_priv_data *priv;
+ struct regmap *sysc_base;
+ int ret;
+
+ /* syscon is needed only by emmc */
+ node = of_parse_phandle(np, "zte,aon-syscon", 0);
+ if (node) {
+ sysc_base = syscon_node_to_regmap(node);
+ of_node_put(node);
+
+ if (IS_ERR(sysc_base)) {
+ ret = PTR_ERR(sysc_base);
+ if (ret != -EPROBE_DEFER)
+ dev_err(host->dev, "Can't get syscon: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ return 0;
+ }
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->sysc_base = sysc_base;
+ host->priv = priv;
+
+ return 0;
+}
+
+static unsigned long zx_dwmmc_caps[3] = {
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+};
+
+static const struct dw_mci_drv_data zx_drv_data = {
+ .caps = zx_dwmmc_caps,
+ .execute_tuning = dw_mci_zx_execute_tuning,
+ .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
+ .parse_dt = dw_mci_zx_parse_dt,
+};
+
+static const struct of_device_id dw_mci_zx_match[] = {
+ { .compatible = "zte,zx296718-dw-mshc", .data = &zx_drv_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_zx_match);
+
+static int dw_mci_zx_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(dw_mci_zx_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+static const struct dev_pm_ops dw_mci_zx_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver dw_mci_zx_pltfm_driver = {
+ .probe = dw_mci_zx_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_zx",
+ .of_match_table = dw_mci_zx_match,
+ .pm = &dw_mci_zx_dev_pm_ops,
+ },
+};
+
+module_platform_driver(dw_mci_zx_pltfm_driver);
+
+MODULE_DESCRIPTION("ZTE emmc/sd driver");
+MODULE_LICENSE("GPL v2");
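dw_mci_zx_emmc_execute_tuning() above sweeps the clock sample delay, records the longest contiguous run of delays for which mmc_send_tuning() succeeds, and finally programs the midpoint of that run. A compact sketch of that window selection over a precomputed pass/fail array follows; the helper name and the array form are illustrative, not part of the patch.

#include <linux/types.h>

/* Return the midpoint of the longest run of passing taps, or -1 if none. */
static int example_pick_delay(const bool *pass, int ntaps)
{
	int start = -1, best = -1, len = 0, i;

	for (i = 0; i < ntaps; i++) {
		if (pass[i]) {
			if (start < 0)
				start = i;	/* a new passing window opens */
			continue;
		}
		if (start >= 0 && (i - 1) - start > len) {
			len = (i - 1) - start;	/* longest window so far */
			best = (start + i - 1) / 2;
		}
		start = -1;			/* window closed by a failure */
	}
	if (start >= 0 && (ntaps - 1) - start > len)
		best = (start + ntaps - 1) / 2;

	return best;
}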
diff --git a/drivers/mmc/host/dw_mmc-zx.h b/drivers/mmc/host/dw_mmc-zx.h
new file mode 100644
index 000000000000..f369997a39ec
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-zx.h
@@ -0,0 +1,31 @@
+#ifndef _DW_MMC_ZX_H_
+#define _DW_MMC_ZX_H_
+
+/* ZX296718 SoC specific DLL register offset. */
+#define LB_AON_EMMC_CFG_REG0 0x1B0
+#define LB_AON_EMMC_CFG_REG1 0x1B4
+#define LB_AON_EMMC_CFG_REG2 0x1B8
+
+/* LB_AON_EMMC_CFG_REG0 register defines */
+#define PARA_DLL_START(x) ((x) & 0xFF)
+#define PARA_DLL_START_MASK 0xFF
+#define DLL_REG_SET BIT(8)
+#define PARA_DLL_LOCK_NUM(x) (((x) & 7) << 16)
+#define PARA_DLL_LOCK_NUM_MASK (7 << 16)
+#define PARA_PHASE_DET_SEL(x) (((x) & 7) << 20)
+#define PARA_PHASE_DET_SEL_MASK (7 << 20)
+#define PARA_DLL_BYPASS_MODE BIT(23)
+#define PARA_HALF_CLK_MODE BIT(24)
+
+/* LB_AON_EMMC_CFG_REG1 register defines */
+#define READ_DQS_DELAY(x) ((x) & 0x7F)
+#define READ_DQS_DELAY_MASK (0x7F)
+#define READ_DQS_BYPASS_MODE BIT(7)
+#define CLK_SAMP_DELAY(x) (((x) & 0x7F) << 8)
+#define CLK_SAMP_DELAY_MASK (0x7F << 8)
+#define CLK_SAMP_BYPASS_MODE BIT(15)
+
+/* LB_AON_EMMC_CFG_REG2 register defines */
+#define ZX_DLL_LOCKED BIT(2)
+
+#endif /* _DW_MMC_ZX_H_ */
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index df478ae72e23..8718432751c5 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -22,6 +22,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
@@ -32,7 +33,6 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
-#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
@@ -54,7 +54,7 @@
#define DW_MCI_DMA_THRESHOLD 16
#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
-#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
+#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
@@ -165,12 +165,14 @@ static const struct file_operations dw_mci_req_fops = {
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
- seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
- seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
- seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
- seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
- seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
- seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
+ struct dw_mci *host = s->private;
+
+ seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
+ seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
+ seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
+ seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
+ seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
+ seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
return 0;
}
@@ -234,7 +236,6 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
- struct mmc_data *data;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
u32 cmdr;
@@ -289,10 +290,9 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
if (cmd->flags & MMC_RSP_CRC)
cmdr |= SDMMC_CMD_RESP_CRC;
- data = cmd->data;
- if (data) {
+ if (cmd->data) {
cmdr |= SDMMC_CMD_DAT_EXP;
- if (data->flags & MMC_DATA_WRITE)
+ if (cmd->data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
@@ -335,6 +335,9 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
cmdr = stop->opcode | SDMMC_CMD_STOP |
SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
+ if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
+ cmdr |= SDMMC_CMD_USE_HOLD_REG;
+
return cmdr;
}
@@ -380,7 +383,7 @@ static void dw_mci_start_command(struct dw_mci *host,
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
- struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+ struct mmc_command *stop = &host->stop_abort;
dw_mci_start_command(host, stop, host->stop_cmdr);
}
@@ -409,12 +412,13 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
- if (data)
- if (!data->host_cookie)
- dma_unmap_sg(host->dev,
- data->sg,
- data->sg_len,
- dw_mci_get_dma_dir(data));
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ dma_unmap_sg(host->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
}
static void dw_mci_idmac_reset(struct dw_mci *host)
@@ -612,7 +616,7 @@ static inline int dw_mci_prepare_desc64(struct dw_mci *host,
return 0;
err_own_bit:
/* restore the descriptor chain as it's polluted */
- dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+ dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
dw_mci_idmac_init(host);
return -EINVAL;
@@ -688,7 +692,7 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
return 0;
err_own_bit:
/* restore the descriptor chain as it's polluted */
- dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+ dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
dw_mci_idmac_init(host);
return -EINVAL;
@@ -845,13 +849,13 @@ static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
struct mmc_data *data,
- bool next)
+ int cookie)
{
struct scatterlist *sg;
unsigned int i, sg_len;
- if (!next && data->host_cookie)
- return data->host_cookie;
+ if (data->host_cookie == COOKIE_PRE_MAPPED)
+ return data->sg_len;
/*
* We don't do DMA on "complex" transfers, i.e. with
@@ -876,15 +880,13 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
if (sg_len == 0)
return -EINVAL;
- if (next)
- data->host_cookie = sg_len;
+ data->host_cookie = cookie;
return sg_len;
}
static void dw_mci_pre_req(struct mmc_host *mmc,
- struct mmc_request *mrq,
- bool is_first_req)
+ struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
@@ -892,13 +894,12 @@ static void dw_mci_pre_req(struct mmc_host *mmc,
if (!slot->host->use_dma || !data)
return;
- if (data->host_cookie) {
- data->host_cookie = 0;
- return;
- }
+ /* This data might be unmapped at this time */
+ data->host_cookie = COOKIE_UNMAPPED;
- if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
- data->host_cookie = 0;
+ if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
+ COOKIE_PRE_MAPPED) < 0)
+ data->host_cookie = COOKIE_UNMAPPED;
}
static void dw_mci_post_req(struct mmc_host *mmc,
@@ -911,12 +912,12 @@ static void dw_mci_post_req(struct mmc_host *mmc,
if (!slot->host->use_dma || !data)
return;
- if (data->host_cookie)
+ if (data->host_cookie != COOKIE_UNMAPPED)
dma_unmap_sg(slot->host->dev,
data->sg,
data->sg_len,
dw_mci_get_dma_dir(data));
- data->host_cookie = 0;
+ data->host_cookie = COOKIE_UNMAPPED;
}
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
@@ -1022,7 +1023,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
if (!host->use_dma)
return -ENODEV;
- sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+ sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
if (sg_len < 0) {
host->dma_ops->stop(host);
return sg_len;
@@ -1112,11 +1113,15 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
mci_writel(host, CTRL, temp);
/*
- * Use the initial fifoth_val for PIO mode.
+ * Use the initial fifoth_val for PIO mode. If wm_aligned
+ * is set, we set the watermark equal to the data size.
* If next issued data may be transferred by DMA mode,
* prev_blksz should be invalidated.
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
+ if (host->wm_aligned)
+ dw_mci_adjust_fifoth(host, data);
+ else
+ mci_writel(host, FIFOTH, host->fifoth_val);
host->prev_blksz = 0;
} else {
/*
@@ -1175,12 +1180,25 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
- if (clock != slot->__clk_old || force_clkinit)
- dev_info(&slot->mmc->class_dev,
- "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
- slot->id, host->bus_hz, clock,
- div ? ((host->bus_hz / div) >> 1) :
- host->bus_hz, div);
+ if ((clock != slot->__clk_old &&
+ !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
+ force_clkinit) {
+ /* Silence the verbose log if called from a PM context */
+ if (!force_clkinit)
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+ slot->id, host->bus_hz, clock,
+ div ? ((host->bus_hz / div) >> 1) :
+ host->bus_hz, div);
+
+ /*
+ * If the card is detected by polling, display the message
+ * only once at boot time.
+ */
+ if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
+ slot->mmc->f_min == clock)
+ set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
+ }
/* disable clock */
mci_writel(host, CLKENA, 0);
@@ -1273,10 +1291,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
- if (mrq->stop)
- host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
- else
- host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
+ host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
@@ -1527,22 +1542,34 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
+ if (((mmc->caps & MMC_CAP_NEEDS_POLL)
+ || !mmc_card_is_removable(mmc))) {
present = 1;
- else if (gpio_cd >= 0)
+
+ if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+ if (mmc->caps & MMC_CAP_NEEDS_POLL) {
+ dev_info(&mmc->class_dev,
+ "card is polling.\n");
+ } else {
+ dev_info(&mmc->class_dev,
+ "card is non-removable.\n");
+ }
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ }
+
+ return present;
+ } else if (gpio_cd >= 0)
present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
== 0 ? 1 : 0;
spin_lock_bh(&host->lock);
- if (present) {
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is present\n");
- } else {
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ else if (!present &&
+ !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is not present\n");
- }
spin_unlock_bh(&host->lock);
return present;
@@ -1595,10 +1622,16 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
if (card->type == MMC_TYPE_SDIO ||
card->type == MMC_TYPE_SD_COMBO) {
- set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+ pm_runtime_get_noresume(mmc->parent);
+ set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ }
clk_en_a = clk_en_a_old & ~clken_low_pwr;
} else {
- clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+ pm_runtime_put_noidle(mmc->parent);
+ clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ }
clk_en_a = clk_en_a_old | clken_low_pwr;
}
@@ -1889,8 +1922,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
- if (data->stop ||
- !(host->data_status & (SDMMC_INT_DRTO |
+ if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
@@ -1926,8 +1958,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
- if (data->stop ||
- !(host->data_status & (SDMMC_INT_DRTO |
+ if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
@@ -2003,7 +2034,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
host->cmd = NULL;
host->data = NULL;
- if (mrq->stop)
+ if (!mrq->sbc && mrq->stop)
dw_mci_command_complete(host, mrq->stop);
else
host->cmd_status = 0;
@@ -2615,6 +2646,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->f_min = DW_MCI_FREQ_MIN;
mmc->f_max = DW_MCI_FREQ_MAX;
} else {
+ dev_info(host->dev,
+ "'clock-freq-min-max' property was deprecated.\n");
mmc->f_min = freq[0];
mmc->f_max = freq[1];
}
@@ -2956,6 +2989,11 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+ of_property_read_u32(np, "data-addr", &host->data_addr_override);
+
+ if (of_get_property(np, "fifo-watermark-aligned", NULL))
+ host->wm_aligned = true;
+
if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
pdata->bus_hz = clock_frequency;
@@ -3159,7 +3197,9 @@ int dw_mci_probe(struct dw_mci *host)
host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
dev_info(host->dev, "Version ID is %04x\n", host->verid);
- if (host->verid < DW_MMC_240A)
+ if (host->data_addr_override)
+ host->fifo_reg = host->regs + host->data_addr_override;
+ else if (host->verid < DW_MMC_240A)
host->fifo_reg = host->regs + DATA_OFFSET;
else
host->fifo_reg = host->regs + DATA_240A_OFFSET;
@@ -3267,26 +3307,46 @@ EXPORT_SYMBOL(dw_mci_remove);
-#ifdef CONFIG_PM_SLEEP
-/*
- * TODO: we should probably disable the clock to the card in the suspend path.
- */
-int dw_mci_suspend(struct dw_mci *host)
+#ifdef CONFIG_PM
+int dw_mci_runtime_suspend(struct device *dev)
{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
+ clk_disable_unprepare(host->ciu_clk);
+
+ if (host->cur_slot &&
+ (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+ !mmc_card_is_removable(host->cur_slot->mmc)))
+ clk_disable_unprepare(host->biu_clk);
+
return 0;
}
-EXPORT_SYMBOL(dw_mci_suspend);
+EXPORT_SYMBOL(dw_mci_runtime_suspend);
-int dw_mci_resume(struct dw_mci *host)
+int dw_mci_runtime_resume(struct device *dev)
{
- int i, ret;
+ int i, ret = 0;
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ if (host->cur_slot &&
+ (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+ !mmc_card_is_removable(host->cur_slot->mmc))) {
+ ret = clk_prepare_enable(host->biu_clk);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(host->ciu_clk);
+ if (ret)
+ goto err;
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
+ clk_disable_unprepare(host->ciu_clk);
ret = -ENODEV;
- return ret;
+ goto err;
}
if (host->use_dma && host->dma_ops->init)
@@ -3296,8 +3356,8 @@ int dw_mci_resume(struct dw_mci *host)
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
- host->prev_blksz = 0;
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
@@ -3313,19 +3373,28 @@ int dw_mci_resume(struct dw_mci *host)
if (!slot)
continue;
- if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+ if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
- dw_mci_setup_bus(slot, true);
- }
+
+ /* Force the bus setup to guarantee the clock output is available */
+ dw_mci_setup_bus(slot, true);
}
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd(host);
return 0;
+
+err:
+ if (host->cur_slot &&
+ (mmc_can_gpio_cd(host->cur_slot->mmc) ||
+ !mmc_card_is_removable(host->cur_slot->mmc)))
+ clk_disable_unprepare(host->biu_clk);
+
+ return ret;
}
-EXPORT_SYMBOL(dw_mci_resume);
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SYMBOL(dw_mci_runtime_resume);
+#endif /* CONFIG_PM */
static int __init dw_mci_init(void)
{
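Two small behavioural additions in dw_mmc.c above are worth noting: the DT property "data-addr" can now override the FIFO register offset, and "fifo-watermark-aligned" makes PIO transfers reprogram FIFOTH to the data size. The sketch below shows the resulting FIFO register selection; the helper name is illustrative, the real code assigns host->fifo_reg inline in dw_mci_probe().

#include "dw_mmc.h"

/* Pick the FIFO data register: DT override first, then by controller version. */
static void __iomem *example_fifo_reg(struct dw_mci *host)
{
	if (host->data_addr_override)
		return host->regs + host->data_addr_override;

	if (host->verid < DW_MMC_240A)
		return host->regs + DATA_OFFSET;

	return host->regs + DATA_240A_OFFSET;
}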
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index e8cd2dec3263..ce347361f3dc 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -14,6 +14,269 @@
#ifndef _DW_MMC_H_
#define _DW_MMC_H_
+#include <linux/scatterlist.h>
+#include <linux/mmc/core.h>
+#include <linux/dmaengine.h>
+#include <linux/reset.h>
+#include <linux/interrupt.h>
+
+#define MAX_MCI_SLOTS 2
+
+enum dw_mci_state {
+ STATE_IDLE = 0,
+ STATE_SENDING_CMD,
+ STATE_SENDING_DATA,
+ STATE_DATA_BUSY,
+ STATE_SENDING_STOP,
+ STATE_DATA_ERROR,
+ STATE_SENDING_CMD11,
+ STATE_WAITING_CMD11_DONE,
+};
+
+enum {
+ EVENT_CMD_COMPLETE = 0,
+ EVENT_XFER_COMPLETE,
+ EVENT_DATA_COMPLETE,
+ EVENT_DATA_ERROR,
+};
+
+enum dw_mci_cookie {
+ COOKIE_UNMAPPED,
+ COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */
+ COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */
+};
+
+struct mmc_data;
+
+enum {
+ TRANS_MODE_PIO = 0,
+ TRANS_MODE_IDMAC,
+ TRANS_MODE_EDMAC
+};
+
+struct dw_mci_dma_slave {
+ struct dma_chan *ch;
+ enum dma_transfer_direction direction;
+};
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @irq_lock: Spinlock protecting the INTMASK setting.
+ * @regs: Pointer to MMIO registers.
+ * @fifo_reg: Pointer to MMIO registers for data FIFO
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @sg_miter: PIO mapping scatterlist iterator.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ * or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ * transfer is in progress.
+ * @stop_abort: The command currently prepared for stopping the transfer.
+ * @prev_blksz: The former transfer blksz record.
+ * @timing: Record of current ios timing.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @using_dma: Whether DMA is in use for the current transfer.
+ * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ * command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @ring_size: Buffer size for idma descriptors.
+ * @dms: structure of slave-dma private data.
+ * @phy_regs: physical address of controller's register map
+ * @data_status: Snapshot of SR taken upon completion of the current
+ * data transfer. Only valid when EVENT_DATA_COMPLETE or
+ * EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ * to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ * to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ * processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ * rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @fifoth_val: The value of FIFOTH register.
+ * @verid: Denote Version ID.
+ * @dev: Device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @drv_data: Driver specific data for identified variant of the controller
+ * @priv: Implementation defined private data.
+ * @biu_clk: Pointer to bus interface unit clock instance.
+ * @ciu_clk: Pointer to card interface unit clock instance.
+ * @slot: Slots sharing this MMC controller.
+ * @fifo_depth: depth of FIFO.
+ * @data_addr_override: override fifo reg offset with this value.
+ * @wm_aligned: force the fifo watermark to equal the data length in PIO mode.
+ * Set to true if alignment is needed.
+ * @data_shift: log2 of FIFO item size.
+ * @part_buf_start: Start index in part_buf.
+ * @part_buf_count: Bytes of partial data in part_buf.
+ * @part_buf: Simple buffer for partial fifo reads/writes.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @vqmmc_enabled: Status of vqmmc, should be true or false.
+ * @irq_flags: The flags to be passed to request_irq.
+ * @irq: The irq value to be passed to request_irq.
+ * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
+ * @dto_timer: Timer for broken data transfer over scheme.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * @irq_lock is an irq-safe spinlock protecting the INTMASK register
+ * to allow the interrupt handler to modify it directly. Held for only long
+ * enough to read-modify-write INTMASK and no other locks are grabbed when
+ * holding this one.
+ *
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct dw_mci {
+ spinlock_t lock;
+ spinlock_t irq_lock;
+ void __iomem *regs;
+ void __iomem *fifo_reg;
+ u32 data_addr_override;
+ bool wm_aligned;
+
+ struct scatterlist *sg;
+ struct sg_mapping_iter sg_miter;
+
+ struct dw_mci_slot *cur_slot;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_command stop_abort;
+ unsigned int prev_blksz;
+ unsigned char timing;
+
+ /* DMA interface members */
+ int use_dma;
+ int using_dma;
+ int dma_64bit_address;
+
+ dma_addr_t sg_dma;
+ void *sg_cpu;
+ const struct dw_mci_dma_ops *dma_ops;
+ /* For idmac */
+ unsigned int ring_size;
+
+ /* For edmac */
+ struct dw_mci_dma_slave *dms;
+ /* Registers' physical base address */
+ resource_size_t phy_regs;
+
+ u32 cmd_status;
+ u32 data_status;
+ u32 stop_cmdr;
+ u32 dir_status;
+ struct tasklet_struct tasklet;
+ unsigned long pending_events;
+ unsigned long completed_events;
+ enum dw_mci_state state;
+ struct list_head queue;
+
+ u32 bus_hz;
+ u32 current_speed;
+ u32 num_slots;
+ u32 fifoth_val;
+ u16 verid;
+ struct device *dev;
+ struct dw_mci_board *pdata;
+ const struct dw_mci_drv_data *drv_data;
+ void *priv;
+ struct clk *biu_clk;
+ struct clk *ciu_clk;
+ struct dw_mci_slot *slot[MAX_MCI_SLOTS];
+
+ /* FIFO push and pull */
+ int fifo_depth;
+ int data_shift;
+ u8 part_buf_start;
+ u8 part_buf_count;
+ union {
+ u16 part_buf16;
+ u32 part_buf32;
+ u64 part_buf;
+ };
+ void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+ void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+ bool vqmmc_enabled;
+ unsigned long irq_flags; /* IRQ flags */
+ int irq;
+
+ int sdio_id0;
+
+ struct timer_list cmd11_timer;
+ struct timer_list dto_timer;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+ /* DMA Ops */
+ int (*init)(struct dw_mci *host);
+ int (*start)(struct dw_mci *host, unsigned int sg_len);
+ void (*complete)(void *host);
+ void (*stop)(struct dw_mci *host);
+ void (*cleanup)(struct dw_mci *host);
+ void (*exit)(struct dw_mci *host);
+};
+
+struct dma_pdata;
+
+/* Board platform data */
+struct dw_mci_board {
+ u32 num_slots;
+
+ unsigned int bus_hz; /* Clock speed at the cclk_in pad */
+
+ u32 caps; /* Capabilities */
+ u32 caps2; /* More capabilities */
+ u32 pm_caps; /* PM capabilities */
+ /*
+ * Override fifo depth. If 0, autodetect it from the FIFOTH register,
+ * but note that this may not be reliable after a bootloader has used
+ * it.
+ */
+ unsigned int fifo_depth;
+
+ /* delay in mS before detecting cards after interrupt */
+ u32 detect_delay_ms;
+
+ struct reset_control *rstc;
+ struct dw_mci_dma_ops *dma_ops;
+ struct dma_pdata *data;
+};
+
#define DW_MMC_240A 0x240a
#define DW_MMC_280A 0x280a
@@ -234,9 +497,9 @@
extern int dw_mci_probe(struct dw_mci *host);
extern void dw_mci_remove(struct dw_mci *host);
-#ifdef CONFIG_PM_SLEEP
-extern int dw_mci_suspend(struct dw_mci *host);
-extern int dw_mci_resume(struct dw_mci *host);
+#ifdef CONFIG_PM
+extern int dw_mci_runtime_suspend(struct device *device);
+extern int dw_mci_runtime_resume(struct device *device);
#endif
/**
@@ -272,6 +535,7 @@ struct dw_mci_slot {
#define DW_MMC_CARD_NEED_INIT 1
#define DW_MMC_CARD_NO_LOW_PWR 2
#define DW_MMC_CARD_NO_USE_HOLD 3
+#define DW_MMC_CARD_NEEDS_POLL 4
int id;
int sdio_id;
};
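The new dw_mci_cookie states defined above drive the pre/post request DMA mapping: ->pre_req() maps the scatterlist ahead of time and tags it COOKIE_PRE_MAPPED, the transfer path maps on demand with COOKIE_MAPPED, and the cleanup paths only unmap whatever is still tagged as mapped before resetting it to COOKIE_UNMAPPED. A sketch of the mapping side, mirroring dw_mci_pre_dma_transfer() from the patch (the function name and the open-coded direction choice are illustrative):

#include <linux/dma-mapping.h>

#include "dw_mmc.h"

/* Map the request's scatterlist unless ->pre_req() already did it. */
static int example_pre_dma_transfer(struct dw_mci *host, struct mmc_data *data,
				    int cookie)
{
	int sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;		/* reuse the early mapping */

	sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
			    (data->flags & MMC_DATA_WRITE) ?
			    DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;		/* COOKIE_MAPPED or COOKIE_PRE_MAPPED */

	return sg_len;
}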
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 684087db170b..819ad32964fc 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -320,8 +320,7 @@ dma_unmap:
}
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
- struct mmc_request *mrq,
- bool is_first_req)
+ struct mmc_request *mrq)
{
struct jz4740_mmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
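The jz4740 hunk reflects a tree-wide change to the ->pre_req() host operation: the is_first_req argument is gone, so drivers prepare whatever request they are handed without special-casing the first one. A sketch of the new callback shape follows; the body and names are illustrative, not the jz4740 implementation.

#include <linux/mmc/host.h>

/* New ->pre_req() signature: only the host and the request are passed in. */
static void example_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (!mrq->data)
		return;

	/* driver-specific pre-mapping of mrq->data would go here */
}

static const struct mmc_host_ops example_ops = {
	.pre_req = example_pre_request,
};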
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
new file mode 100644
index 000000000000..5a959783304b
--- /dev/null
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -0,0 +1,857 @@
+/*
+ * Amlogic SD/eMMC driver for the GX/S905 family SoCs
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Kevin Hilman <khilman@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+
+#define DRIVER_NAME "meson-gx-mmc"
+
+#define SD_EMMC_CLOCK 0x0
+#define CLK_DIV_SHIFT 0
+#define CLK_DIV_WIDTH 6
+#define CLK_DIV_MASK 0x3f
+#define CLK_DIV_MAX 63
+#define CLK_SRC_SHIFT 6
+#define CLK_SRC_WIDTH 2
+#define CLK_SRC_MASK 0x3
+#define CLK_SRC_XTAL 0 /* external crystal */
+#define CLK_SRC_XTAL_RATE 24000000
+#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
+#define CLK_SRC_PLL_RATE 1000000000
+#define CLK_PHASE_SHIFT 8
+#define CLK_PHASE_MASK 0x3
+#define CLK_PHASE_0 0
+#define CLK_PHASE_90 1
+#define CLK_PHASE_180 2
+#define CLK_PHASE_270 3
+#define CLK_ALWAYS_ON BIT(24)
+
+#define SD_EMMC_DELAY 0x4
+#define SD_EMMC_ADJUST 0x8
+#define SD_EMMC_CALOUT 0x10
+#define SD_EMMC_START 0x40
+#define START_DESC_INIT BIT(0)
+#define START_DESC_BUSY BIT(1)
+#define START_DESC_ADDR_SHIFT 2
+#define START_DESC_ADDR_MASK (~0x3)
+
+#define SD_EMMC_CFG 0x44
+#define CFG_BUS_WIDTH_SHIFT 0
+#define CFG_BUS_WIDTH_MASK 0x3
+#define CFG_BUS_WIDTH_1 0x0
+#define CFG_BUS_WIDTH_4 0x1
+#define CFG_BUS_WIDTH_8 0x2
+#define CFG_DDR BIT(2)
+#define CFG_BLK_LEN_SHIFT 4
+#define CFG_BLK_LEN_MASK 0xf
+#define CFG_RESP_TIMEOUT_SHIFT 8
+#define CFG_RESP_TIMEOUT_MASK 0xf
+#define CFG_RC_CC_SHIFT 12
+#define CFG_RC_CC_MASK 0xf
+#define CFG_STOP_CLOCK BIT(22)
+#define CFG_CLK_ALWAYS_ON BIT(18)
+#define CFG_CHK_DS BIT(20)
+#define CFG_AUTO_CLK BIT(23)
+
+#define SD_EMMC_STATUS 0x48
+#define STATUS_BUSY BIT(31)
+
+#define SD_EMMC_IRQ_EN 0x4c
+#define IRQ_EN_MASK 0x3fff
+#define IRQ_RXD_ERR_SHIFT 0
+#define IRQ_RXD_ERR_MASK 0xff
+#define IRQ_TXD_ERR BIT(8)
+#define IRQ_DESC_ERR BIT(9)
+#define IRQ_RESP_ERR BIT(10)
+#define IRQ_RESP_TIMEOUT BIT(11)
+#define IRQ_DESC_TIMEOUT BIT(12)
+#define IRQ_END_OF_CHAIN BIT(13)
+#define IRQ_RESP_STATUS BIT(14)
+#define IRQ_SDIO BIT(15)
+
+#define SD_EMMC_CMD_CFG 0x50
+#define SD_EMMC_CMD_ARG 0x54
+#define SD_EMMC_CMD_DAT 0x58
+#define SD_EMMC_CMD_RSP 0x5c
+#define SD_EMMC_CMD_RSP1 0x60
+#define SD_EMMC_CMD_RSP2 0x64
+#define SD_EMMC_CMD_RSP3 0x68
+
+#define SD_EMMC_RXD 0x94
+#define SD_EMMC_TXD 0x94
+#define SD_EMMC_LAST_REG SD_EMMC_TXD
+
+#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
+#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
+#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
+#define MUX_CLK_NUM_PARENTS 2
+
+struct meson_host {
+ struct device *dev;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+
+ spinlock_t lock;
+ void __iomem *regs;
+ int irq;
+ u32 ocr_mask;
+ struct clk *core_clk;
+ struct clk_mux mux;
+ struct clk *mux_clk;
+ struct clk *mux_parent[MUX_CLK_NUM_PARENTS];
+ unsigned long current_clock;
+
+ struct clk_divider cfg_div;
+ struct clk *cfg_div_clk;
+
+ unsigned int bounce_buf_size;
+ void *bounce_buf;
+ dma_addr_t bounce_dma_addr;
+
+ bool vqmmc_enabled;
+};
+
+struct sd_emmc_desc {
+ u32 cmd_cfg;
+ u32 cmd_arg;
+ u32 cmd_data;
+ u32 cmd_resp;
+};
+#define CMD_CFG_LENGTH_SHIFT 0
+#define CMD_CFG_LENGTH_MASK 0x1ff
+#define CMD_CFG_BLOCK_MODE BIT(9)
+#define CMD_CFG_R1B BIT(10)
+#define CMD_CFG_END_OF_CHAIN BIT(11)
+#define CMD_CFG_TIMEOUT_SHIFT 12
+#define CMD_CFG_TIMEOUT_MASK 0xf
+#define CMD_CFG_NO_RESP BIT(16)
+#define CMD_CFG_NO_CMD BIT(17)
+#define CMD_CFG_DATA_IO BIT(18)
+#define CMD_CFG_DATA_WR BIT(19)
+#define CMD_CFG_RESP_NOCRC BIT(20)
+#define CMD_CFG_RESP_128 BIT(21)
+#define CMD_CFG_RESP_NUM BIT(22)
+#define CMD_CFG_DATA_NUM BIT(23)
+#define CMD_CFG_CMD_INDEX_SHIFT 24
+#define CMD_CFG_CMD_INDEX_MASK 0x3f
+#define CMD_CFG_ERROR BIT(30)
+#define CMD_CFG_OWNER BIT(31)
+
+#define CMD_DATA_MASK (~0x3)
+#define CMD_DATA_BIG_ENDIAN BIT(1)
+#define CMD_DATA_SRAM BIT(0)
+#define CMD_RESP_MASK (~0x1)
+#define CMD_RESP_SRAM BIT(0)
+
+static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+ u32 cfg;
+
+ if (clk_rate) {
+ if (WARN_ON(clk_rate > mmc->f_max))
+ clk_rate = mmc->f_max;
+ else if (WARN_ON(clk_rate < mmc->f_min))
+ clk_rate = mmc->f_min;
+ }
+
+ if (clk_rate == host->current_clock)
+ return 0;
+
+ /* stop clock */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ if (!(cfg & CFG_STOP_CLOCK)) {
+ cfg |= CFG_STOP_CLOCK;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+ }
+
+ dev_dbg(host->dev, "change clock rate %u -> %lu\n",
+ mmc->actual_clock, clk_rate);
+
+ if (!clk_rate) {
+ mmc->actual_clock = 0;
+ host->current_clock = 0;
+ /* return with clock being stopped */
+ return 0;
+ }
+
+ ret = clk_set_rate(host->cfg_div_clk, clk_rate);
+ if (ret) {
+ dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
+ clk_rate, ret);
+ return ret;
+ }
+
+ mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
+ host->current_clock = clk_rate;
+
+ if (clk_rate != mmc->actual_clock)
+ dev_dbg(host->dev,
+ "divider requested rate %lu != actual rate %u\n",
+ clk_rate, mmc->actual_clock);
+
+ /* (re)start clock */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ cfg &= ~CFG_STOP_CLOCK;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+
+ return 0;
+}
+
+/*
+ * The SD/eMMC IP block has an internal mux and divider used for
+ * generating the MMC clock. Use the clock framework to create and
+ * manage these clocks.
+ */
+static int meson_mmc_clk_init(struct meson_host *host)
+{
+ struct clk_init_data init;
+ char clk_name[32];
+ int i, ret = 0;
+ const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
+ unsigned int mux_parent_count = 0;
+ const char *clk_div_parents[1];
+ u32 clk_reg, cfg;
+
+ /* get the mux parents */
+ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
+ char name[16];
+
+ snprintf(name, sizeof(name), "clkin%d", i);
+ host->mux_parent[i] = devm_clk_get(host->dev, name);
+ if (IS_ERR(host->mux_parent[i])) {
+ ret = PTR_ERR(host->mux_parent[i]);
+ if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER)
+ dev_err(host->dev, "Missing clock %s\n", name);
+ host->mux_parent[i] = NULL;
+ return ret;
+ }
+
+ mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
+ mux_parent_count++;
+ }
+
+ /* create the mux */
+ snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
+ init.name = clk_name;
+ init.ops = &clk_mux_ops;
+ init.flags = 0;
+ init.parent_names = mux_parent_names;
+ init.num_parents = mux_parent_count;
+
+ host->mux.reg = host->regs + SD_EMMC_CLOCK;
+ host->mux.shift = CLK_SRC_SHIFT;
+ host->mux.mask = CLK_SRC_MASK;
+ host->mux.flags = 0;
+ host->mux.table = NULL;
+ host->mux.hw.init = &init;
+
+ host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
+ if (WARN_ON(IS_ERR(host->mux_clk)))
+ return PTR_ERR(host->mux_clk);
+
+ /* create the divider */
+ snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
+ init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(host->mux_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
+ host->cfg_div.shift = CLK_DIV_SHIFT;
+ host->cfg_div.width = CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
+ if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
+ clk_reg = 0;
+ clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
+ clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
+ clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
+ clk_reg &= ~CLK_ALWAYS_ON;
+ writel(clk_reg, host->regs + SD_EMMC_CLOCK);
+
+ /* Ensure clock starts in "auto" mode, not "always on" */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ cfg &= ~CFG_CLK_ALWAYS_ON;
+ cfg |= CFG_AUTO_CLK;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+
+ ret = clk_prepare_enable(host->cfg_div_clk);
+ if (ret)
+ return ret;
+
+ /* Get the nearest minimum clock to 400KHz */
+ host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000);
+
+ ret = meson_mmc_clk_set(host, host->mmc->f_min);
+ if (!ret)
+ clk_disable_unprepare(host->cfg_div_clk);
+
+ return ret;
+}
+
+static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ u32 bus_width;
+ u32 val, orig;
+
+ /*
+ * The GPIO regulator only controls switching between 1.8V and
+ * 3.3V; it does not support MMC_POWER_OFF or MMC_POWER_ON.
+ */
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
+ }
+
+ break;
+
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ break;
+
+ case MMC_POWER_ON:
+ if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
+ int ret = regulator_enable(mmc->supply.vqmmc);
+
+ if (ret < 0)
+ dev_err(mmc_dev(mmc),
+ "failed to enable vqmmc regulator\n");
+ else
+ host->vqmmc_enabled = true;
+ }
+
+ break;
+ }
+
+
+ meson_mmc_clk_set(host, ios->clock);
+
+ /* Bus width */
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ bus_width = CFG_BUS_WIDTH_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ bus_width = CFG_BUS_WIDTH_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ bus_width = CFG_BUS_WIDTH_8;
+ break;
+ default:
+ dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
+ ios->bus_width);
+ bus_width = CFG_BUS_WIDTH_4;
+ }
+
+ val = readl(host->regs + SD_EMMC_CFG);
+ orig = val;
+
+ val &= ~(CFG_BUS_WIDTH_MASK << CFG_BUS_WIDTH_SHIFT);
+ val |= bus_width << CFG_BUS_WIDTH_SHIFT;
+
+ val &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+ val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;
+
+ val &= ~(CFG_RESP_TIMEOUT_MASK << CFG_RESP_TIMEOUT_SHIFT);
+ val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;
+
+ val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
+ val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;
+
+ val &= ~CFG_DDR;
+ if (ios->timing == MMC_TIMING_UHS_DDR50 ||
+ ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_MMC_HS400)
+ val |= CFG_DDR;
+
+ val &= ~CFG_CHK_DS;
+ if (ios->timing == MMC_TIMING_MMC_HS400)
+ val |= CFG_CHK_DS;
+
+ writel(val, host->regs + SD_EMMC_CFG);
+
+ if (val != orig)
+ dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
+ __func__, orig, val);
+}
+
+static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != mrq);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+ mmc_request_done(host->mmc, mrq);
+
+ return 0;
+}
+
+static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ struct sd_emmc_desc *desc, desc_tmp;
+ u32 cfg;
+ u8 blk_len, cmd_cfg_timeout;
+ unsigned int xfer_bytes = 0;
+
+ /* Setup descriptors */
+ dma_rmb();
+ desc = &desc_tmp;
+ memset(desc, 0, sizeof(struct sd_emmc_desc));
+
+ desc->cmd_cfg |= (cmd->opcode & CMD_CFG_CMD_INDEX_MASK) <<
+ CMD_CFG_CMD_INDEX_SHIFT;
+ desc->cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
+ desc->cmd_arg = cmd->arg;
+
+ /* Response */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ desc->cmd_cfg &= ~CMD_CFG_NO_RESP;
+ if (cmd->flags & MMC_RSP_136)
+ desc->cmd_cfg |= CMD_CFG_RESP_128;
+ desc->cmd_cfg |= CMD_CFG_RESP_NUM;
+ desc->cmd_resp = 0;
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ desc->cmd_cfg |= CMD_CFG_RESP_NOCRC;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ desc->cmd_cfg |= CMD_CFG_R1B;
+ } else {
+ desc->cmd_cfg |= CMD_CFG_NO_RESP;
+ }
+
+ /* data? */
+ if (cmd->data) {
+ desc->cmd_cfg |= CMD_CFG_DATA_IO;
+ if (cmd->data->blocks > 1) {
+ desc->cmd_cfg |= CMD_CFG_BLOCK_MODE;
+ desc->cmd_cfg |=
+ (cmd->data->blocks & CMD_CFG_LENGTH_MASK) <<
+ CMD_CFG_LENGTH_SHIFT;
+
+ /* check if block-size matches, if not update */
+ cfg = readl(host->regs + SD_EMMC_CFG);
+ blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+ blk_len >>= CFG_BLK_LEN_SHIFT;
+ if (blk_len != ilog2(cmd->data->blksz)) {
+ dev_dbg(host->dev, "%s: update blk_len %d -> %d\n",
+ __func__, blk_len,
+ ilog2(cmd->data->blksz));
+ blk_len = ilog2(cmd->data->blksz);
+ cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
+ cfg |= blk_len << CFG_BLK_LEN_SHIFT;
+ writel(cfg, host->regs + SD_EMMC_CFG);
+ }
+ } else {
+ desc->cmd_cfg &= ~CMD_CFG_BLOCK_MODE;
+ desc->cmd_cfg |=
+ (cmd->data->blksz & CMD_CFG_LENGTH_MASK) <<
+ CMD_CFG_LENGTH_SHIFT;
+ }
+
+ cmd->data->bytes_xfered = 0;
+ xfer_bytes = cmd->data->blksz * cmd->data->blocks;
+ if (cmd->data->flags & MMC_DATA_WRITE) {
+ desc->cmd_cfg |= CMD_CFG_DATA_WR;
+ WARN_ON(xfer_bytes > host->bounce_buf_size);
+ sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len,
+ host->bounce_buf, xfer_bytes);
+ cmd->data->bytes_xfered = xfer_bytes;
+ dma_wmb();
+ } else {
+ desc->cmd_cfg &= ~CMD_CFG_DATA_WR;
+ }
+
+ if (xfer_bytes > 0) {
+ desc->cmd_cfg &= ~CMD_CFG_DATA_NUM;
+ desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
+ } else {
+ /* write data to data_addr */
+ desc->cmd_cfg |= CMD_CFG_DATA_NUM;
+ desc->cmd_data = 0;
+ }
+
+ cmd_cfg_timeout = 12;
+ } else {
+ desc->cmd_cfg &= ~CMD_CFG_DATA_IO;
+ cmd_cfg_timeout = 10;
+ }
+ desc->cmd_cfg |= (cmd_cfg_timeout & CMD_CFG_TIMEOUT_MASK) <<
+ CMD_CFG_TIMEOUT_SHIFT;
+
+ host->cmd = cmd;
+
+ /* Last descriptor */
+ desc->cmd_cfg |= CMD_CFG_END_OF_CHAIN;
+ writel(desc->cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
+ writel(desc->cmd_data, host->regs + SD_EMMC_CMD_DAT);
+ writel(desc->cmd_resp, host->regs + SD_EMMC_CMD_RSP);
+ wmb(); /* ensure descriptor is written before kicked */
+ writel(desc->cmd_arg, host->regs + SD_EMMC_CMD_ARG);
+}
+
+static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+
+ /* Stop execution */
+ writel(0, host->regs + SD_EMMC_START);
+
+ host->mrq = mrq;
+
+ if (mrq->sbc)
+ meson_mmc_start_cmd(mmc, mrq->sbc);
+ else
+ meson_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct meson_host *host = mmc_priv(mmc);
+
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
+ cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
+ cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
+ cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
+ }
+
+ return 0;
+}
+
+static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
+{
+ struct meson_host *host = dev_id;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ u32 irq_en, status, raw_status;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (WARN_ON(!host))
+ return IRQ_NONE;
+
+ cmd = host->cmd;
+
+ mrq = host->mrq;
+
+ if (WARN_ON(!mrq))
+ return IRQ_NONE;
+
+ if (WARN_ON(!cmd))
+ return IRQ_NONE;
+
+ spin_lock(&host->lock);
+ irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
+ raw_status = readl(host->regs + SD_EMMC_STATUS);
+ status = raw_status & irq_en;
+
+ if (!status) {
+ dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
+ raw_status, irq_en);
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ cmd->error = 0;
+ if (status & IRQ_RXD_ERR_MASK) {
+ dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
+ cmd->error = -EILSEQ;
+ }
+ if (status & IRQ_TXD_ERR) {
+ dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
+ cmd->error = -EILSEQ;
+ }
+ if (status & IRQ_DESC_ERR)
+ dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
+ if (status & IRQ_RESP_ERR) {
+ dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
+ cmd->error = -EILSEQ;
+ }
+ if (status & IRQ_RESP_TIMEOUT) {
+ dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
+ cmd->error = -ETIMEDOUT;
+ }
+ if (status & IRQ_DESC_TIMEOUT) {
+ dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
+ cmd->error = -ETIMEDOUT;
+ }
+ if (status & IRQ_SDIO)
+ dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");
+
+ if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS))
+ ret = IRQ_WAKE_THREAD;
+ else {
+ dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
+ status, cmd->opcode, cmd->arg,
+ cmd->flags, mrq->stop ? 1 : 0);
+ if (cmd->data) {
+ struct mmc_data *data = cmd->data;
+
+ dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
+ data->blksz, data->blocks, data->flags,
+ data->flags & MMC_DATA_WRITE ? "write" : "",
+ data->flags & MMC_DATA_READ ? "read" : "");
+ }
+ }
+
+out:
+ /* ack all (enabled) interrupts */
+ writel(status, host->regs + SD_EMMC_STATUS);
+
+ if (ret == IRQ_HANDLED) {
+ meson_mmc_read_resp(host->mmc, cmd);
+ meson_mmc_request_done(host->mmc, cmd->mrq);
+ }
+
+ spin_unlock(&host->lock);
+ return ret;
+}
+
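+/*
+ * Threaded half of the IRQ handler: copy read data out of the bounce
+ * buffer, read back the command response and either complete the
+ * request or send the stop command.
+ */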
+static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
+{
+ struct meson_host *host = dev_id;
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data;
+ unsigned int xfer_bytes;
+
+ if (WARN_ON(!mrq))
+ return IRQ_NONE;
+
+ if (WARN_ON(!cmd))
+ return IRQ_NONE;
+
+ data = cmd->data;
+ if (data && data->flags & MMC_DATA_READ) {
+ xfer_bytes = data->blksz * data->blocks;
+ WARN_ON(xfer_bytes > host->bounce_buf_size);
+ sg_copy_from_buffer(data->sg, data->sg_len,
+ host->bounce_buf, xfer_bytes);
+ data->bytes_xfered = xfer_bytes;
+ }
+
+ meson_mmc_read_resp(host->mmc, cmd);
+ if (!data || !data->stop || mrq->sbc)
+ meson_mmc_request_done(host->mmc, mrq);
+ else
+ meson_mmc_start_cmd(host->mmc, data->stop);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * NOTE: we only need this until the GPIO/pinctrl driver can handle
+ * interrupts. For now, the MMC core will use this for polling.
+ */
+static int meson_mmc_get_cd(struct mmc_host *mmc)
+{
+ int status = mmc_gpio_get_cd(mmc);
+
+ if (status == -ENOSYS)
+ return 1; /* assume present */
+
+ return status;
+}
+
+static const struct mmc_host_ops meson_mmc_ops = {
+ .request = meson_mmc_request,
+ .set_ios = meson_mmc_set_ios,
+ .get_cd = meson_mmc_get_cd,
+};
+
+static int meson_mmc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct meson_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, host);
+
+ spin_lock_init(&host->lock);
+
+ /* Get regulators and the supported OCR mask */
+ host->vqmmc_enabled = false;
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto free_host;
+
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
+ goto free_host;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs)) {
+ ret = PTR_ERR(host->regs);
+ goto free_host;
+ }
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get interrupt resource.\n");
+ ret = -EINVAL;
+ goto free_host;
+ }
+
+ host->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ goto free_host;
+ }
+
+ ret = clk_prepare_enable(host->core_clk);
+ if (ret)
+ goto free_host;
+
+ ret = meson_mmc_clk_init(host);
+ if (ret)
+ goto free_host;
+
+ /* Stop execution */
+ writel(0, host->regs + SD_EMMC_START);
+
+ /* clear, ack, enable all interrupts */
+ writel(0, host->regs + SD_EMMC_IRQ_EN);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
+
+ ret = devm_request_threaded_irq(&pdev->dev, host->irq,
+ meson_mmc_irq, meson_mmc_irq_thread,
+ IRQF_SHARED, DRIVER_NAME, host);
+ if (ret)
+ goto free_host;
+
+ mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
+ mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
+
+ /* data bounce buffer */
+ host->bounce_buf_size = mmc->max_req_size;
+ host->bounce_buf =
+ dma_alloc_coherent(host->dev, host->bounce_buf_size,
+ &host->bounce_dma_addr, GFP_KERNEL);
+ if (host->bounce_buf == NULL) {
+ dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
+ ret = -ENOMEM;
+ goto free_host;
+ }
+
+ mmc->ops = &meson_mmc_ops;
+ mmc_add_host(mmc);
+
+ return 0;
+
+free_host:
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int meson_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_host *host = dev_get_drvdata(&pdev->dev);
+
+ /* disable interrupts */
+ writel(0, host->regs + SD_EMMC_IRQ_EN);
+
+ dma_free_coherent(host->dev, host->bounce_buf_size,
+ host->bounce_buf, host->bounce_dma_addr);
+
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+ return 0;
+}
+
+static const struct of_device_id meson_mmc_of_match[] = {
+ { .compatible = "amlogic,meson-gx-mmc", },
+ { .compatible = "amlogic,meson-gxbb-mmc", },
+ { .compatible = "amlogic,meson-gxl-mmc", },
+ { .compatible = "amlogic,meson-gxm-mmc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
+
+static struct platform_driver meson_mmc_driver = {
+ .probe = meson_mmc_probe,
+ .remove = meson_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(meson_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mmc_driver);
+
+MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
+MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index df990bb8c873..0c6420bb2f00 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -71,7 +71,12 @@ static unsigned int fmax = 515633;
* @f_max: maximum clk frequency supported by the controller.
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
- * @busy_detect: true if busy detection on dat0 is supported
+ * @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
+ * @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
+ * indicating that the card is busy
+ * @busy_detect_mask: bitmask identifying the bit to set in MMCIMASK0 in order
+ * to receive busy end detection interrupts
* @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply
* @explicit_mclk_control: enable explicit mclk control in driver.
* @qcom_fifo: enables qcom specific fifo pio read logic.
@@ -98,6 +103,9 @@ struct variant_data {
bool signal_direction;
bool pwrreg_clkgate;
bool busy_detect;
+ u32 busy_dpsm_flag;
+ u32 busy_detect_flag;
+ u32 busy_detect_mask;
bool pwrreg_nopower;
bool explicit_mclk_control;
bool qcom_fifo;
@@ -137,7 +145,7 @@ static struct variant_data variant_u300 = {
.clkreg_enable = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.datalength_bits = 16,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
@@ -152,7 +160,7 @@ static struct variant_data variant_nomadik = {
.clkreg = MCI_CLK_ENABLE,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.datalength_bits = 24,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -170,7 +178,7 @@ static struct variant_data variant_ux500 = {
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.datalength_bits = 24,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -178,6 +186,9 @@ static struct variant_data variant_ux500 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
+ .busy_detect_flag = MCI_ST_CARDBUSY,
+ .busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
};
@@ -188,9 +199,9 @@ static struct variant_data variant_ux500v2 = {
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
- .datactrl_mask_ddrmode = MCI_ST_DPSM_DDRMODE,
+ .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
- .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.blksz_datactrl16 = true,
@@ -199,6 +210,9 @@ static struct variant_data variant_ux500v2 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
+ .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
+ .busy_detect_flag = MCI_ST_CARDBUSY,
+ .busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
};
@@ -210,7 +224,7 @@ static struct variant_data variant_qcom = {
MCI_QCOM_CLK_SELECT_IN_FBCLK,
.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
- .data_cmd_enable = MCI_QCOM_CSPM_DATCMD,
+ .data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.blksz_datactrl4 = true,
.datalength_bits = 24,
.pwrreg_powerup = MCI_PWR_UP,
@@ -220,6 +234,7 @@ static struct variant_data variant_qcom = {
.qcom_dml = true,
};
+/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -227,7 +242,7 @@ static int mmci_card_busy(struct mmc_host *mmc)
int busy = 0;
spin_lock_irqsave(&host->lock, flags);
- if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
+ if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
busy = 1;
spin_unlock_irqrestore(&host->lock, flags);
@@ -294,8 +309,8 @@ static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
*/
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
- /* Keep ST Micro busy mode if enabled */
- datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
+ /* Keep busy mode in DPSM if enabled */
+ datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
if (host->datactrl_reg != datactrl) {
host->datactrl_reg = datactrl;
@@ -492,6 +507,7 @@ static void mmci_dma_data_error(struct mmci_host *host)
{
dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
dmaengine_terminate_all(host->dma_current);
+ host->dma_in_progress = false;
host->dma_current = NULL;
host->dma_desc_current = NULL;
host->data->host_cookie = 0;
@@ -550,6 +566,7 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
mmci_dma_release(host);
}
+ host->dma_in_progress = false;
host->dma_current = NULL;
host->dma_desc_current = NULL;
}
@@ -650,6 +667,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
dev_vdbg(mmc_dev(host->mmc),
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
+ host->dma_in_progress = true;
dmaengine_submit(host->dma_desc_current);
dma_async_issue_pending(host->dma_current);
@@ -684,8 +702,7 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
next->dma_chan = NULL;
}
-static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
@@ -726,8 +743,10 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (host->dma_desc_current == next->dma_desc)
host->dma_desc_current = NULL;
- if (host->dma_current == next->dma_chan)
+ if (host->dma_current == next->dma_chan) {
+ host->dma_in_progress = false;
host->dma_current = NULL;
+ }
next->dma_desc = NULL;
next->dma_chan = NULL;
@@ -973,37 +992,75 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
void __iomem *base = host->base;
- bool sbc, busy_resp;
+ bool sbc;
if (!cmd)
return;
sbc = (cmd == host->mrq->sbc);
- busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);
- if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
- MCI_CMDSENT|MCI_CMDRESPEND)))
+ /*
+ * At least one of these interrupts must be set for the command
+ * to be considered worth handling. Note that we tag on any
+ * latent IRQs postponed due to waiting for busy status.
+ */
+ if (!((status|host->busy_status) &
+ (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
return;
- /* Check if we need to wait for busy completion. */
- if (host->busy_status && (status & MCI_ST_CARDBUSY))
- return;
+ /*
+ * ST Micro variant: handle busy detection.
+ */
+ if (host->variant->busy_detect) {
+ bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
- /* Enable busy completion if needed and supported. */
- if (!host->busy_status && busy_resp &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
- writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
- base + MMCIMASK0);
- host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
- return;
- }
+ /* We are busy with a command, return */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag))
+ return;
+
+ /*
+ * We were not busy, but we now got a busy response on
+ * something that was not an error, and we double-check
+ * that the special busy status bit is still set before
+ * proceeding.
+ */
+ if (!host->busy_status && busy_resp &&
+ !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+
+ /* Clear the busy start IRQ */
+ writel(host->variant->busy_detect_mask,
+ host->base + MMCICLEAR);
+
+ /* Unmask the busy end IRQ */
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ /*
+ * Now cache the last response status code (until
+ * the busy bit goes low), and return.
+ */
+ host->busy_status =
+ status & (MCI_CMDSENT|MCI_CMDRESPEND);
+ return;
+ }
+
+ /*
+ * At this point we are not busy with a command and have not
+ * received a new busy request; clear and mask the busy end
+ * IRQ and fall through to process the IRQ.
+ */
+ if (host->busy_status) {
- /* At busy completion, mask the IRQ and complete the request. */
- if (host->busy_status) {
- writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
- base + MMCIMASK0);
- host->busy_status = 0;
+ writel(host->variant->busy_detect_mask,
+ host->base + MMCICLEAR);
+
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_status = 0;
+ }
}
host->cmd = NULL;
@@ -1240,12 +1297,21 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
}
/*
- * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
- * enabled) since the HW seems to be triggering the IRQ on both
- * edges while monitoring DAT0 for busy completion.
+ * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
+ * enabled) in the mmci_cmd_irq() function, where the ST Micro
+ * busy detection variant is handled. Since the HW seems to
+ * trigger the IRQ on both edges while monitoring DAT0 for busy
+ * completion, and the same status bit is used to monitor the
+ * start and end of busy detection, special care must be taken
+ * to make sure that both start and end interrupts are always
+ * cleared one after the other.
*/
status &= readl(host->base + MMCIMASK0);
- writel(status, host->base + MMCICLEAR);
+ if (host->variant->busy_detect)
+ writel(status & ~host->variant->busy_detect_mask,
+ host->base + MMCICLEAR);
+ else
+ writel(status, host->base + MMCICLEAR);
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
@@ -1257,9 +1323,11 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
mmci_data_irq(host, host->data, status);
}
- /* Don't poll for busy completion in irq context. */
- if (host->busy_status)
- status &= ~MCI_ST_CARDBUSY;
+ /*
+ * Don't poll for busy completion in irq context.
+ */
+ if (host->variant->busy_detect && host->busy_status)
+ status &= ~host->variant->busy_detect_flag;
ret = 1;
} while (status);
@@ -1612,9 +1680,18 @@ static int mmci_probe(struct amba_device *dev,
/* We support these capabilities. */
mmc->caps |= MMC_CAP_CMD23;
+ /*
+ * Enable busy detection.
+ */
if (variant->busy_detect) {
mmci_ops.card_busy = mmci_card_busy;
- mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
+ /*
+ * Not all variants have a flag to enable busy detection
+ * in the DPSM, but if they do, set it here.
+ */
+ if (variant->busy_dpsm_flag)
+ mmci_write_datactrlreg(host,
+ host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
mmc->max_busy_timeout = 0;
}
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index a1f5e4f49e2a..4a8bef1aac8f 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -51,25 +51,27 @@
#define MCI_QCOM_CLK_SELECT_IN_DDR_MODE (BIT(14) | BIT(15))
#define MMCIARGUMENT 0x008
-#define MMCICOMMAND 0x00c
-#define MCI_CPSM_RESPONSE (1 << 6)
-#define MCI_CPSM_LONGRSP (1 << 7)
-#define MCI_CPSM_INTERRUPT (1 << 8)
-#define MCI_CPSM_PENDING (1 << 9)
-#define MCI_CPSM_ENABLE (1 << 10)
-/* Argument flag extenstions in the ST Micro versions */
-#define MCI_ST_SDIO_SUSP (1 << 11)
-#define MCI_ST_ENCMD_COMPL (1 << 12)
-#define MCI_ST_NIEN (1 << 13)
-#define MCI_ST_CE_ATACMD (1 << 14)
-/* Modified on Qualcomm Integrations */
-#define MCI_QCOM_CSPM_DATCMD BIT(12)
-#define MCI_QCOM_CSPM_MCIABORT BIT(13)
-#define MCI_QCOM_CSPM_CCSENABLE BIT(14)
-#define MCI_QCOM_CSPM_CCSDISABLE BIT(15)
-#define MCI_QCOM_CSPM_AUTO_CMD19 BIT(16)
-#define MCI_QCOM_CSPM_AUTO_CMD21 BIT(21)
+/* The command register controls the Command Path State Machine (CPSM) */
+#define MMCICOMMAND 0x00c
+#define MCI_CPSM_RESPONSE BIT(6)
+#define MCI_CPSM_LONGRSP BIT(7)
+#define MCI_CPSM_INTERRUPT BIT(8)
+#define MCI_CPSM_PENDING BIT(9)
+#define MCI_CPSM_ENABLE BIT(10)
+/* Command register flag extensions in the ST Micro versions */
+#define MCI_CPSM_ST_SDIO_SUSP BIT(11)
+#define MCI_CPSM_ST_ENCMD_COMPL BIT(12)
+#define MCI_CPSM_ST_NIEN BIT(13)
+#define MCI_CPSM_ST_CE_ATACMD BIT(14)
+/* Command register flag extensions in the Qualcomm versions */
+#define MCI_CPSM_QCOM_PROGENA BIT(11)
+#define MCI_CPSM_QCOM_DATCMD BIT(12)
+#define MCI_CPSM_QCOM_MCIABORT BIT(13)
+#define MCI_CPSM_QCOM_CCSENABLE BIT(14)
+#define MCI_CPSM_QCOM_CCSDISABLE BIT(15)
+#define MCI_CPSM_QCOM_AUTO_CMD19 BIT(16)
+#define MCI_CPSM_QCOM_AUTO_CMD21 BIT(21)
#define MMCIRESPCMD 0x010
#define MMCIRESPONSE0 0x014
@@ -78,22 +80,27 @@
#define MMCIRESPONSE3 0x020
#define MMCIDATATIMER 0x024
#define MMCIDATALENGTH 0x028
+
+/* The data control register controls the Data Path State Machine (DPSM) */
#define MMCIDATACTRL 0x02c
-#define MCI_DPSM_ENABLE (1 << 0)
-#define MCI_DPSM_DIRECTION (1 << 1)
-#define MCI_DPSM_MODE (1 << 2)
-#define MCI_DPSM_DMAENABLE (1 << 3)
-#define MCI_DPSM_BLOCKSIZE (1 << 4)
+#define MCI_DPSM_ENABLE BIT(0)
+#define MCI_DPSM_DIRECTION BIT(1)
+#define MCI_DPSM_MODE BIT(2)
+#define MCI_DPSM_DMAENABLE BIT(3)
+#define MCI_DPSM_BLOCKSIZE BIT(4)
/* Control register extensions in the ST Micro U300 and Ux500 versions */
-#define MCI_ST_DPSM_RWSTART (1 << 8)
-#define MCI_ST_DPSM_RWSTOP (1 << 9)
-#define MCI_ST_DPSM_RWMOD (1 << 10)
-#define MCI_ST_DPSM_SDIOEN (1 << 11)
+#define MCI_DPSM_ST_RWSTART BIT(8)
+#define MCI_DPSM_ST_RWSTOP BIT(9)
+#define MCI_DPSM_ST_RWMOD BIT(10)
+#define MCI_DPSM_ST_SDIOEN BIT(11)
/* Control register extensions in the ST Micro Ux500 versions */
-#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
-#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
-#define MCI_ST_DPSM_BUSYMODE (1 << 14)
-#define MCI_ST_DPSM_DDRMODE (1 << 15)
+#define MCI_DPSM_ST_DMAREQCTL BIT(12)
+#define MCI_DPSM_ST_DBOOTMODEEN BIT(13)
+#define MCI_DPSM_ST_BUSYMODE BIT(14)
+#define MCI_DPSM_ST_DDRMODE BIT(15)
+/* Control register extensions in the Qualcomm versions */
+#define MCI_DPSM_QCOM_DATA_PEND BIT(17)
+#define MCI_DPSM_QCOM_RX_DATA_PEND BIT(20)
#define MMCIDATACNT 0x030
#define MMCISTATUS 0x034
@@ -167,7 +174,7 @@
/* Extended status bits for the ST Micro variants */
#define MCI_ST_SDIOITMASK (1 << 22)
#define MCI_ST_CEATAENDMASK (1 << 23)
-#define MCI_ST_BUSYEND (1 << 24)
+#define MCI_ST_BUSYENDMASK (1 << 24)
#define MMCIMASK1 0x040
#define MMCIFIFOCNT 0x048
@@ -238,8 +245,9 @@ struct mmci_host {
struct dma_chan *dma_tx_channel;
struct dma_async_tx_descriptor *dma_desc_current;
struct mmci_host_next next_data;
+ bool dma_in_progress;
-#define dma_inprogress(host) ((host)->dma_current)
+#define dma_inprogress(host) ((host)->dma_in_progress)
#else
#define dma_inprogress(host) (0)
#endif
diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c
index 2b7fc3764803..00750c9d3514 100644
--- a/drivers/mmc/host/mmci_qcom_dml.c
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -170,7 +170,7 @@ int dml_hw_init(struct mmci_host *host, struct device_node *np)
writel_relaxed(producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT),
base + DML_PIPE_ID);
- /* Make sure dml intialization is finished */
+ /* Make sure dml initialization is finished */
mb();
return 0;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 84e9afcb5c09..b235d8da0602 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -28,6 +28,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
@@ -579,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
}
sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | (div % 0xff));
+ (mode << 8) | div);
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
@@ -927,8 +928,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
msdc_start_command(host, mrq, mrq->cmd);
}
-static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msdc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
@@ -1075,11 +1075,8 @@ static int msdc_card_busy(struct mmc_host *mmc)
struct msdc_host *host = mmc_priv(mmc);
u32 status = readl(host->base + MSDC_PS);
- /* check if any pin between dat[0:3] is low */
- if (((status >> 16) & 0xf) != 0xf)
- return 1;
-
- return 0;
+ /* only check if data0 is low */
+ return !(status & BIT(16));
}
static void msdc_request_timeout(struct work_struct *work)
@@ -1562,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = host->src_clk_freq / (4 * 255);
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
@@ -1713,6 +1710,7 @@ static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt8135-mmc", },
{}
};
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 44ecebd1ea8c..add1e70195ea 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -153,7 +153,11 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
}
}
- if (data) {
+ if (cmd == mrq->sbc) {
+ /* Finished CMD23, now send actual command. */
+ mxs_mmc_start_cmd(host, mrq->cmd);
+ return;
+ } else if (data) {
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, ssp->dma_dir);
/*
@@ -166,7 +170,7 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
data->bytes_xfered = 0;
host->data = NULL;
- if (mrq->stop) {
+ if (data->stop && (data->error || !mrq->sbc)) {
mxs_mmc_start_cmd(host, mrq->stop);
return;
}
@@ -309,6 +313,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +424,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
ssp->base + HW_SSP_BLOCK_SIZE);
}
- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
- (cmd->opcode == SD_IO_RW_EXTENDED))
+ if (cmd->opcode == SD_IO_RW_EXTENDED)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
@@ -493,7 +499,11 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
host->mrq = mrq;
- mxs_mmc_start_cmd(host, mrq->cmd);
+
+ if (mrq->sbc)
+ mxs_mmc_start_cmd(host, mrq->sbc);
+ else
+ mxs_mmc_start_cmd(host, mrq->cmd);
}
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -640,7 +650,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
/* set mmc core parameters */
mmc->ops = &mxs_mmc_ops;
mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;
host->broken_cd = of_property_read_bool(np, "broken-cd");
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index be3c49fa7382..bd49f34d7654 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -893,7 +893,7 @@ static void mmc_omap_cover_handler(unsigned long param)
* If no card is inserted, we postpone polling until
* the cover has been closed.
*/
- if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
+ if (slot->mmc->card == NULL)
return;
mod_timer(&slot->cover_timer,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5f2f24a7360d..a58bd653ed8b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1162,7 +1162,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
if (status & ERR_EN) {
omap_hsmmc_dbg_report_irq(host, status);
- if (status & (CTO_EN | CCRC_EN))
+ if (status & (CTO_EN | CCRC_EN | CEB_EN))
end_cmd = 1;
if (host->data || host->response_busy) {
end_trans = !end_cmd;
@@ -1469,10 +1469,11 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
}
static void set_data_timeout(struct omap_hsmmc_host *host,
- unsigned int timeout_ns,
+ unsigned long long timeout_ns,
unsigned int timeout_clks)
{
- unsigned int timeout, cycle_ns;
+ unsigned long long timeout = timeout_ns;
+ unsigned int cycle_ns;
uint32_t reg, clkd, dto = 0;
reg = OMAP_HSMMC_READ(host->base, SYSCTL);
@@ -1481,7 +1482,7 @@ static void set_data_timeout(struct omap_hsmmc_host *host,
clkd = 1;
cycle_ns = 1000000000 / (host->clk_rate / clkd);
- timeout = timeout_ns / cycle_ns;
+ do_div(timeout, cycle_ns);
timeout += timeout_clks;
if (timeout) {
while ((timeout & 0x80000000) == 0) {
@@ -1527,16 +1528,24 @@ static int
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
int ret;
+ unsigned long long timeout;
+
host->data = req->data;
if (req->data == NULL) {
OMAP_HSMMC_WRITE(host->base, BLK, 0);
- /*
- * Set an arbitrary 100ms data timeout for commands with
- * busy signal.
- */
- if (req->cmd->flags & MMC_RSP_BUSY)
- set_data_timeout(host, 100000000U, 0);
+ if (req->cmd->flags & MMC_RSP_BUSY) {
+ timeout = req->cmd->busy_timeout * NSEC_PER_MSEC;
+
+ /*
+ * Set an arbitrary 100ms data timeout for commands with
+ * busy signal and no indication of busy_timeout.
+ */
+ if (!timeout)
+ timeout = 100000000U;
+
+ set_data_timeout(host, timeout, 0);
+ }
return 0;
}
@@ -1565,8 +1574,7 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
}
}
-static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 3ccaa1415f33..41b57713b620 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -190,8 +190,7 @@ static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
return using_cookie;
}
-static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
@@ -708,7 +707,7 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
u8 opcode, u8 sample_point)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
err = sd_change_phase(host, sample_point, true);
if (err < 0)
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 6e9c0f8fddb1..12d2fbe9c520 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -682,7 +682,7 @@ static int sd_tuning_rx_cmd(struct rtsx_usb_sdmmc *host,
u8 opcode, u8 sample_point)
{
int err;
- struct mmc_command cmd = {0};
+ struct mmc_command cmd = {};
err = sd_change_phase(host, sample_point, 0);
if (err)
@@ -1374,6 +1374,8 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
mutex_init(&host->host_mutex);
rtsx_usb_init_host(host);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_enable(&pdev->dev);
#ifdef RTSX_USB_USE_LEDS_CLASS
@@ -1428,6 +1430,7 @@ static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
mmc_free_host(mmc);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
platform_set_drvdata(pdev, NULL);
dev_dbg(&(pdev->dev),
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index c531deef3258..7a173f8c455b 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
@@ -28,7 +29,6 @@
#include <mach/dma.h>
#include <mach/gpio-samsung.h>
-#include <linux/platform_data/dma-s3c24xx.h>
#include <linux/platform_data/mmc-s3cmci.h>
#include "s3cmci.h"
@@ -1682,19 +1682,13 @@ static int s3cmci_probe(struct platform_device *pdev)
gpio_direction_input(host->pdata->gpio_wprotect);
}
- /* depending on the dma state, get a dma channel to use. */
+ /* Depending on the dma state, get a DMA channel to use. */
if (s3cmci_host_usedma(host)) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma = dma_request_slave_channel_compat(mask,
- s3c24xx_dma_filter, (void *)DMACH_SDI, &pdev->dev, "rx-tx");
- if (!host->dma) {
+ host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+ ret = PTR_ERR_OR_ZERO(host->dma);
+ if (ret) {
dev_err(&pdev->dev, "cannot get DMA channel.\n");
- ret = -EBUSY;
goto probe_free_gpio_wp;
}
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 81d4dc034793..9dcb7048e3b1 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -328,6 +328,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
{ "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+ { "80860F14" , "2" , &sdhci_acpi_slot_int_sdio },
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
{ "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
@@ -394,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
list_for_each_entry(child, &device->children, node)
- acpi_device_fix_up_power(child);
+ if (child->status.present && child->status.enabled)
+ acpi_device_fix_up_power(child);
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
@@ -465,7 +467,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
- if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) {
+ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ goto err_free;
dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
}
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
new file mode 100644
index 000000000000..316cfec3f005
--- /dev/null
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+
+#include "sdhci-pltfm.h"
+
+/* HRS - Host Register Set (specific to Cadence) */
+#define SDHCI_CDNS_HRS04 0x10 /* PHY access port */
+#define SDHCI_CDNS_HRS04_ACK BIT(26)
+#define SDHCI_CDNS_HRS04_RD BIT(25)
+#define SDHCI_CDNS_HRS04_WR BIT(24)
+#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16
+#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
+#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
+
+#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
+#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
+#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8
+#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f
+#define SDHCI_CDNS_HRS06_MODE_MASK 0x7
+#define SDHCI_CDNS_HRS06_MODE_SD 0x0
+#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
+#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
+#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4
+#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
+
+/* SRS - Slot Register Set (SDHCI-compatible) */
+#define SDHCI_CDNS_SRS_BASE 0x200
+
+/* PHY */
+#define SDHCI_CDNS_PHY_DLY_SD_HS 0x00
+#define SDHCI_CDNS_PHY_DLY_SD_DEFAULT 0x01
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR12 0x02
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR25 0x03
+#define SDHCI_CDNS_PHY_DLY_UHS_SDR50 0x04
+#define SDHCI_CDNS_PHY_DLY_UHS_DDR50 0x05
+#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY 0x06
+#define SDHCI_CDNS_PHY_DLY_EMMC_SDR 0x07
+#define SDHCI_CDNS_PHY_DLY_EMMC_DDR 0x08
+
+/*
+ * The tuned value register is 6 bits wide, but not the whole range is
+ * available. The range 0-42 seems to be available (43 wraps around to 0),
+ * but I am not quite sure if it is official. Use only 0 to 39 for safety.
+ */
+#define SDHCI_CDNS_MAX_TUNING_LOOP 40
+
+struct sdhci_cdns_priv {
+ void __iomem *hrs_addr;
+};
+
+static void sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
+ u8 addr, u8 data)
+{
+ void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04;
+ u32 tmp;
+
+ tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
+ (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+ writel(tmp, reg);
+
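+ /* strobe the WR bit to carry out the PHY register write */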
+ tmp |= SDHCI_CDNS_HRS04_WR;
+ writel(tmp, reg);
+
+ tmp &= ~SDHCI_CDNS_HRS04_WR;
+ writel(tmp, reg);
+}
+
+static void sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
+{
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_HS, 4);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SD_DEFAULT, 4);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_LEGACY, 9);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_SDR, 2);
+ sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_EMMC_DDR, 3);
+}
+
+static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return sdhci_pltfm_priv(pltfm_host);
+}
+
+static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
+{
+ /*
+ * Cadence's spec says the Timeout Clock Frequency is the same as the
+ * Base Clock Frequency. Divide it by 1000 to return a value in kHz.
+ */
+ return host->max_clk / 1000;
+}
+
+static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u32 mode, tmp;
+
+ switch (timing) {
+ case MMC_TIMING_MMC_HS:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
+ break;
+ default:
+ mode = SDHCI_CDNS_HRS06_MODE_SD;
+ break;
+ }
+
+ /* The eMMC speed mode is selected by the HRS06 register */
+ tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
+ tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
+ tmp |= mode;
+ writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
+
+ /* For SD, fall back to the default handler */
+ if (mode == SDHCI_CDNS_HRS06_MODE_SD)
+ sdhci_set_uhs_signaling(host, timing);
+}
+
+static const struct sdhci_ops sdhci_cdns_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_timeout_clock = sdhci_cdns_get_timeout_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
+ .ops = &sdhci_cdns_ops,
+};
+
+static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
+ u32 tmp;
+
+ if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+ return -EINVAL;
+
+ tmp = readl(reg);
+ tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
+ tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+ tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
+ writel(tmp, reg);
+
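+ /* wait for the controller to clear the TUNE_UP bit again */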
+ return readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS06_TUNE_UP),
+ 0, 1);
+}
+
+static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int cur_streak = 0;
+ int max_streak = 0;
+ int end_of_streak = 0;
+ int i;
+
+ /*
+ * This handler only implements the eMMC tuning that is specific to
+ * this controller. Fall back to the standard method for SD timing.
+ */
+ if (host->timing != MMC_TIMING_MMC_HS200)
+ return sdhci_execute_tuning(mmc, opcode);
+
+ if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
+ return -EINVAL;
+
+ for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
+ if (sdhci_cdns_set_tune_val(host, i) ||
+ mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */
+ cur_streak = 0;
+ } else { /* good */
+ cur_streak++;
+ if (cur_streak > max_streak) {
+ max_streak = cur_streak;
+ end_of_streak = i;
+ }
+ }
+ }
+
+ if (!max_streak) {
+ dev_err(mmc_dev(host->mmc), "no tuning point found\n");
+ return -EIO;
+ }
+
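+ /* pick the middle of the longest passing streak as the tuning value */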
+ return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+}
+
+static int sdhci_cdns_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_cdns_priv *priv;
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, sizeof(*priv));
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto disable_clk;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->clk = clk;
+
+ priv = sdhci_cdns_priv(host);
+ priv->hrs_addr = host->ioaddr;
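+ /* the SDHCI-compatible slot registers (SRS) start at an offset from the HRS block */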
+ host->ioaddr += SDHCI_CDNS_SRS_BASE;
+ host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto free;
+
+ sdhci_cdns_phy_init(priv);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto free;
+
+ return 0;
+free:
+ sdhci_pltfm_free(pdev);
+disable_clk:
+ clk_disable_unprepare(clk);
+
+ return ret;
+}
+
+static const struct of_device_id sdhci_cdns_match[] = {
+ { .compatible = "socionext,uniphier-sd4hc" },
+ { .compatible = "cdns,sd4hc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdhci_cdns_match);
+
+static struct platform_driver sdhci_cdns_driver = {
+ .driver = {
+ .name = "sdhci-cdns",
+ .pm = &sdhci_pltfm_pmops,
+ .of_match_table = sdhci_cdns_match,
+ },
+ .probe = sdhci_cdns_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+module_platform_driver(sdhci_cdns_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("Cadence SD/SDIO/eMMC Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 7123ef96ed18..445fc47dc3e7 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
switch (uhs) {
case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_DDR50:
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index de132e281753..ece8b37e51dd 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -24,30 +24,36 @@
SDHCI_QUIRK_PIO_NEEDS_DELAY | \
SDHCI_QUIRK_NO_HISPD_BIT)
-#define ESDHC_PROCTL 0x28
-
-#define ESDHC_SYSTEM_CONTROL 0x2c
-#define ESDHC_CLOCK_MASK 0x0000fff0
-#define ESDHC_PREDIV_SHIFT 8
-#define ESDHC_DIVIDER_SHIFT 4
-#define ESDHC_CLOCK_PEREN 0x00000004
-#define ESDHC_CLOCK_HCKEN 0x00000002
-#define ESDHC_CLOCK_IPGEN 0x00000001
-
/* pltfm-specific */
#define ESDHC_HOST_CONTROL_LE 0x20
/*
- * P2020 interpretation of the SDHCI_HOST_CONTROL register
+ * eSDHC register definition
*/
-#define ESDHC_CTRL_4BITBUS (0x1 << 1)
-#define ESDHC_CTRL_8BITBUS (0x2 << 1)
-#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
-
-/* OF-specific */
-#define ESDHC_DMA_SYSCTL 0x40c
-#define ESDHC_DMA_SNOOP 0x00000040
-#define ESDHC_HOST_CONTROL_RES 0x01
+/* Present State Register */
+#define ESDHC_PRSSTAT 0x24
+#define ESDHC_CLOCK_STABLE 0x00000008
+
+/* Protocol Control Register */
+#define ESDHC_PROCTL 0x28
+#define ESDHC_CTRL_4BITBUS (0x1 << 1)
+#define ESDHC_CTRL_8BITBUS (0x2 << 1)
+#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
+#define ESDHC_HOST_CONTROL_RES 0x01
+
+/* System Control Register */
+#define ESDHC_SYSTEM_CONTROL 0x2c
+#define ESDHC_CLOCK_MASK 0x0000fff0
+#define ESDHC_PREDIV_SHIFT 8
+#define ESDHC_DIVIDER_SHIFT 4
+#define ESDHC_CLOCK_SDCLKEN 0x00000008
+#define ESDHC_CLOCK_PEREN 0x00000004
+#define ESDHC_CLOCK_HCKEN 0x00000002
+#define ESDHC_CLOCK_IPGEN 0x00000001
+
+/* Control Register for DMA transfer */
+#define ESDHC_DMA_SYSCTL 0x40c
+#define ESDHC_DMA_SNOOP 0x00000040
#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 726246665850..3275d4995812 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -143,6 +143,14 @@ static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg)
}
static const struct sdhci_ops sdhci_iproc_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_ops sdhci_iproc_32only_ops = {
.read_l = sdhci_iproc_readl,
.read_w = sdhci_iproc_readw,
.read_b = sdhci_iproc_readb,
@@ -156,6 +164,28 @@ static const struct sdhci_ops sdhci_iproc_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+ .ops = &sdhci_iproc_32only_ops,
+};
+
+static const struct sdhci_iproc_data iproc_cygnus_data = {
+ .pdata = &sdhci_iproc_cygnus_pltfm_data,
+ .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_VDD_180 |
+ SDHCI_CAN_DO_SUSPEND |
+ SDHCI_CAN_DO_HISPD |
+ SDHCI_CAN_DO_ADMA2 |
+ SDHCI_CAN_DO_SDMA,
+ .caps1 = SDHCI_DRIVER_TYPE_C |
+ SDHCI_DRIVER_TYPE_D |
+ SDHCI_SUPPORT_DDR50,
+ .mmc_caps = MMC_CAP_1_8V_DDR,
+};
+
static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
@@ -181,20 +211,26 @@ static const struct sdhci_iproc_data iproc_data = {
static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_MISSING_CAPS,
- .ops = &sdhci_iproc_ops,
+ SDHCI_QUIRK_MISSING_CAPS |
+ SDHCI_QUIRK_NO_HISPD_BIT,
+ .ops = &sdhci_iproc_32only_ops,
};
static const struct sdhci_iproc_data bcm2835_data = {
.pdata = &sdhci_bcm2835_pltfm_data,
- .caps = SDHCI_CAN_VDD_330,
- .caps1 = 0x00000000,
+ .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_DO_HISPD,
+ .caps1 = SDHCI_DRIVER_TYPE_A |
+ SDHCI_DRIVER_TYPE_C,
.mmc_caps = 0x00000000,
};
static const struct of_device_id sdhci_iproc_of_match[] = {
{ .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
- { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_data },
+ { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data},
+ { .compatible = "brcm,sdhci-iproc", .data = &iproc_data },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 90ed2e12d345..10cdc84d5113 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -18,7 +18,9 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/iopoll.h>
#include "sdhci-pltfm.h"
@@ -31,6 +33,7 @@
#define HC_MODE_EN 0x1
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
+#define FF_CLK_SW_RST_DIS BIT(13)
#define CORE_PWRCTL_STATUS 0xdc
#define CORE_PWRCTL_MASK 0xe0
@@ -49,6 +52,7 @@
#define INT_MASK 0xf
#define MAX_PHASES 16
#define CORE_DLL_LOCK BIT(7)
+#define CORE_DDR_DLL_LOCK BIT(11)
#define CORE_DLL_EN BIT(16)
#define CORE_CDR_EN BIT(17)
#define CORE_CK_OUT_EN BIT(18)
@@ -56,18 +60,69 @@
#define CORE_DLL_PDN BIT(29)
#define CORE_DLL_RST BIT(30)
#define CORE_DLL_CONFIG 0x100
+#define CORE_CMD_DAT_TRACK_SEL BIT(0)
#define CORE_DLL_STATUS 0x108
+#define CORE_DLL_CONFIG_2 0x1b4
+#define CORE_DDR_CAL_EN BIT(0)
+#define CORE_FLL_CYCLE_CNT BIT(18)
+#define CORE_DLL_CLOCK_DISABLE BIT(21)
+
#define CORE_VENDOR_SPEC 0x10c
+#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
#define CORE_CLK_PWRSAVE BIT(1)
+#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
+#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
+#define CORE_HC_MCLK_SEL_MASK (3 << 8)
+#define CORE_HC_SELECT_IN_EN BIT(18)
+#define CORE_HC_SELECT_IN_HS400 (6 << 19)
+#define CORE_HC_SELECT_IN_MASK (7 << 19)
+
+#define CORE_CSR_CDC_CTLR_CFG0 0x130
+#define CORE_SW_TRIG_FULL_CALIB BIT(16)
+#define CORE_HW_AUTOCAL_ENA BIT(17)
+
+#define CORE_CSR_CDC_CTLR_CFG1 0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
+#define CORE_TIMER_ENA BIT(16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
+#define CORE_CDC_OFFSET_CFG 0x14C
+#define CORE_CSR_CDC_DELAY_CFG 0x150
+#define CORE_CDC_SLAVE_DDA_CFG 0x160
+#define CORE_CSR_CDC_STATUS0 0x164
+#define CORE_CALIBRATION_DONE BIT(0)
+
+#define CORE_CDC_ERROR_CODE_MASK 0x7000000
+
+#define CORE_CSR_CDC_GEN_CFG 0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
+#define CORE_CDC_SWITCH_RC_EN BIT(1)
+
+#define CORE_DDR_200_CFG 0x184
+#define CORE_CDC_T4_DLY_SEL BIT(0)
+#define CORE_CMDIN_RCLK_EN BIT(1)
+#define CORE_START_CDC_TRAFFIC BIT(6)
+#define CORE_VENDOR_SPEC3 0x1b0
+#define CORE_PWRSAVE_DLL BIT(3)
+
+#define CORE_DDR_CONFIG 0x1b8
+#define DDR_CONFIG_POR_VAL 0x80040853
#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
+#define INVALID_TUNING_PHASE -1
+#define SDHCI_MSM_MIN_CLOCK 400000
+#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
+
#define CDR_SELEXT_SHIFT 20
#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT 24
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
+#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
@@ -75,9 +130,56 @@ struct sdhci_msm_host {
struct clk *clk; /* main SD/MMC bus clock */
struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
+ struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll */
+ unsigned long clk_rate;
struct mmc_host *mmc;
+ bool use_14lpp_dll_reset;
+ bool tuning_done;
+ bool calibration_done;
+ u8 saved_tuning_phase;
+ bool use_cdclp533;
};
+static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct mmc_ios ios = host->mmc->ios;
+ /*
+ * The SDHC requires the internal clock frequency to be double
+ * the actual clock that will be set for DDR mode. The controller
+ * uses the faster clock (100/400 MHz) for some of its parts and
+ * sends the actual required clock (50/200 MHz) to the card.
+ */
+ if (ios.timing == MMC_TIMING_UHS_DDR50 ||
+ ios.timing == MMC_TIMING_MMC_DDR52 ||
+ ios.timing == MMC_TIMING_MMC_HS400 ||
+ host->flags & SDHCI_HS400_TUNING)
+ clock *= 2;
+ return clock;
+}
+
+static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios curr_ios = host->mmc->ios;
+ int rc;
+
+ clock = msm_get_clock_rate_for_bus_mode(host, clock);
+ rc = clk_set_rate(msm_host->clk, clock);
+ if (rc) {
+ pr_err("%s: Failed to set clock at rate %u at timing %d\n",
+ mmc_hostname(host->mmc), clock,
+ curr_ios.timing);
+ return;
+ }
+ msm_host->clk_rate = clock;
+ pr_debug("%s: Setting clock at rate %lu at timing %d\n",
+ mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ curr_ios.timing);
+}
+
/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
@@ -115,6 +217,9 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
u32 config;
struct mmc_host *mmc = host->mmc;
+ if (phase > 0xf)
+ return -EINVAL;
+
spin_lock_irqsave(&host->lock, flags);
config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
@@ -136,9 +241,9 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
- /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
rc = msm_dll_poll_ck_out_en(host, 1);
@@ -163,8 +268,8 @@ out:
	 * Find out the greatest range of consecutive selected
* DLL clock output phases that can be used as sampling
* setting for SD3.0 UHS-I card read operation (in SDR104
- * timing mode) or for eMMC4.5 card read operation (in HS200
- * timing mode).
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
* Select the 3/4 of the range and configure the DLL with the
* selected DLL clock output phase.
*/
@@ -303,8 +408,11 @@ static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
static int msm_init_cm_dll(struct sdhci_host *host)
{
struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
int wait_cnt = 50;
unsigned long flags;
+ u32 config;
spin_lock_irqsave(&host->lock, flags);
@@ -313,33 +421,73 @@ static int msm_init_cm_dll(struct sdhci_host *host)
* tuning is in progress. Keeping PWRSAVE ON may
* turn off the clock.
*/
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_CLK_PWRSAVE;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ if (msm_host->use_14lpp_dll_reset) {
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config |= CORE_DLL_CLOCK_DISABLE;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ }
- /* Write 1 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
- /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
msm_cm_dll_set_freq(host);
- /* Write 0 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ if (msm_host->use_14lpp_dll_reset &&
+ !IS_ERR_OR_NULL(msm_host->xo_clk)) {
+ u32 mclk_freq = 0;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= CORE_FLL_CYCLE_CNT;
+ if (config)
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
+ clk_get_rate(msm_host->xo_clk));
+ else
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
+ clk_get_rate(msm_host->xo_clk));
- /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= ~(0xFF << 10);
+ config |= mclk_freq << 10;
- /* Set DLL_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ /* wait for 5us before enabling DLL clock */
+ udelay(5);
+ }
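
A worked example of the FLL cycle-count computation above, using assumed clock rates (not from the patch):

	/* Assume host->clock = 192000000 (192 MHz) and a 19.2 MHz TCXO */
	mclk_freq = DIV_ROUND_CLOSEST_ULL(192000000ULL * 8, 19200000); /* = 80 (FLL_CYCLE_CNT set) */
	mclk_freq = DIV_ROUND_CLOSEST_ULL(192000000ULL * 4, 19200000); /* = 40 (FLL_CYCLE_CNT clear) */
	/* mclk_freq is then written to bits [17:10] of CORE_DLL_CONFIG_2 */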
- /* Set CK_OUT_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ if (msm_host->use_14lpp_dll_reset) {
+ msm_cm_dll_set_freq(host);
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config &= ~CORE_DLL_CLOCK_DISABLE;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CK_OUT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
@@ -358,23 +506,346 @@ static int msm_init_cm_dll(struct sdhci_host *host)
return 0;
}
-static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+static void msm_hc_select_default(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 config;
+
+ if (!msm_host->use_cdclp533) {
+ config = readl_relaxed(host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ config &= ~CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr +
+ CORE_VENDOR_SPEC3);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_DFLT;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ /*
+ * Disable HC_SELECT_IN to be able to use the UHS mode select
+ * configuration from Host Control2 register for all other
+ * modes.
+	 * Write 0 to the HC_SELECT_IN and HC_SELECT_IN_EN fields
+	 * in VENDOR_SPEC_FUNC.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_SELECT_IN_EN;
+ config &= ~CORE_HC_SELECT_IN_MASK;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+
+ /*
+ * Make sure above writes impacting free running MCLK are completed
+ * before changing the clk_rate at GCC.
+ */
+ wmb();
+}
+
+static void msm_hc_select_hs400(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_ios ios = host->mmc->ios;
+ u32 config, dll_lock;
+ int rc;
+
+ /* Select the divided clock (free running MCLK/2) */
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config &= ~CORE_HC_MCLK_SEL_MASK;
+ config |= CORE_HC_MCLK_SEL_HS400;
+
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ /*
+ * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+ * register
+ */
+ if ((msm_host->tuning_done || ios.enhanced_strobe) &&
+ !msm_host->calibration_done) {
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ config |= CORE_HC_SELECT_IN_HS400;
+ config |= CORE_HC_SELECT_IN_EN;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
+ }
+ if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
+ /*
+		 * Poll for the DLL_LOCK or DDR_DLL_LOCK bit in
+		 * CORE_DLL_STATUS to be set. This should happen
+		 * within 15 us at 200 MHz.
+ */
+ rc = readl_relaxed_poll_timeout(host->ioaddr +
+ CORE_DLL_STATUS,
+ dll_lock,
+ (dll_lock &
+ (CORE_DLL_LOCK |
+ CORE_DDR_DLL_LOCK)), 10,
+ 1000);
+ if (rc == -ETIMEDOUT)
+ pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+ mmc_hostname(host->mmc), dll_lock);
+ }
+ /*
+ * Make sure above writes impacting free running MCLK are completed
+ * before changing the clk_rate at GCC.
+ */
+ wmb();
+}
+
+/*
+ * sdhci_msm_hc_select_mode :- In general, all timing modes are
+ * controlled via the UHS mode select field of the Host Control2 register.
+ * eMMC-specific HS200/HS400 do not have their own modes defined
+ * there, hence we use these values:
+ *
+ * HS200 - SDR104 (since the two are functionally equivalent)
+ * HS400 - this involves multiple configurations:
+ *		initially SDR104, while tuning is carried out as HS200;
+ *		then, when switching to DDR @ 400MHz (HS400), the vendor
+ *		specific HC_SELECT_IN field is used to control the mode.
+ *
+ * In addition to controlling the modes, we also need to select the
+ * correct input clock for the DLL, depending on the mode:
+ *
+ * HS400 - divided clock (free running MCLK/2)
+ * All other modes - default (free running MCLK)
+ */
+void sdhci_msm_hc_select_mode(struct sdhci_host *host)
+{
+ struct mmc_ios ios = host->mmc->ios;
+
+ if (ios.timing == MMC_TIMING_MMC_HS400 ||
+ host->flags & SDHCI_HS400_TUNING)
+ msm_hc_select_hs400(host);
+ else
+ msm_hc_select_default(host);
+}
+
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 config, calib_done;
+ int ret;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CMD_DAT_TRACK_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config &= ~CORE_CDC_T4_DLY_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+ config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+ config |= CORE_CDC_SWITCH_RC_EN;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config &= ~CORE_START_CDC_TRAFFIC;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+
+ /* Perform CDC Register Initialization Sequence */
+
+ writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+ writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+ writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+ writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+ writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+ writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+ writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+ /* CDC HW Calibration */
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config |= CORE_SW_TRIG_FULL_CALIB;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config &= ~CORE_SW_TRIG_FULL_CALIB;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ config |= CORE_HW_AUTOCAL_ENA;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ config |= CORE_TIMER_ENA;
+ writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
+ ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+ calib_done,
+ (calib_done & CORE_CALIBRATION_DONE),
+ 1, 50);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CDC calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+ & CORE_CDC_ERROR_CODE_MASK;
+ if (ret) {
+ pr_err("%s: %s: CDC error code %d\n",
+ mmc_hostname(host->mmc), __func__, ret);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config |= CORE_START_CDC_TRAFFIC;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ u32 dll_status, config;
+ int ret;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+	 * Currently the CORE_DDR_CONFIG register defaults to the desired
+	 * configuration on reset. Reprogram the power-on reset (POR)
+	 * value here in case it has been modified by a bootloader.
+	 * If this default ever changes, the desired values will need
+	 * to be programmed appropriately.
+ */
+ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);
+
+ if (mmc->ios.enhanced_strobe) {
+ config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
+ config |= CORE_CMDIN_RCLK_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
+ config |= CORE_DDR_CAL_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
+
+ ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
+ dll_status,
+ (dll_status & CORE_DDR_DLL_LOCK),
+ 10, 1000);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
+ config |= CORE_PWRSAVE_DLL;
+ writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);
+
+ /*
+	 * Drain the write buffer to ensure the above DLL calibration
+	 * and PWRSAVE DLL writes have completed.
+ */
+ wmb();
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+ u32 config;
+
+ pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ if (!mmc->ios.enhanced_strobe) {
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host,
+ msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CMD_DAT_TRACK_SEL;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ }
+
+ if (msm_host->use_cdclp533)
+ ret = sdhci_msm_cdclp533_calibration(host);
+ else
+ ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+ pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
int tuning_seq_cnt = 3;
u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
int rc;
- struct mmc_host *mmc = host->mmc;
struct mmc_ios ios = host->mmc->ios;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
/*
	 * Tuning is required for SDR104, HS200 and HS400 cards, and only
	 * when the clock frequency is greater than 100 MHz in these modes.
*/
- if (host->clock <= 100 * 1000 * 1000 ||
- !((ios.timing == MMC_TIMING_MMC_HS200) ||
- (ios.timing == MMC_TIMING_UHS_SDR104)))
+ if (host->clock <= CORE_FREQ_100MHZ ||
+ !(ios.timing == MMC_TIMING_MMC_HS400 ||
+ ios.timing == MMC_TIMING_MMC_HS200 ||
+ ios.timing == MMC_TIMING_UHS_SDR104))
return 0;
+ /*
+	 * HS400 tuning in HS200 timing requires:
+ * - select MCLK/2 in VENDOR_SPEC
+ * - program MCLK to 400MHz (or nearest supported) in GCC
+ */
+ if (host->flags & SDHCI_HS400_TUNING) {
+ sdhci_msm_hc_select_mode(host);
+ msm_set_clock_rate_for_bus_mode(host, ios.clock);
+ host->flags &= ~SDHCI_HS400_TUNING;
+ }
+
retry:
/* First of all reset the tuning block */
rc = msm_init_cm_dll(host);
@@ -388,6 +859,7 @@ retry:
if (rc)
return rc;
+ msm_host->saved_tuning_phase = phase;
rc = mmc_send_tuning(mmc, opcode, NULL);
if (!rc) {
/* Tuning is successful at this tuning point */
@@ -423,14 +895,43 @@ retry:
rc = -EIO;
}
+ if (!rc)
+ msm_host->tuning_done = true;
return rc;
}
+/*
+ * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
+ * This needs to be done for both tuning and enhanced_strobe mode.
+ * DLL operation is only needed for clock > 100 MHz. For clock <= 100 MHz,
+ * a fixed feedback clock is used.
+ */
+static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ if (host->clock > CORE_FREQ_100MHZ &&
+ (msm_host->tuning_done || ios->enhanced_strobe) &&
+ !msm_host->calibration_done) {
+ ret = sdhci_msm_hs400_dll_calibration(host);
+ if (!ret)
+ msm_host->calibration_done = true;
+ else
+ pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ }
+}
+
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
unsigned int uhs)
{
struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u16 ctrl_2;
+ u32 config;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
@@ -445,6 +946,7 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
case MMC_TIMING_UHS_SDR50:
ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
break;
+ case MMC_TIMING_MMC_HS400:
case MMC_TIMING_MMC_HS200:
case MMC_TIMING_UHS_SDR104:
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
@@ -461,15 +963,40 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
* provide feedback clock, the mode selection can be any value less
* than 3'b011 in bits [2:0] of HOST CONTROL2 register.
*/
- if (host->clock <= 100000000 &&
- (uhs == MMC_TIMING_MMC_HS400 ||
- uhs == MMC_TIMING_MMC_HS200 ||
- uhs == MMC_TIMING_UHS_SDR104))
- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if (host->clock <= CORE_FREQ_100MHZ) {
+ if (uhs == MMC_TIMING_MMC_HS400 ||
+ uhs == MMC_TIMING_MMC_HS200 ||
+ uhs == MMC_TIMING_UHS_SDR104)
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ /*
+		 * The DLL is not required for clock <= 100 MHz.
+		 * Thus, make sure the DLL is disabled when not required.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_RST;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_DLL_PDN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /*
+ * The DLL needs to be restored and CDCLP533 recalibrated
+ * when the clock frequency is set back to 400MHz.
+ */
+ msm_host->calibration_done = false;
+ }
dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+ spin_unlock_irq(&host->lock);
+
+ if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
+ sdhci_msm_hs400(host, &mmc->ios);
+
+ spin_lock_irq(&host->lock);
}
static void sdhci_msm_voltage_switch(struct sdhci_host *host)
@@ -505,6 +1032,74 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ return clk_round_rate(msm_host->clk, ULONG_MAX);
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+ return SDHCI_MSM_MIN_CLOCK;
+}
+
+/**
+ * __sdhci_msm_set_clock - sdhci_msm clock control.
+ *
+ * Description:
+ * The MSM controller does not use the internal divider and
+ * instead controls the GCC clock directly, as per
+ * HW recommendation.
+ **/
+void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ /*
+ * Keep actual_clock as zero -
+	 * Keep actual_clock as zero:
+	 * - no divider is used, so there is no need for actual_clock.
+	 * - the MSM controller uses SDCLK for data timeout calculation; if
+	 *   actual_clock is zero, host->clock is used for the calculation.
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ /*
+	 * The MSM controller does not use a clock divider.
+	 * Thus, read SDHCI_CLOCK_CONTROL and only enable the
+	 * clock with no divider value programmed.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ sdhci_enable_clk(host, clk);
+}
+
+/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (!clock) {
+ msm_host->clk_rate = clock;
+ goto out;
+ }
+
+ spin_unlock_irq(&host->lock);
+
+ sdhci_msm_hc_select_mode(host);
+
+ msm_set_clock_rate_for_bus_mode(host, clock);
+
+ spin_lock_irq(&host->lock);
+out:
+ __sdhci_msm_set_clock(host, clock);
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -513,9 +1108,10 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
static const struct sdhci_ops sdhci_msm_ops = {
- .platform_execute_tuning = sdhci_msm_execute_tuning,
.reset = sdhci_reset,
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_msm_set_clock,
+ .get_min_clock = sdhci_msm_get_min_clock,
+ .get_max_clock = sdhci_msm_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.voltage_switch = sdhci_msm_voltage_switch,
@@ -524,7 +1120,9 @@ static const struct sdhci_ops sdhci_msm_ops = {
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_NO_CARD_NO_RESET |
- SDHCI_QUIRK_SINGLE_POWER_WRITE,
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_msm_ops,
};
@@ -536,7 +1134,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct resource *core_memres;
int ret;
u16 host_version, core_minor;
- u32 core_version, caps;
+ u32 core_version, config;
u8 core_major;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
@@ -554,6 +1152,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {
@@ -586,6 +1186,16 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto pclk_disable;
}
+ /*
+	 * The xo clock is needed for the FLL feature of cm_dll.
+	 * If the xo clock is not specified in DT, warn and proceed.
+ */
+ msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(msm_host->xo_clk)) {
+ ret = PTR_ERR(msm_host->xo_clk);
+ dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
+ }
+
/* Vote for maximum clock rate for maximum performance */
ret = clk_set_rate(msm_host->clk, INT_MAX);
if (ret)
@@ -604,21 +1214,17 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
- /* Reset the core and Enable SDHC mode */
- writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
- CORE_SW_RST, msm_host->core_mem + CORE_POWER);
-
- /* SW reset can take upto 10HCLK + 15MCLK cycles. (min 40us) */
- usleep_range(1000, 5000);
- if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
- dev_err(&pdev->dev, "Stuck in reset\n");
- ret = -ETIMEDOUT;
- goto clk_disable;
- }
+ /* Reset the vendor spec register to power on reset state */
+ writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
+ host->ioaddr + CORE_VENDOR_SPEC);
/* Set HC_MODE_EN bit in HC_MODE register */
writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
+ config |= FF_CLK_SW_RST_DIS;
+ writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);
+
host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
@@ -631,14 +1237,24 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
core_version, core_major, core_minor);
+ if (core_major == 1 && core_minor >= 0x42)
+ msm_host->use_14lpp_dll_reset = true;
+
+ /*
+	 * SDCC 5 controllers with major version 1 and minor version 0x34 or
+	 * later that support HS400 mode use the CM DLL instead of the CDC LP 533 DLL.
+ */
+ if (core_major == 1 && core_minor < 0x34)
+ msm_host->use_cdclp533 = true;
+
/*
* Support for some capabilities is not advertised by newer
* controller versions and must be explicitly enabled.
*/
if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
- caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
- caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
- writel_relaxed(caps, host->ioaddr +
+ config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+ config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
+ writel_relaxed(config, host->ioaddr +
CORE_VENDOR_SPEC_CAPABILITIES0);
}
@@ -659,12 +1275,27 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ MSM_MMC_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
ret = sdhci_add_host(host);
if (ret)
- goto clk_disable;
+ goto pm_runtime_disable;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
+pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
clk_disable:
clk_disable_unprepare(msm_host->clk);
pclk_disable:
@@ -686,6 +1317,11 @@ static int sdhci_msm_remove(struct platform_device *pdev)
0xffffffff);
sdhci_remove_host(host, dead);
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
clk_disable_unprepare(msm_host->clk);
clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR(msm_host->bus_clk))
@@ -694,12 +1330,57 @@ static int sdhci_msm_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int sdhci_msm_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ clk_disable_unprepare(msm_host->clk);
+ clk_disable_unprepare(msm_host->pclk);
+
+ return 0;
+}
+
+static int sdhci_msm_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = clk_prepare_enable(msm_host->clk);
+ if (ret) {
+ dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(msm_host->pclk);
+ if (ret) {
+ dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
+ clk_disable_unprepare(msm_host->clk);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_msm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
+ sdhci_msm_runtime_resume,
+ NULL)
+};
+
static struct platform_driver sdhci_msm_driver = {
.probe = sdhci_msm_probe,
.remove = sdhci_msm_remove,
.driver = {
.name = "sdhci_msm",
.of_match_table = sdhci_msm_dt_match,
+ .pm = &sdhci_msm_pm_ops,
},
};
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 410a55b1c25f..1cfd7f900339 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,13 +28,9 @@
#include "sdhci-pltfm.h"
#include <linux/of.h>
-#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
#define VENDOR_ENHANCED_STROBE BIT(0)
-#define CLK_CTRL_TIMEOUT_SHIFT 16
-#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
-#define CLK_CTRL_TIMEOUT_MIN_EXP 13
#define PHY_CLK_TOO_SLOW_HZ 400000
@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
{
- u32 div;
unsigned long freq;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
- div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+ /* SDHCI timeout clock is in kHz */
+ freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
- freq = clk_get_rate(pltfm_host->clk);
- freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+ /* or in MHz */
+ if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
+ freq = DIV_ROUND_UP(freq, 1000);
return freq;
}
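
A worked example of the conversion above, with an assumed input clock (the 200 MHz figure is illustrative only):

	/* Assume clk_get_rate(pltfm_host->clk) returns 200000000 (200 MHz) */
	freq = DIV_ROUND_UP(200000000, 1000);		/* 200000 -> timeout clock in kHz */
	if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
		freq = DIV_ROUND_UP(freq, 1000);	/* 200 -> timeout clock in MHz */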
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index a9b7fc06c434..d5430ed02a67 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -29,6 +29,8 @@
#include "sdhci-pltfm.h"
+#define SDMMC_MC1R 0x204
+#define SDMMC_MC1R_DDR BIT(3)
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
@@ -85,11 +87,37 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
+/*
+ * In this specific implementation of the SDHCI controller, the power register
+ * needs to have a valid voltage set even when the power supply is managed by
+ * an external regulator.
+ */
+static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
+void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+ if (timing == MMC_TIMING_MMC_DDR52)
+ sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ sdhci_set_uhs_signaling(host, timing);
+}
+
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
+ .set_power = sdhci_at91_set_power,
};
static const struct sdhci_pltfm_data soc_data_sama5d2 = {
@@ -100,6 +128,7 @@ static const struct of_device_id sdhci_at91_dt_match[] = {
{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
{}
};
+MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
#ifdef CONFIG_PM
static int sdhci_at91_runtime_suspend(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1bb11e4a9fe5..d3aa67142839 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/sys_soc.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -28,6 +29,7 @@
struct sdhci_esdhc {
u8 vendor_ver;
u8 spec_ver;
+ bool quirk_incorrect_hostver;
};
/**
@@ -87,6 +89,8 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
static u16 esdhc_readw_fixup(struct sdhci_host *host,
int spec_reg, u32 value)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u16 ret;
int shift = (spec_reg & 0x2) * 8;
@@ -94,6 +98,12 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
ret = value & 0xffff;
else
ret = (value >> shift) & 0xffff;
+ /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
+ * vendor version and spec version information.
+ */
+ if ((spec_reg == SDHCI_HOST_VERSION) &&
+ (esdhc->quirk_incorrect_hostver))
+ ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
return ret;
}
@@ -421,6 +431,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int pre_div = 1;
int div = 1;
+ u32 timeout;
u32 temp;
host->mmc->actual_clock = 0;
@@ -441,8 +452,8 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
}
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | ESDHC_CLOCK_MASK);
+ temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+ ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
@@ -462,7 +473,21 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
| (div << ESDHC_DIVIDER_SHIFT)
| (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- mdelay(1);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
+ if (timeout == 0) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ temp |= ESDHC_CLOCK_SDCLKEN;
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
}
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
@@ -559,19 +584,28 @@ static const struct sdhci_ops sdhci_esdhc_le_ops = {
};
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
- | SDHCI_QUIRK_NO_CARD_NO_RESET
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks = ESDHC_DEFAULT_QUIRKS |
+#ifdef CONFIG_PPC
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+#endif
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_be_ops,
};
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
- | SDHCI_QUIRK_NO_CARD_NO_RESET
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks = ESDHC_DEFAULT_QUIRKS |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_le_ops,
};
+static struct soc_device_attribute soc_incorrect_hostver[] = {
+ { .family = "QorIQ T4240", .revision = "1.0", },
+ { .family = "QorIQ T4240", .revision = "2.0", },
+ { },
+};
+
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -585,6 +619,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT;
esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
+ if (soc_device_match(soc_incorrect_hostver))
+ esdhc->quirk_incorrect_hostver = true;
+ else
+ esdhc->quirk_incorrect_hostver = false;
}
static int sdhci_esdhc_probe(struct platform_device *pdev)
@@ -623,8 +661,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
of_device_is_compatible(np, "fsl,p1020-esdhc") ||
- of_device_is_compatible(np, "fsl,t1040-esdhc") ||
- of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
+ of_device_is_compatible(np, "fsl,t1040-esdhc"))
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1d9e00a00e9f..86560d590786 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -27,6 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci-pci-data.h>
+#include <linux/acpi.h>
#include "sdhci.h"
#include "sdhci-pci.h"
@@ -375,6 +376,44 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+#ifdef CONFIG_ACPI
+static int ni_set_max_freq(struct sdhci_pci_slot *slot)
+{
+ acpi_status status;
+ unsigned long long max_freq;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
+ "MXFQ", NULL, &max_freq);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&slot->chip->pdev->dev,
+ "MXFQ not found in acpi table\n");
+ return -EINVAL;
+ }
+
+ slot->host->mmc->f_max = max_freq * 1000000;
+
+ return 0;
+}
+#else
+static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
+{
+ return 0;
+}
+#endif
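
A brief example of the ACPI-based maximum-frequency lookup above, with an assumed MXFQ value (purely illustrative):

	/* Hypothetical: the ACPI object "MXFQ" evaluates to 50 (units of MHz) */
	max_freq = 50;
	slot->host->mmc->f_max = max_freq * 1000000;	/* f_max = 50000000 Hz */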
+
+static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err;
+
+ err = ni_set_max_freq(slot);
+ if (err)
+ return err;
+
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_WAIT_WHILE_BUSY;
+ return 0;
+}
+
static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
@@ -385,12 +424,12 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- slot->cd_con_id = NULL;
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
- slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) {
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD) {
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
slot->host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
}
@@ -412,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
if (mode == MMC_POWER_OFF)
return;
+ spin_unlock_irq(&host->lock);
+
/*
* Bus power might not enable after D3 -> D0 transition due to the
* present state not yet having propagated. Retry for up to 2ms.
@@ -424,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
reg |= SDHCI_POWER_ON;
sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
}
+
+ spin_lock_irq(&host->lock);
}
static const struct sdhci_ops sdhci_intel_byt_ops = {
@@ -447,6 +490,15 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.ops = &sdhci_intel_byt_ops,
};
+static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .allow_runtime_pm = true,
+ .probe_slot = ni_byt_sdio_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
+};
+
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
@@ -817,6 +869,86 @@ enum amd_chipset_gen {
AMD_CHIPSET_UNKNOWN,
};
+/* AMD registers */
+#define AMD_SD_AUTO_PATTERN 0xB8
+#define AMD_MSLEEP_DURATION 4
+#define AMD_SD_MISC_CONTROL 0xD0
+#define AMD_MAX_TUNE_VALUE 0x0B
+#define AMD_AUTO_TUNE_SEL 0x10800
+#define AMD_FIFO_PTR 0x30
+#define AMD_BIT_MASK 0x1F
+
+static void amd_tuning_reset(struct sdhci_host *host)
+{
+ unsigned int val;
+
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+}
+
+static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
+{
+ unsigned int val;
+
+ pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
+ val &= ~AMD_BIT_MASK;
+ val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
+ pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
+}
+
+static void amd_enable_manual_tuning(struct pci_dev *pdev)
+{
+ unsigned int val;
+
+ pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
+ val |= AMD_FIFO_PTR;
+ pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
+}
+
+static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u8 valid_win = 0;
+ u8 valid_win_max = 0;
+ u8 valid_win_end = 0;
+ u8 ctrl, tune_around;
+
+ amd_tuning_reset(host);
+
+ for (tune_around = 0; tune_around < 12; tune_around++) {
+ amd_config_tuning_phase(pdev, tune_around);
+
+ if (mmc_send_tuning(host->mmc, opcode, NULL)) {
+ valid_win = 0;
+ msleep(AMD_MSLEEP_DURATION);
+ ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
+ sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
+ } else if (++valid_win > valid_win_max) {
+ valid_win_max = valid_win;
+ valid_win_end = tune_around;
+ }
+ }
+
+ if (!valid_win_max) {
+ dev_err(&pdev->dev, "no tuning point found\n");
+ return -EIO;
+ }
+
+ amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
+
+ amd_enable_manual_tuning(pdev);
+
+ host->mmc->retune_period = 0;
+
+ return 0;
+}
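
To illustrate the window-selection logic above, assume a hypothetical tuning run (the pass/fail pattern is invented for the example):

	/* Suppose phases 3..8 were the longest run of passing tuning points */
	u8 valid_win_max = 6, valid_win_end = 8;
	amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);	/* phase 5, mid-window */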
+
static int amd_probe(struct sdhci_pci_chip *chip)
{
struct pci_dev *smbus_dev;
@@ -839,16 +971,24 @@ static int amd_probe(struct sdhci_pci_chip *chip)
}
}
- if ((gen == AMD_CHIPSET_BEFORE_ML) || (gen == AMD_CHIPSET_CZ)) {
+ if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
- chip->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
- }
return 0;
}
+static const struct sdhci_ops amd_sdhci_pci_ops = {
+ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .platform_execute_tuning = amd_execute_tuning,
+};
+
static const struct sdhci_pci_fixes sdhci_amd = {
.probe = amd_probe,
+ .ops = &amd_sdhci_pci_ops,
};
static const struct pci_device_id pci_ids[] = {
@@ -1079,6 +1219,14 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+ .subvendor = PCI_VENDOR_ID_NI,
+ .subdevice = 0x7884,
+ .driver_data = (kernel_ulong_t)&sdhci_ni_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
@@ -1277,6 +1425,30 @@ static const struct pci_device_id pci_ids[] = {
},
{
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_GLK_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_GLK_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_GLK_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
.subvendor = PCI_ANY_ID,
@@ -1735,11 +1907,16 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
- if (slot->cd_idx >= 0 &&
- mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
- slot->cd_override_level, 0, NULL)) {
- dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
- slot->cd_idx = -1;
+ if (slot->cd_idx >= 0) {
+ ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
+ slot->cd_override_level, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ goto remove;
+
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
+ slot->cd_idx = -1;
+ }
}
ret = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 6bccf56bc5ff..36f743464fcc 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -34,6 +34,9 @@
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
+#define PCI_DEVICE_ID_INTEL_GLK_SD 0x31ca
+#define PCI_DEVICE_ID_INTEL_GLK_EMMC 0x31cc
+#define PCI_DEVICE_ID_INTEL_GLK_SDIO 0x31d0
/*
* PCI registers
@@ -78,7 +81,6 @@ struct sdhci_pci_slot {
int cd_gpio;
int cd_irq;
- char *cd_con_id;
int cd_idx;
bool cd_override_level;
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3280f2077959..957839d0fe37 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -106,7 +106,7 @@ extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
{
- return (void *)host->private;
+ return host->private;
}
extern const struct dev_pm_ops sdhci_pltfm_pmops;
diff --git a/drivers/mmc/host/sdhci-s3c-regs.h b/drivers/mmc/host/sdhci-s3c-regs.h
deleted file mode 100644
index e34049ad44cc..000000000000
--- a/drivers/mmc/host/sdhci-s3c-regs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* linux/arch/arm/plat-s3c/include/plat/regs-sdhci.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C Platform - SDHCI (HSMMC) register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_S3C_SDHCI_REGS_H
-#define __PLAT_S3C_SDHCI_REGS_H __FILE__
-
-#define S3C_SDHCI_CONTROL2 (0x80)
-#define S3C_SDHCI_CONTROL3 (0x84)
-#define S3C64XX_SDHCI_CONTROL4 (0x8C)
-
-#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR (1 << 31)
-#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK (1 << 30)
-#define S3C_SDHCI_CTRL2_CDINVRXD3 (1 << 29)
-#define S3C_SDHCI_CTRL2_SLCARDOUT (1 << 28)
-
-#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24)
-#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24)
-#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24)
-
-#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16)
-#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16)
-#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16)
-
-#define S3C_SDHCI_CTRL2_ENFBCLKTX (1 << 15)
-#define S3C_SDHCI_CTRL2_ENFBCLKRX (1 << 14)
-#define S3C_SDHCI_CTRL2_SDCDSEL (1 << 13)
-#define S3C_SDHCI_CTRL2_SDSIGPC (1 << 12)
-#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART (1 << 11)
-
-#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9)
-#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9)
-#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9)
-
-#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD (1 << 8)
-#define S3C_SDHCI_CTRL2_RWAITMODE (1 << 7)
-#define S3C_SDHCI_CTRL2_DISBUFRD (1 << 6)
-#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4)
-#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4)
-#define S3C_SDHCI_CTRL2_PWRSYNC (1 << 3)
-#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON (1 << 1)
-#define S3C_SDHCI_CTRL2_HWINITFIN (1 << 0)
-
-#define S3C_SDHCI_CTRL3_FCSEL3 (1 << 31)
-#define S3C_SDHCI_CTRL3_FCSEL2 (1 << 23)
-#define S3C_SDHCI_CTRL3_FCSEL1 (1 << 15)
-#define S3C_SDHCI_CTRL3_FCSEL0 (1 << 7)
-
-#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24)
-#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24)
-#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24)
-
-#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16)
-#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16)
-#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16)
-
-#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8)
-#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8)
-#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8)
-
-#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0)
-#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0)
-#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0)
-
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16)
-#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16)
-
-#define S3C64XX_SDHCI_CONTROL4_BUSY (1)
-
-#endif /* __PLAT_S3C_SDHCI_REGS_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 784c5a848fb4..3e5c83d435ae 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -29,11 +29,80 @@
#include <linux/mmc/host.h>
-#include "sdhci-s3c-regs.h"
#include "sdhci.h"
#define MAX_BUS_CLK (4)
+#define S3C_SDHCI_CONTROL2 (0x80)
+#define S3C_SDHCI_CONTROL3 (0x84)
+#define S3C64XX_SDHCI_CONTROL4 (0x8C)
+
+#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR BIT(31)
+#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK BIT(30)
+#define S3C_SDHCI_CTRL2_CDINVRXD3 BIT(29)
+#define S3C_SDHCI_CTRL2_SLCARDOUT BIT(28)
+
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16)
+#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16)
+#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL2_ENFBCLKTX BIT(15)
+#define S3C_SDHCI_CTRL2_ENFBCLKRX BIT(14)
+#define S3C_SDHCI_CTRL2_SDCDSEL BIT(13)
+#define S3C_SDHCI_CTRL2_SDSIGPC BIT(12)
+#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART BIT(11)
+
+#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9)
+#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9)
+
+#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD BIT(8)
+#define S3C_SDHCI_CTRL2_RWAITMODE BIT(7)
+#define S3C_SDHCI_CTRL2_DISBUFRD BIT(6)
+
+#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4)
+#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4)
+#define S3C_SDHCI_CTRL2_PWRSYNC BIT(3)
+#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON BIT(1)
+#define S3C_SDHCI_CTRL2_HWINITFIN BIT(0)
+
+#define S3C_SDHCI_CTRL3_FCSEL3 BIT(31)
+#define S3C_SDHCI_CTRL3_FCSEL2 BIT(23)
+#define S3C_SDHCI_CTRL3_FCSEL1 BIT(15)
+#define S3C_SDHCI_CTRL3_FCSEL0 BIT(7)
+
+#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24)
+#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24)
+#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16)
+#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16)
+#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8)
+#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8)
+#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8)
+
+#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0)
+#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0)
+#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0)
+
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16)
+
+#define S3C64XX_SDHCI_CONTROL4_BUSY (1)
+
/**
* struct sdhci_s3c - S3C SDHCI instance
* @host: The SDHCI host created
@@ -121,7 +190,9 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
* speed possible with selected clock source and skip the division.
*/
if (ourhost->no_divider) {
+ spin_unlock_irq(&ourhost->host->lock);
rate = clk_round_rate(clksrc, wanted);
+ spin_lock_irq(&ourhost->host->lock);
return wanted - rate;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 42ef3ebb1d8c..63bc33a54d0d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <linux/leds.h>
@@ -1343,20 +1344,10 @@ clock_set:
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
-void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
- u16 clk;
unsigned long timeout;
- host->mmc->actual_clock = 0;
-
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
-
- if (clock == 0)
- return;
-
- clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
-
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -1371,12 +1362,30 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
return;
}
timeout--;
- mdelay(1);
+ spin_unlock_irq(&host->lock);
+ usleep_range(900, 1100);
+ spin_lock_irq(&host->lock);
}
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
+EXPORT_SYMBOL_GPL(sdhci_enable_clk);
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_enable_clk(host, clk);
+}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
@@ -1569,6 +1578,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
u8 ctrl;
+ if (ios->power_mode == MMC_POWER_UNDEFINED)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD) {
@@ -1623,7 +1635,14 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if ((ios->timing == MMC_TIMING_SD_HS ||
- ios->timing == MMC_TIMING_MMC_HS)
+ ios->timing == MMC_TIMING_MMC_HS ||
+ ios->timing == MMC_TIMING_MMC_HS400 ||
+ ios->timing == MMC_TIMING_MMC_HS200 ||
+ ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_UHS_SDR50 ||
+ ios->timing == MMC_TIMING_UHS_SDR104 ||
+ ios->timing == MMC_TIMING_UHS_DDR50 ||
+ ios->timing == MMC_TIMING_UHS_SDR25)
&& !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
ctrl |= SDHCI_CTRL_HISPD;
else
@@ -1632,16 +1651,6 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->version >= SDHCI_SPEC_300) {
u16 clk, ctrl_2;
- /* In case of UHS-I modes, set High Speed Enable */
- if ((ios->timing == MMC_TIMING_MMC_HS400) ||
- (ios->timing == MMC_TIMING_MMC_HS200) ||
- (ios->timing == MMC_TIMING_MMC_DDR52) ||
- (ios->timing == MMC_TIMING_UHS_SDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_UHS_DDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR25))
- ctrl |= SDHCI_CTRL_HISPD;
-
if (!host->preset_enabled) {
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/*
@@ -1821,6 +1830,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ if (enable)
+ pm_runtime_get_noresume(host->mmc->parent);
+
spin_lock_irqsave(&host->lock, flags);
if (enable)
host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1829,6 +1841,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!enable)
+ pm_runtime_put_noidle(host->mmc->parent);
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -1948,11 +1963,157 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+static void sdhci_start_tuning(struct sdhci_host *host)
{
- struct sdhci_host *host = mmc_priv(mmc);
u16 ctrl;
- int tuning_loop_counter = MAX_TUNING_LOOP;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+ ctrl |= SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ /*
+ * As per the Host Controller spec v3.00, the tuning command
+ * generates a Buffer Read Ready interrupt, so enable that.
+ *
+ * Note: The spec clearly says that while the tuning sequence
+ * is being performed, the controller does not generate
+ * interrupts other than Buffer Read Ready. But to make sure
+ * we don't hit a controller bug, we enable only the Buffer
+ * Read Ready interrupt here.
+ */
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_end_tuning(struct sdhci_host *host)
+{
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_reset_tuning(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode,
+ unsigned long flags)
+{
+ sdhci_reset_tuning(host);
+
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+ sdhci_end_tuning(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_abort_tuning(host->mmc, opcode);
+ spin_lock_irqsave(&host->lock, flags);
+}
+
+/*
+ * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
+ * SDHCI tuning command does not have a data payload (or rather the hardware
+ * handles it automatically), so mmc_send_tuning() would return -EIO. Also, the
+ * tuning command's interrupt setup differs from that of other commands and
+ * there is no timeout interrupt, so special handling is needed.
+ */
+static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode,
+ unsigned long flags)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_command cmd = {};
+ struct mmc_request mrq = {};
+
+ cmd.opcode = opcode;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ cmd.mrq = &mrq;
+
+ mrq.cmd = &cmd;
+ /*
+ * In response to CMD19, the card sends a 64-byte tuning
+ * block to the Host Controller, so set the block size to 64
+ * here (128 bytes for CMD21 on an 8-bit bus, as below).
+ */
+ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
+ mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
+ else
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+
+ /*
+ * The tuning block is sent by the card to the host controller.
+ * So we set the TRNS_READ bit in the Transfer Mode register.
+ * This also takes care of setting DMA Enable and Multi Block
+ * Select in the same register to 0.
+ */
+ sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+ sdhci_send_command(host, &cmd);
+
+ host->cmd = NULL;
+
+ sdhci_del_timer(host, &mrq);
+
+ host->tuning_done = 0;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Wait for Buffer Read Ready interrupt */
+ wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
+ msecs_to_jiffies(50));
+
+ spin_lock_irqsave(&host->lock, flags);
+}
+
+static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode,
+ unsigned long flags)
+{
+ int i;
+
+ /*
+ * Issue the tuning command repeatedly until Execute Tuning is cleared to 0
+ * or the number of loops reaches MAX_TUNING_LOOP (40).
+ */
+ for (i = 0; i < MAX_TUNING_LOOP; i++) {
+ u16 ctrl;
+
+ sdhci_send_tuning(host, opcode, flags);
+
+ if (!host->tuning_done) {
+ pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_abort_tuning(host, opcode, flags);
+ return;
+ }
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
+ if (ctrl & SDHCI_CTRL_TUNED_CLK)
+ return; /* Success! */
+ break;
+ }
+
+ /* eMMC spec does not require a delay between tuning cycles */
+ if (opcode == MMC_SEND_TUNING_BLOCK)
+ mdelay(1);
+ }
+
+ pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_reset_tuning(host);
+}
+
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
int err = 0;
unsigned long flags;
unsigned int tuning_count = 0;
@@ -1961,7 +2122,6 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
spin_lock_irqsave(&host->lock, flags);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
- host->flags &= ~SDHCI_HS400_TUNING;
if (host->tuning_mode == SDHCI_TUNING_MODE_1)
tuning_count = host->tuning_count;
@@ -2004,143 +2164,24 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->ops->platform_execute_tuning) {
spin_unlock_irqrestore(&host->lock, flags);
err = host->ops->platform_execute_tuning(host, opcode);
- return err;
- }
-
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- ctrl |= SDHCI_CTRL_EXEC_TUNING;
- if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
- ctrl |= SDHCI_CTRL_TUNED_CLK;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
- /*
- * As per the Host Controller spec v3.00, tuning command
- * generates Buffer Read Ready interrupt, so enable that.
- *
- * Note: The spec clearly says that when tuning sequence
- * is being performed, the controller does not generate
- * interrupts other than Buffer Read Ready interrupt. But
- * to make sure we don't hit a controller bug, we _only_
- * enable Buffer Read Ready interrupt here.
- */
- sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
- sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
-
- /*
- * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
- * of loops reaches 40 times.
- */
- do {
- struct mmc_command cmd = {0};
- struct mmc_request mrq = {NULL};
-
- cmd.opcode = opcode;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
- cmd.retries = 0;
- cmd.data = NULL;
- cmd.mrq = &mrq;
- cmd.error = 0;
-
- if (tuning_loop_counter-- == 0)
- break;
-
- mrq.cmd = &cmd;
-
- /*
- * In response to CMD19, the card sends 64 bytes of tuning
- * block to the Host Controller. So we set the block size
- * to 64 here.
- */
- if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
- if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
- SDHCI_BLOCK_SIZE);
- else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
- } else {
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
- }
-
- /*
- * The tuning block is sent by the card to the host controller.
- * So we set the TRNS_READ bit in the Transfer Mode register.
- * This also takes care of setting DMA Enable and Multi Block
- * Select in the same register to 0.
- */
- sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
-
- sdhci_send_command(host, &cmd);
-
- host->cmd = NULL;
- sdhci_del_timer(host, &mrq);
-
- spin_unlock_irqrestore(&host->lock, flags);
- /* Wait for Buffer Read Ready interrupt */
- wait_event_timeout(host->buf_ready_int,
- (host->tuning_done == 1),
- msecs_to_jiffies(50));
spin_lock_irqsave(&host->lock, flags);
-
- if (!host->tuning_done) {
- pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
-
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
-
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- ctrl &= ~SDHCI_CTRL_TUNED_CLK;
- ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
- err = -EIO;
- goto out;
- }
-
- host->tuning_done = 0;
-
- ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-
- /* eMMC spec does not require a delay between tuning cycles */
- if (opcode == MMC_SEND_TUNING_BLOCK)
- mdelay(1);
- } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
-
- /*
- * The Host Driver has exhausted the maximum number of loops allowed,
- * so use fixed sampling frequency.
- */
- if (tuning_loop_counter < 0) {
- ctrl &= ~SDHCI_CTRL_TUNED_CLK;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
- }
- if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
- pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
- err = -EIO;
+ goto out_unlock;
}
-out:
- if (tuning_count) {
- /*
- * In case tuning fails, host controllers which support
- * re-tuning can try tuning again at a later time, when the
- * re-tuning timer expires. So for these controllers, we
- * return 0. Since there might be other controllers who do not
- * have this capability, we return error for them.
- */
- err = 0;
- }
- host->mmc->retune_period = err ? 0 : tuning_count;
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ host->mmc->retune_period = tuning_count;
+ sdhci_start_tuning(host);
+ __sdhci_execute_tuning(host, opcode, flags);
+
+ sdhci_end_tuning(host);
out_unlock:
+ host->flags &= ~SDHCI_HS400_TUNING;
spin_unlock_irqrestore(&host->lock, flags);
+
return err;
}
+EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
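(Illustrative only, not taken from this patch: exporting sdhci_execute_tuning() lets an sdhci-based platform driver reuse the core tuning loop from its own execute_tuning path. A minimal hypothetical sketch; the foo_ name and the platform preparation step are placeholders:)

	static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
	{
		/* platform-specific preparation (e.g. selecting a DLL mode)
		 * would go here before handing over to the core helper
		 */
		return sdhci_execute_tuning(mmc, opcode);
	}

The platform driver would then install foo_execute_tuning() as its execute_tuning callback in place of the core default.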
static int sdhci_select_drive_strength(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
@@ -2198,8 +2239,7 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
data->host_cookie = COOKIE_UNMAPPED;
}
-static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
+static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -2703,7 +2743,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
if (intmask & SDHCI_INT_RETUNE)
mmc_retune_needed(host->mmc);
- if (intmask & SDHCI_INT_CARD_INT) {
+ if ((intmask & SDHCI_INT_CARD_INT) &&
+ (host->ier & SDHCI_INT_CARD_INT)) {
sdhci_enable_sdio_irq_nolock(host, false);
host->thread_isr |= SDHCI_INT_CARD_INT;
result = IRQ_WAKE_THREAD;
@@ -2911,23 +2952,25 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
- /* Force clock and power re-program */
- host->pwr = 0;
- host->clock = 0;
- mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
- mmc->ops->set_ios(mmc, &mmc->ios);
+ if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
+ /* Force clock and power re-program */
+ host->pwr = 0;
+ host->clock = 0;
+ mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
+
+ if ((host_flags & SDHCI_PV_ENABLED) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_preset_value(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
- if ((host_flags & SDHCI_PV_ENABLED) &&
- !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
- spin_lock_irqsave(&host->lock, flags);
- sdhci_enable_preset_value(host, true);
- spin_unlock_irqrestore(&host->lock, flags);
+ if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+ mmc->ops->hs400_enhanced_strobe)
+ mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
}
- if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
- mmc->ops->hs400_enhanced_strobe)
- mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
-
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = false;
@@ -3010,6 +3053,8 @@ static int sdhci_set_dma_mask(struct sdhci_host *host)
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
u16 v;
+ u64 dt_caps_mask = 0;
+ u64 dt_caps = 0;
if (host->read_caps)
return;
@@ -3024,18 +3069,35 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
sdhci_do_reset(host, SDHCI_RESET_ALL);
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+ "sdhci-caps-mask", &dt_caps_mask);
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+ "sdhci-caps", &dt_caps);
+
v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
return;
- host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (caps) {
+ host->caps = *caps;
+ } else {
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps &= ~lower_32_bits(dt_caps_mask);
+ host->caps |= lower_32_bits(dt_caps);
+ }
if (host->version < SDHCI_SPEC_300)
return;
- host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ if (caps1) {
+ host->caps1 = *caps1;
+ } else {
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~upper_32_bits(dt_caps_mask);
+ host->caps1 |= upper_32_bits(dt_caps);
+ }
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
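(Illustrative only, not taken from this patch: the new "sdhci-caps-mask" and "sdhci-caps" properties are applied as a clear-then-set over the 64-bit capability value, upper 32 bits against CAPABILITIES_1 and lower 32 bits against CAPABILITIES. A rough sketch of that arithmetic; the example mask value assumes the standard SDHCI capability layout, where it would clear the SDR104 support bit:)

	u64 dt_caps_mask = 0x200000000ULL;	/* example: SDR104 bit of CAPABILITIES_1 */
	u64 dt_caps = 0;			/* example: no capability bits forced on */
	u32 caps1;

	caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
	caps1 &= ~upper_32_bits(dt_caps_mask);	/* drop the masked-out bits */
	caps1 |= upper_32_bits(dt_caps);	/* add any forced-on bits */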
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2570455b219a..edf3adfbc213 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -17,6 +17,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/interrupt.h>
#include <linux/mmc/host.h>
@@ -656,7 +658,7 @@ extern void sdhci_free_host(struct sdhci_host *host);
static inline void *sdhci_priv(struct sdhci_host *host)
{
- return (void *)host->private;
+ return host->private;
}
extern void sdhci_card_detect(struct sdhci_host *host);
@@ -682,6 +684,7 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
unsigned int *actual_clock);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_enable_clk(struct sdhci_host *host, u16 clk);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
@@ -689,6 +692,7 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
#ifdef CONFIG_PM
extern int sdhci_suspend_host(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 900778421be6..4062d6bef3c8 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1079,26 +1079,10 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->state = STATE_IDLE;
}
-static int sh_mmcif_get_cd(struct mmc_host *mmc)
-{
- struct sh_mmcif_host *host = mmc_priv(mmc);
- struct device *dev = sh_mmcif_host_to_dev(host);
- struct sh_mmcif_plat_data *p = dev->platform_data;
- int ret = mmc_gpio_get_cd(mmc);
-
- if (ret >= 0)
- return ret;
-
- if (!p || !p->get_cd)
- return -ENOSYS;
- else
- return p->get_cd(host->pd);
-}
-
static struct mmc_host_ops sh_mmcif_ops = {
.request = sh_mmcif_request,
.set_ios = sh_mmcif_set_ios,
- .get_cd = sh_mmcif_get_cd,
+ .get_cd = mmc_gpio_get_cd,
};
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
@@ -1443,8 +1427,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host->mmc = mmc;
host->addr = reg;
host->timeout = msecs_to_jiffies(10000);
- host->ccs_enable = !pd || !pd->ccs_unsupported;
- host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
+ host->ccs_enable = true;
+ host->clk_ctrl2_enable = false;
host->pd = pdev;
@@ -1509,12 +1493,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
}
}
- if (pd && pd->use_cd_gpio) {
- ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
- if (ret < 0)
- goto err_clk;
- }
-
mutex_init(&host->thread_lock);
ret = mmc_add_host(mmc);
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 49edff7fee49..bc6be0dbea39 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -47,31 +47,69 @@
#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
+struct sh_mobile_sdhi_scc {
+ unsigned long clk_rate; /* clock rate for SDR104 */
+ u32 tap; /* sampling clock position for SDR104 */
+};
+
struct sh_mobile_sdhi_of_data {
unsigned long tmio_flags;
+ u32 tmio_ocr_mask;
unsigned long capabilities;
unsigned long capabilities2;
enum dma_slave_buswidth dma_buswidth;
dma_addr_t dma_rx_offset;
unsigned bus_shift;
+ int scc_offset;
+ struct sh_mobile_sdhi_scc *taps;
+ int taps_num;
};
static const struct sh_mobile_sdhi_of_data of_default_cfg = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};
+static const struct sh_mobile_sdhi_of_data of_rz_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT,
+ .tmio_ocr_mask = MMC_VDD_32_33,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
TMIO_MMC_CLK_ACTUAL,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};
+/* Definitions for sampling clocks */
+static struct sh_mobile_sdhi_scc rcar_gen2_scc_taps[] = {
+ {
+ .clk_rate = 156000000,
+ .tap = 0x00000703,
+ },
+ {
+ .clk_rate = 0,
+ .tap = 0x00000300,
+ },
+};
+
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
+ .scc_offset = 0x0300,
+ .taps = rcar_gen2_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
+};
+
+/* Definitions for sampling clocks */
+static struct sh_mobile_sdhi_scc rcar_gen3_scc_taps[] = {
+ {
+ .clk_rate = 0,
+ .tap = 0x00000300,
+ },
};
static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
@@ -79,6 +117,9 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
};
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
@@ -86,6 +127,7 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
+ { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
@@ -101,10 +143,12 @@ MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
struct sh_mobile_sdhi {
struct clk *clk;
+ struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default, *pins_uhs;
+ void __iomem *scc_ctl;
};
static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
@@ -147,6 +191,12 @@ static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
if (ret < 0)
return ret;
+ ret = clk_prepare_enable(priv->clk_cd);
+ if (ret < 0) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
/*
* The clock driver may not know what maximum frequency
* actually works, so it should be set with the max-frequency
@@ -212,6 +262,7 @@ static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
struct sh_mobile_sdhi *priv = host_to_priv(host);
clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_cd);
}
static int sh_mobile_sdhi_card_busy(struct mmc_host *mmc)
@@ -255,6 +306,190 @@ static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
return pinctrl_select_state(priv->pinctrl, pin_state);
}
+/* SCC registers */
+#define SH_MOBILE_SDHI_SCC_DTCNTL 0x000
+#define SH_MOBILE_SDHI_SCC_TAPSET 0x002
+#define SH_MOBILE_SDHI_SCC_DT2FF 0x004
+#define SH_MOBILE_SDHI_SCC_CKSEL 0x006
+#define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008
+#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_DTCNTL register */
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0)
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16
+#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK 0xff
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_CKSEL register */
+#define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL BIT(0)
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_RVSCNTL register */
+#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0)
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_RVSREQ register */
+#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2)
+
+static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
+ struct sh_mobile_sdhi *priv, int addr)
+{
+ return readl(priv->scc_ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_scc_write32(struct tmio_mmc_host *host,
+ struct sh_mobile_sdhi *priv,
+ int addr, u32 val)
+{
+ writel(val, priv->scc_ctl + (addr << host->bus_shift));
+}
+
+static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv;
+
+ priv = host_to_priv(host);
+
+ /* set sampling clock selection range */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ 0x8 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+
+ /* Initialize SCC */
+ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, 0x0);
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+ SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+
+ /* Read TAPNUM */
+ return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT) &
+ SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
+}
+
+static void sh_mobile_sdhi_prepare_tuning(struct tmio_mmc_host *host,
+ unsigned long tap)
+{
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+
+ /* Set sampling clock position */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
+}
+
+#define SH_MOBILE_SDHI_MAX_TAP 3
+
+static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ unsigned long tap_cnt; /* counter of tuning success */
+ unsigned long tap_set; /* tap position */
+ unsigned long tap_start;/* start position of tuning success */
+ unsigned long tap_end; /* end position of tuning success */
+ unsigned long ntap; /* temporary counter of tuning success */
+ unsigned long i;
+
+ /* Clear SCC_RVSREQ */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+
+ /*
+ * Find the longest consecutive run of successful probes. If that
+ * is at least SH_MOBILE_SDHI_MAX_TAP probes long, then use the
+ * center index as the tap.
+ */
+ tap_cnt = 0;
+ ntap = 0;
+ tap_start = 0;
+ tap_end = 0;
+ for (i = 0; i < host->tap_num * 2; i++) {
+ if (test_bit(i, host->taps))
+ ntap++;
+ else {
+ if (ntap > tap_cnt) {
+ tap_start = i - ntap;
+ tap_end = i - 1;
+ tap_cnt = ntap;
+ }
+ ntap = 0;
+ }
+ }
+
+ if (ntap > tap_cnt) {
+ tap_start = i - ntap;
+ tap_end = i - 1;
+ tap_cnt = ntap;
+ }
+
+ if (tap_cnt >= SH_MOBILE_SDHI_MAX_TAP)
+ tap_set = (tap_start + tap_end) / 2 % host->tap_num;
+ else
+ return -EIO;
+
+ /* Set SCC */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap_set);
+
+ /* Enable auto re-tuning */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN |
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+ return 0;
+}
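(Worked example, not from this patch: with tap_num = 8 the bitmap holds 16 probe results. If probes 3 to 9 succeeded and the rest failed, the longest run is 7, which is at least SH_MOBILE_SDHI_MAX_TAP, so tap_set = (3 + 9) / 2 % 8 = 6 is programmed into SCC_TAPSET.)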
+
+
+static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+
+ /* Check SCC error */
+ if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
+ SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &&
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
+ SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
+ /* Clear SCC error */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
+ return true;
+ }
+
+ return false;
+}
+
+static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
+{
+ struct sh_mobile_sdhi *priv;
+
+ priv = host_to_priv(host);
+
+ /* Reset SCC */
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
+ ~SH_MOBILE_SDHI_SCC_CKSEL_DTSEL &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+}
+
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
{
int timeout = 1000;
@@ -318,14 +553,13 @@ static void sh_mobile_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
static int sh_mobile_sdhi_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(sh_mobile_sdhi_of_match, &pdev->dev);
+ const struct sh_mobile_sdhi_of_data *of_data = of_device_get_match_data(&pdev->dev);
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
struct tmio_mmc_host *host;
struct resource *res;
- int irq, ret, i = 0;
+ int irq, ret, i;
struct tmio_mmc_dma *dma_priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -346,6 +580,21 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
+ /*
+ * Some controllers provide a 2nd clock just to run the internal card
+ * detection logic. Unfortunately, the existing driver architecture does
+ * not support a separation of clocks for runtime PM usage. When
+ * native hotplug is used, the tmio driver assumes that the core
+ * must continue to run for card detect to stay active, so we cannot
+ * disable it.
+ * Additionally, it is prohibited to supply a clock to the core but not
+ * to the card detect circuit. That leaves us with this: if separate
+ * clocks are provided, we must treat them both as virtually one clock.
+ */
+ priv->clk_cd = devm_clk_get(&pdev->dev, "cd");
+ if (IS_ERR(priv->clk_cd))
+ priv->clk_cd = NULL;
+
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
@@ -360,10 +609,10 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
- if (of_id && of_id->data) {
- const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
+ if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
+ mmc_data->ocr_mask = of_data->tmio_ocr_mask;
mmc_data->capabilities |= of_data->capabilities;
mmc_data->capabilities2 |= of_data->capabilities2;
mmc_data->dma_rx_offset = of_data->dma_rx_offset;
@@ -415,15 +664,43 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL;
- /*
- * All SDHI need SDIO_INFO1 reserved bit
- */
- mmc_data->flags |= TMIO_MMC_SDIO_STATUS_QUIRK;
+ /* All SDHI have SDIO status bits which must be 1 */
+ mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
ret = tmio_mmc_host_probe(host, mmc_data);
if (ret < 0)
goto efree;
+ /* Enable tuning iff we have an SCC and a supported mode */
+ if (of_data && of_data->scc_offset &&
+ (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
+ host->mmc->caps2 & MMC_CAP2_HS200_1_8V_SDR)) {
+ const struct sh_mobile_sdhi_scc *taps = of_data->taps;
+ bool hit = false;
+
+ host->mmc->caps |= MMC_CAP_HW_RESET;
+
+ for (i = 0; i < of_data->taps_num; i++) {
+ if (taps[i].clk_rate == 0 ||
+ taps[i].clk_rate == host->mmc->f_max) {
+ host->scc_tappos = taps->tap;
+ hit = true;
+ break;
+ }
+ }
+
+ if (!hit)
+ dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+
+ priv->scc_ctl = host->ctl + of_data->scc_offset;
+ host->init_tuning = sh_mobile_sdhi_init_tuning;
+ host->prepare_tuning = sh_mobile_sdhi_prepare_tuning;
+ host->select_tuning = sh_mobile_sdhi_select_tuning;
+ host->check_scc_error = sh_mobile_sdhi_check_scc_error;
+ host->hw_reset = sh_mobile_sdhi_hw_reset;
+ }
+
+ i = 0;
while (1) {
irq = platform_get_irq(pdev, i);
if (irq < 0)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index c0a5c676d0e8..6ffcd2838272 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -5,6 +5,7 @@
* (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
* (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
* (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
+ * (C) Copyright 2017 Sootech SA
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -101,6 +102,7 @@
(SDXC_SOFT_RESET | SDXC_FIFO_RESET | SDXC_DMA_RESET)
/* clock control bits */
+#define SDXC_MASK_DATA0 BIT(31)
#define SDXC_CARD_CLOCK_ON BIT(16)
#define SDXC_LOW_POWER_ON BIT(17)
@@ -253,6 +255,11 @@ struct sunxi_mmc_cfg {
/* does the IP block support autocalibration? */
bool can_calibrate;
+
+ /* Does DATA0 need to be masked while the clock is updated? */
+ bool mask_data0;
+
+ bool needs_new_timings;
};
struct sunxi_mmc_host {
@@ -654,11 +661,16 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
unsigned long expire = jiffies + msecs_to_jiffies(750);
u32 rval;
+ dev_dbg(mmc_dev(host->mmc), "%sabling the clock\n",
+ oclk_en ? "en" : "dis");
+
rval = mmc_readl(host, REG_CLKCR);
- rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON);
+ rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON | SDXC_MASK_DATA0);
if (oclk_en)
rval |= SDXC_CARD_CLOCK_ON;
+ if (host->cfg->mask_data0)
+ rval |= SDXC_MASK_DATA0;
mmc_writel(host, REG_CLKCR, rval);
@@ -678,46 +690,29 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
return -EIO;
}
+ if (host->cfg->mask_data0) {
+ rval = mmc_readl(host, REG_CLKCR);
+ mmc_writel(host, REG_CLKCR, rval & ~SDXC_MASK_DATA0);
+ }
+
return 0;
}
static int sunxi_mmc_calibrate(struct sunxi_mmc_host *host, int reg_off)
{
- u32 reg = readl(host->reg_base + reg_off);
- u32 delay;
- unsigned long timeout;
-
if (!host->cfg->can_calibrate)
return 0;
- reg &= ~(SDXC_CAL_DL_MASK << SDXC_CAL_DL_SW_SHIFT);
- reg &= ~SDXC_CAL_DL_SW_EN;
-
- writel(reg | SDXC_CAL_START, host->reg_base + reg_off);
-
- dev_dbg(mmc_dev(host->mmc), "calibration started\n");
-
- timeout = jiffies + HZ * SDXC_CAL_TIMEOUT;
-
- while (!((reg = readl(host->reg_base + reg_off)) & SDXC_CAL_DONE)) {
- if (time_before(jiffies, timeout))
- cpu_relax();
- else {
- reg &= ~SDXC_CAL_START;
- writel(reg, host->reg_base + reg_off);
-
- return -ETIMEDOUT;
- }
- }
-
- delay = (reg >> SDXC_CAL_DL_SHIFT) & SDXC_CAL_DL_MASK;
-
- reg &= ~SDXC_CAL_START;
- reg |= (delay << SDXC_CAL_DL_SW_SHIFT) | SDXC_CAL_DL_SW_EN;
-
- writel(reg, host->reg_base + reg_off);
-
- dev_dbg(mmc_dev(host->mmc), "calibration ended, reg is 0x%x\n", reg);
+ /*
+ * FIXME:
+ * It is not yet clear how the calibration is supposed to
+ * work. The best rates have been obtained by simply setting
+ * the delay to 0, as Allwinner does in its BSP.
+ *
+ * The only mode that doesn't have such a delay is HS400,
+ * which is in itself a TODO.
+ */
+ writel(SDXC_CAL_DL_SW_EN, host->reg_base + reg_off);
return 0;
}
@@ -745,6 +740,7 @@ static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host,
index = SDXC_CLK_50M_DDR;
}
} else {
+ dev_dbg(mmc_dev(host->mmc), "Invalid clock... returning\n");
return -EINVAL;
}
@@ -757,10 +753,21 @@ static int sunxi_mmc_clk_set_phase(struct sunxi_mmc_host *host,
static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
struct mmc_ios *ios)
{
+ struct mmc_host *mmc = host->mmc;
long rate;
u32 rval, clock = ios->clock;
int ret;
+ ret = sunxi_mmc_oclk_onoff(host, 0);
+ if (ret)
+ return ret;
+
+ /* Our clock is gated now */
+ mmc->actual_clock = 0;
+
+ if (!ios->clock)
+ return 0;
+
/* 8 bit DDR requires a higher module clock */
if (ios->timing == MMC_TIMING_MMC_DDR52 &&
ios->bus_width == MMC_BUS_WIDTH_8)
@@ -768,25 +775,21 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
rate = clk_round_rate(host->clk_mmc, clock);
if (rate < 0) {
- dev_err(mmc_dev(host->mmc), "error rounding clk to %d: %ld\n",
+ dev_err(mmc_dev(mmc), "error rounding clk to %d: %ld\n",
clock, rate);
return rate;
}
- dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %ld\n",
+ dev_dbg(mmc_dev(mmc), "setting clk to %d, rounded %ld\n",
clock, rate);
/* setting clock rate */
ret = clk_set_rate(host->clk_mmc, rate);
if (ret) {
- dev_err(mmc_dev(host->mmc), "error setting clk to %ld: %d\n",
+ dev_err(mmc_dev(mmc), "error setting clk to %ld: %d\n",
rate, ret);
return ret;
}
- ret = sunxi_mmc_oclk_onoff(host, 0);
- if (ret)
- return ret;
-
/* clear internal divider */
rval = mmc_readl(host, REG_CLKCR);
rval &= ~0xff;
@@ -798,6 +801,9 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
}
mmc_writel(host, REG_CLKCR, rval);
+ if (host->cfg->needs_new_timings)
+ mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
+
ret = sunxi_mmc_clk_set_phase(host, ios, rate);
if (ret)
return ret;
@@ -806,9 +812,22 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
if (ret)
return ret;
- /* TODO: enable calibrate on sdc2 SDXC_REG_DS_DL_REG of A64 */
+ /*
+ * FIXME:
+ *
+ * In HS400 we'll also need to calibrate the data strobe
+ * signal. This should only happen on the MMC2 controller (at
+ * least on the A64).
+ */
+
+ ret = sunxi_mmc_oclk_onoff(host, 1);
+ if (ret)
+ return ret;
- return sunxi_mmc_oclk_onoff(host, 1);
+ /* The card clock is now enabled again */
+ mmc->actual_clock = rate;
+
+ return 0;
}
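(Illustrative summary, not from this patch: the reordered flow above is gate the card clock and zero mmc->actual_clock, return early for a zero-clock request, program the divider, phase and calibration, then ungate the clock and only afterwards report the new rate in mmc->actual_clock, so the core never sees a stale rate while the clock is off.)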
static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -822,10 +841,13 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
case MMC_POWER_UP:
- host->ferror = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
- ios->vdd);
- if (host->ferror)
- return;
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->ferror = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ ios->vdd);
+ if (host->ferror)
+ return;
+ }
if (!IS_ERR(mmc->supply.vqmmc)) {
host->ferror = regulator_enable(mmc->supply.vqmmc);
@@ -847,7 +869,9 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
dev_dbg(mmc_dev(mmc), "power off!\n");
sunxi_mmc_reset_host(host);
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled)
regulator_disable(mmc->supply.vqmmc);
host->vqmmc_enabled = false;
@@ -877,7 +901,7 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mmc_writel(host, REG_GCTRL, rval);
/* set up clock */
- if (ios->clock && ios->power_mode) {
+ if (ios->power_mode) {
host->ferror = sunxi_mmc_clk_set_rate(host, ios);
/* Android code had a usleep_range(50000, 55000); here */
}
@@ -1084,6 +1108,14 @@ static const struct sunxi_mmc_cfg sun50i_a64_cfg = {
.idma_des_size_bits = 16,
.clk_delays = NULL,
.can_calibrate = true,
+ .mask_data0 = true,
+ .needs_new_timings = true,
+};
+
+static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
+ .idma_des_size_bits = 13,
+ .clk_delays = NULL,
+ .can_calibrate = true,
};
static const struct of_device_id sunxi_mmc_of_match[] = {
@@ -1092,6 +1124,7 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun7i-a20-mmc", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun9i-a80-mmc", .data = &sun9i_a80_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
+ { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 8e126afd988c..2b349d48fb9a 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -24,6 +24,7 @@
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
@@ -90,6 +91,8 @@
#define TMIO_SDIO_STAT_EXWT 0x8000
#define TMIO_SDIO_MASK_ALL 0xc007
+#define TMIO_SDIO_SETBITS_MASK 0x0006
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
@@ -153,9 +156,12 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
+ u32 scc_tappos;
- int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ /* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
+
+ /* Optional callbacks */
unsigned int (*clk_update)(struct tmio_mmc_host *host,
unsigned int new_clock);
void (*clk_disable)(struct tmio_mmc_host *host);
@@ -164,6 +170,21 @@ struct tmio_mmc_host {
int (*card_busy)(struct mmc_host *mmc);
int (*start_signal_voltage_switch)(struct mmc_host *mmc,
struct mmc_ios *ios);
+ int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ void (*hw_reset)(struct tmio_mmc_host *host);
+ void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
+ bool (*check_scc_error)(struct tmio_mmc_host *host);
+
+ /*
+ * Mandatory callbacks for tuning to occur; tuning itself is optional
+ * for SDR50 and mandatory for SDR104.
+ */
+ unsigned int (*init_tuning)(struct tmio_mmc_host *host);
+ int (*select_tuning)(struct tmio_mmc_host *host);
+
+ /* Tuning values: 1 for success, 0 for failure */
+ DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
+ unsigned int tap_num;
};
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
@@ -245,6 +266,12 @@ static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int ad
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
+static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
+ u32 *buf, int count)
+{
+ readsl(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
/* If there is a hook and it returns non-zero then there
@@ -267,4 +294,10 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
+static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
+ const u32 *buf, int count)
+{
+ writesl(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
#endif
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 700567603107..6b789a739d4d 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -22,7 +22,6 @@
* TODO:
* Investigate using a workqueue for PIO transfers
* Eliminate FIXMEs
- * SDIO support
* Better Power management
* Handle MMC errors better
* double buffer support
@@ -36,6 +35,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
+#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
@@ -134,18 +134,25 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct tmio_mmc_host *host = mmc_priv(mmc);
if (enable && !host->sdio_irq_enabled) {
+ u16 sdio_status;
+
/* Keep device active while SDIO irq is enabled */
pm_runtime_get_sync(mmc_dev(mmc));
- host->sdio_irq_enabled = true;
+ host->sdio_irq_enabled = true;
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
~TMIO_SDIO_STAT_IOIRQ;
- sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+
+ /* Clear obsolete interrupts before enabling */
+ sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
+ if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
+ sdio_status |= TMIO_SDIO_SETBITS_MASK;
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
+
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
} else if (!enable && host->sdio_irq_enabled) {
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
- sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
host->sdio_irq_enabled = false;
pm_runtime_mark_last_busy(mmc_dev(mmc));
@@ -298,6 +305,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
if (mrq->cmd->error || (mrq->data && mrq->data->error))
tmio_mmc_abort_dma(host);
+ if (host->check_scc_error)
+ host->check_scc_error(host);
+
mmc_request_done(host->mmc, mrq);
}
@@ -393,6 +403,36 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
/*
* Transfer the data
*/
+ if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
+ u8 data[4] = { };
+
+ if (is_read)
+ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+ count >> 2);
+ else
+ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
+ count >> 2);
+
+ /* if count was a multiple of 4, we are done */
+ if (!(count & 0x3))
+ return;
+
+ buf8 = (u8 *)(buf + (count >> 2));
+ count %= 4;
+
+ if (is_read) {
+ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
+ (u32 *)data, 1);
+ memcpy(buf8, data, count);
+ } else {
+ memcpy(data, buf8, count);
+ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
+ (u32 *)data, 1);
+ }
+
+ return;
+ }
+
if (is_read)
sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
else
@@ -522,7 +562,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
schedule_work(&host->done);
}
-static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
struct mmc_data *data;
spin_lock(&host->lock);
@@ -531,6 +571,9 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
if (!data)
goto out;
+ if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
+ stat & TMIO_STAT_TXUNDERRUN)
+ data->error = -EILSEQ;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
bool done = false;
@@ -579,8 +622,6 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
goto out;
}
- host->cmd = NULL;
-
/* This controller is sicker than the PXA one. Not only do we need to
* drop the top 8 bits of the first response word, we also need to
* modify the order of the response for short response command types.
@@ -600,14 +641,16 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
if (stat & TMIO_STAT_CMDTIMEOUT)
cmd->error = -ETIMEDOUT;
- else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
+ else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
+ stat & TMIO_STAT_STOPBIT_ERR ||
+ stat & TMIO_STAT_CMD_IDX_ERR)
cmd->error = -EILSEQ;
/* If there is data to handle we enable data IRQs here, and
* we will ultimately finish the request in the data_end handler.
* If there's no data or we encountered an error, finish now.
*/
- if (host->data && !cmd->error) {
+ if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
if (host->data->flags & MMC_DATA_READ) {
if (host->force_pio || !host->chan_rx)
tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
@@ -668,16 +711,15 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
/* Data transfer completion */
if (ireg & TMIO_STAT_DATAEND) {
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
- tmio_mmc_data_irq(host);
+ tmio_mmc_data_irq(host, status);
return true;
}
return false;
}
-static void tmio_mmc_sdio_irq(int irq, void *devid)
+static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
- struct tmio_mmc_host *host = devid;
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
unsigned int ireg, status;
@@ -687,11 +729,11 @@ static void tmio_mmc_sdio_irq(int irq, void *devid)
return;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
- ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
+ ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
sdio_status = status & ~TMIO_SDIO_MASK_ALL;
- if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
- sdio_status |= 6;
+ if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
+ sdio_status |= TMIO_SDIO_SETBITS_MASK;
sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
@@ -718,7 +760,7 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
- tmio_mmc_sdio_irq(irq, devid);
+ __tmio_mmc_sdio_irq(host);
return IRQ_HANDLED;
}
@@ -756,6 +798,63 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
return 0;
}
+static void tmio_mmc_hw_reset(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (host->hw_reset)
+ host->hw_reset(host);
+}
+
+static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ int i, ret = 0;
+
+ if (!host->tap_num) {
+ if (!host->init_tuning || !host->select_tuning)
+ /* Tuning is not supported */
+ goto out;
+
+ host->tap_num = host->init_tuning(host);
+ if (!host->tap_num)
+ /* Tuning is not supported */
+ goto out;
+ }
+
+ if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
+ dev_warn_once(&host->pdev->dev,
+ "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
+ goto out;
+ }
+
+ bitmap_zero(host->taps, host->tap_num * 2);
+
+ /* Issue CMD19 twice for each tap */
+ for (i = 0; i < 2 * host->tap_num; i++) {
+ if (host->prepare_tuning)
+ host->prepare_tuning(host, i % host->tap_num);
+
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (ret && ret != -EILSEQ)
+ goto out;
+ if (ret == 0)
+ set_bit(i, host->taps);
+
+ mdelay(1);
+ }
+
+ ret = host->select_tuning(host);
+
+out:
+ if (ret < 0) {
+ dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
+ tmio_mmc_hw_reset(mmc);
+ }
+
+ return ret;
+}
+
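(Worked example, not from this patch: the loop above probes each physical tap twice, since prepare_tuning() is called with i % tap_num. With tap_num = 4, i runs from 0 to 7, so a run of successes at i = 3, 4, 5 corresponds to physical taps 3, 0, 1, a window that wraps around the end of the range; a select_tuning() implementation like the sh_mobile_sdhi one above would then take the centre (3 + 5) / 2 = 4 and reduce it modulo tap_num to tap 0.)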
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
@@ -809,6 +908,12 @@ static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
return host->clk_enable(host);
}
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+ if (host->clk_disable)
+ host->clk_disable(host);
+}
+
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -972,6 +1077,8 @@ static struct mmc_host_ops tmio_mmc_ops = {
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
.multi_io_quirk = tmio_multi_io_quirk,
+ .hw_reset = tmio_mmc_hw_reset,
+ .execute_tuning = tmio_mmc_execute_tuning,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
@@ -1050,7 +1157,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
ret = mmc_of_parse(mmc);
if (ret < 0)
- goto host_free;
+ return ret;
_host->pdata = pdata;
platform_set_drvdata(pdev, mmc);
@@ -1060,14 +1167,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
ret = tmio_mmc_init_ocr(_host);
if (ret < 0)
- goto host_free;
+ return ret;
_host->ctl = devm_ioremap(&pdev->dev,
res_ctl->start, resource_size(res_ctl));
- if (!_host->ctl) {
- ret = -ENOMEM;
- goto host_free;
- }
+ if (!_host->ctl)
+ return -ENOMEM;
tmio_mmc_ops.card_busy = _host->card_busy;
tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
@@ -1084,8 +1189,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
- !mmc_card_is_removable(mmc) ||
- mmc->slot.cd_irq >= 0);
+ !mmc_card_is_removable(mmc));
/*
* On Gen2+, eMMC with NONREMOVABLE currently fails because native
@@ -1105,10 +1209,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
* Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
* looping forever...
*/
- if (mmc->f_min == 0) {
- ret = -EINVAL;
- goto host_free;
- }
+ if (mmc->f_min == 0)
+ return -EINVAL;
/*
* While using internal tmio hardware logic for card detection, we need
@@ -1137,7 +1239,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
- sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
+ sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0001);
}
spin_lock_init(&_host->lock);
@@ -1173,10 +1275,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
}
return 0;
-
-host_free:
-
- return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
@@ -1185,6 +1283,9 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
struct platform_device *pdev = host->pdev;
struct mmc_host *mmc = host->mmc;
+ if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+
if (!host->native_hotplug)
pm_runtime_get_sync(&pdev->dev);
@@ -1197,6 +1298,8 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
@@ -1211,13 +1314,17 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
if (host->clk_cache)
tmio_mmc_clk_stop(host);
- if (host->clk_disable)
- host->clk_disable(host);
+ tmio_mmc_clk_disable(host);
return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
+static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
+{
+ return host->tap_num && mmc_can_retune(host->mmc);
+}
+
int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1231,6 +1338,9 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
tmio_mmc_enable_dma(host, true);
+ if (tmio_mmc_can_retune(host) && host->select_tuning(host))
+ dev_warn(&host->pdev->dev, "Tuning selection failed\n");
+
return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f09d69..1d843357422e 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct ushc_data *ushc;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 63fac78b3d46..6380044c0628 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index bb3e0d1dd355..c061e7c704be 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -640,8 +640,6 @@ static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
mutex_lock(&vub300->irq_mutex);
if (vub300->irq_enabled)
mmc_signal_sdio_irq(vub300->mmc);
- else if (vub300->irqs_queued)
- vub300->irqs_queued += 1;
else
vub300->irqs_queued += 1;
vub300->irq_disabled = 0;
@@ -728,8 +726,7 @@ static void vub300_deadwork_thread(struct work_struct *work)
*/
} else if (vub300->card_present) {
check_vub300_port_status(vub300);
- } else if (vub300->mmc && vub300->mmc->card &&
- mmc_card_present(vub300->mmc->card)) {
+ } else if (vub300->mmc && vub300->mmc->card) {
/*
* the MMC core must not have responded
* to the previous indication - lets
@@ -1756,8 +1753,7 @@ static void vub300_cmndwork_thread(struct work_struct *work)
int data_length;
mutex_lock(&vub300->cmd_mutex);
init_completion(&vub300->command_complete);
- if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
- !mmc_card_present(vub300->mmc->card)) {
+ if (likely(vub300->vub_name[0]) || !vub300->mmc->card) {
/*
* the name of the EMPTY Pseudo firmware file
* is used as a flag to indicate that the file
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index c3fd16d997ca..bd04e8bae010 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1395,23 +1395,25 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma)
*/
host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmc_dev(host->mmc), host->dma_addr))
+ goto kfree;
/*
* ISA DMA must be aligned on a 64k basis.
*/
if ((host->dma_addr & 0xffff) != 0)
- goto kfree;
+ goto unmap;
/*
* ISA cannot access memory above 16 MB.
*/
else if (host->dma_addr >= 0x1000000)
- goto kfree;
+ goto unmap;
host->dma = dma;
return;
-kfree:
+unmap:
/*
* If we've gotten here then there is some kind of alignment bug
*/
@@ -1421,6 +1423,7 @@ kfree:
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
host->dma_addr = 0;
+kfree:
kfree(host->dma_buffer);
host->dma_buffer = NULL;
@@ -1434,11 +1437,14 @@ err:
static void wbsd_release_dma(struct wbsd_host *host)
{
- if (host->dma_addr) {
+ /*
+ * host->dma_addr is valid here iff host->dma_buffer is not NULL.
+ */
+ if (host->dma_buffer) {
dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+ kfree(host->dma_buffer);
}
- kfree(host->dma_buffer);
if (host->dma >= 0)
free_dma(host->dma);
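(Illustrative only, not taken from this patch: the relabelled wbsd error path follows the usual reverse-order unwind rule, since a buffer whose mapping failed dma_mapping_error() must never be passed to dma_unmap_single(). A minimal hypothetical shape of that pattern; foo_setup_dma() and suitable() are placeholders for the driver's real setup and its ISA alignment/address checks:)

	static int foo_setup_dma(struct device *dev, void *buf, size_t size)
	{
		dma_addr_t addr;

		addr = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, addr))
			goto free_buf;		/* never mapped: only free */
		if (!suitable(addr))		/* stand-in for the ISA checks */
			goto unmap;		/* mapped but unusable */
		return 0;

	unmap:
		dma_unmap_single(dev, addr, size, DMA_BIDIRECTIONAL);
	free_buf:
		kfree(buf);
		return -EINVAL;
	}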
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 5af00559e9d6..21ebba88679c 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>