From f90d2e4035d456cb20c0b784725d556eb4de4d8a Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Tue, 15 Sep 2015 15:19:45 +0200 Subject: mmc: core: Convert __mmc_switch() into an internal core function As there are no users of the __mmc_switch() API, except for the mmc core itself, let's convert it from an exported function into an internal. Signed-off-by: Ulf Hansson --- include/linux/mmc/core.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux/mmc') diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 258daf914c6d..79a31d3c3788 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -152,8 +152,6 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); -extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, - bool, bool); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); extern int mmc_send_tuning(struct mmc_host *host); extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); -- cgit v1.2.3 From 9eadcc0581a8ccaf4c2378aa1c193fb164304f1d Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Fri, 2 Oct 2015 10:56:11 +0200 Subject: mmc: core: Remove MMC_CLKGATE MMC_CLKGATE was once invented to save power by gating the bus clock at request inactivity. At that time it served its purpose. The modern way to deal with power saving for these scenarios, is by using runtime PM. Nowadays, several host drivers have deployed runtime PM, but for those that haven't and which still cares power saving at request inactivity, it's certainly time to deploy runtime PM as it has been around for several years now. To simplify code to mmc core and thus decrease maintenance efforts, this patch removes all code related to MMC_CLKGATE. Signed-off-by: Ulf Hansson Reviewed-by: Linus Walleij --- Documentation/mmc/mmc-dev-attrs.txt | 10 -- drivers/mmc/core/Kconfig | 10 -- drivers/mmc/core/core.c | 139 +++----------------- drivers/mmc/core/core.h | 3 - drivers/mmc/core/debugfs.c | 5 - drivers/mmc/core/host.c | 245 ------------------------------------ drivers/mmc/core/mmc.c | 2 - drivers/mmc/core/quirks.c | 18 --- drivers/mmc/core/sd.c | 2 - drivers/mmc/core/sdio.c | 7 +- drivers/mmc/core/sdio_irq.c | 14 +-- include/linux/mmc/card.h | 1 - include/linux/mmc/host.h | 32 ----- 13 files changed, 19 insertions(+), 469 deletions(-) (limited to 'include/linux/mmc') diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt index 189bab09255a..caa555706f89 100644 --- a/Documentation/mmc/mmc-dev-attrs.txt +++ b/Documentation/mmc/mmc-dev-attrs.txt @@ -72,13 +72,3 @@ Note on raw_rpmb_size_mult: "raw_rpmb_size_mult" is a mutliple of 128kB block. RPMB size in byte is calculated by using the following equation: RPMB partition size = 128kB x raw_rpmb_size_mult - -SD/MMC/SDIO Clock Gating Attribute -================================== - -Read and write access is provided to following attribute. -This attribute appears only if CONFIG_MMC_CLKGATE is enabled. - - clkgate_delay Tune the clock gating delay with desired value in milliseconds. 
- -echo > /sys/class/mmc_host/mmcX/clkgate_delay diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 9ebee72d9c3f..4c33d7690f2f 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -1,13 +1,3 @@ # # MMC core configuration # - -config MMC_CLKGATE - bool "MMC host clock gating" - help - This will attempt to aggressively gate the clock to the MMC card. - This is done to save power due to gating off the logic and bus - noise when the MMC card is not in use. Your host driver has to - support handling this in order for it to be of any use. - - If unsure, say N. diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 21cda23b247c..1a36b021b44e 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -187,8 +187,6 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) if (mrq->done) mrq->done(mrq); - - mmc_host_clk_release(host); } } @@ -292,7 +290,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) mrq->stop->mrq = mrq; } } - mmc_host_clk_hold(host); led_trigger_event(host->led, LED_FULL); __mmc_start_request(host, mrq); @@ -542,11 +539,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host, static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, bool is_first_req) { - if (host->ops->pre_req) { - mmc_host_clk_hold(host); + if (host->ops->pre_req) host->ops->pre_req(host, mrq, is_first_req); - mmc_host_clk_release(host); - } } /** @@ -561,11 +555,8 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, int err) { - if (host->ops->post_req) { - mmc_host_clk_hold(host); + if (host->ops->post_req) host->ops->post_req(host, mrq, err); - mmc_host_clk_release(host); - } } /** @@ -850,9 +841,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) unsigned int timeout_us, limit_us; timeout_us = data->timeout_ns / 1000; - if (mmc_host_clk_rate(card->host)) + if (card->host->ios.clock) timeout_us += data->timeout_clks * 1000 / - (mmc_host_clk_rate(card->host) / 1000); + (card->host->ios.clock / 1000); if (data->flags & MMC_DATA_WRITE) /* @@ -1050,8 +1041,6 @@ static inline void mmc_set_ios(struct mmc_host *host) ios->power_mode, ios->chip_select, ios->vdd, ios->bus_width, ios->timing); - if (ios->clock > 0) - mmc_set_ungated(host); host->ops->set_ios(host, ios); } @@ -1060,17 +1049,15 @@ static inline void mmc_set_ios(struct mmc_host *host) */ void mmc_set_chip_select(struct mmc_host *host, int mode) { - mmc_host_clk_hold(host); host->ios.chip_select = mode; mmc_set_ios(host); - mmc_host_clk_release(host); } /* * Sets the host clock to the highest possible frequency that * is below "hz". */ -static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) +void mmc_set_clock(struct mmc_host *host, unsigned int hz) { WARN_ON(hz && hz < host->f_min); @@ -1081,68 +1068,6 @@ static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) mmc_set_ios(host); } -void mmc_set_clock(struct mmc_host *host, unsigned int hz) -{ - mmc_host_clk_hold(host); - __mmc_set_clock(host, hz); - mmc_host_clk_release(host); -} - -#ifdef CONFIG_MMC_CLKGATE -/* - * This gates the clock by setting it to 0 Hz. 
- */ -void mmc_gate_clock(struct mmc_host *host) -{ - unsigned long flags; - - spin_lock_irqsave(&host->clk_lock, flags); - host->clk_old = host->ios.clock; - host->ios.clock = 0; - host->clk_gated = true; - spin_unlock_irqrestore(&host->clk_lock, flags); - mmc_set_ios(host); -} - -/* - * This restores the clock from gating by using the cached - * clock value. - */ -void mmc_ungate_clock(struct mmc_host *host) -{ - /* - * We should previously have gated the clock, so the clock shall - * be 0 here! The clock may however be 0 during initialization, - * when some request operations are performed before setting - * the frequency. When ungate is requested in that situation - * we just ignore the call. - */ - if (host->clk_old) { - BUG_ON(host->ios.clock); - /* This call will also set host->clk_gated to false */ - __mmc_set_clock(host, host->clk_old); - } -} - -void mmc_set_ungated(struct mmc_host *host) -{ - unsigned long flags; - - /* - * We've been given a new frequency while the clock is gated, - * so make sure we regard this as ungating it. - */ - spin_lock_irqsave(&host->clk_lock, flags); - host->clk_gated = false; - spin_unlock_irqrestore(&host->clk_lock, flags); -} - -#else -void mmc_set_ungated(struct mmc_host *host) -{ -} -#endif - int mmc_execute_tuning(struct mmc_card *card) { struct mmc_host *host = card->host; @@ -1157,9 +1082,7 @@ int mmc_execute_tuning(struct mmc_card *card) else opcode = MMC_SEND_TUNING_BLOCK; - mmc_host_clk_hold(host); err = host->ops->execute_tuning(host, opcode); - mmc_host_clk_release(host); if (err) pr_err("%s: tuning execution failed\n", mmc_hostname(host)); @@ -1174,10 +1097,8 @@ int mmc_execute_tuning(struct mmc_card *card) */ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) { - mmc_host_clk_hold(host); host->ios.bus_mode = mode; mmc_set_ios(host); - mmc_host_clk_release(host); } /* @@ -1185,10 +1106,8 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) */ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) { - mmc_host_clk_hold(host); host->ios.bus_width = width; mmc_set_ios(host); - mmc_host_clk_release(host); } /* @@ -1532,11 +1451,8 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) int old_signal_voltage = host->ios.signal_voltage; host->ios.signal_voltage = signal_voltage; - if (host->ops->start_signal_voltage_switch) { - mmc_host_clk_hold(host); + if (host->ops->start_signal_voltage_switch) err = host->ops->start_signal_voltage_switch(host, &host->ios); - mmc_host_clk_release(host); - } if (err) host->ios.signal_voltage = old_signal_voltage; @@ -1570,20 +1486,17 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) pr_warn("%s: cannot verify signal voltage switch\n", mmc_hostname(host)); - mmc_host_clk_hold(host); - cmd.opcode = SD_SWITCH_VOLTAGE; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(host, &cmd, 0); if (err) - goto err_command; + return err; + + if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) + return -EIO; - if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) { - err = -EIO; - goto err_command; - } /* * The card should drive cmd and dat[0:3] low immediately * after the response of cmd11, but wait 1 ms to be sure @@ -1632,9 +1545,6 @@ power_cycle: mmc_power_cycle(host, ocr); } -err_command: - mmc_host_clk_release(host); - return err; } @@ -1643,10 +1553,8 @@ err_command: */ void mmc_set_timing(struct mmc_host *host, unsigned int timing) { - mmc_host_clk_hold(host); host->ios.timing = timing; 
mmc_set_ios(host); - mmc_host_clk_release(host); } /* @@ -1654,10 +1562,8 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing) */ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) { - mmc_host_clk_hold(host); host->ios.drv_type = drv_type; mmc_set_ios(host); - mmc_host_clk_release(host); } int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, @@ -1665,7 +1571,6 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, { struct mmc_host *host = card->host; int host_drv_type = SD_DRIVER_TYPE_B; - int drive_strength; *drv_type = 0; @@ -1688,14 +1593,10 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, * information and let the hardware specific code * return what is possible given the options */ - mmc_host_clk_hold(host); - drive_strength = host->ops->select_drive_strength(card, max_dtr, - host_drv_type, - card_drv_type, - drv_type); - mmc_host_clk_release(host); - - return drive_strength; + return host->ops->select_drive_strength(card, max_dtr, + host_drv_type, + card_drv_type, + drv_type); } /* @@ -1714,8 +1615,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr) if (host->ios.power_mode == MMC_POWER_ON) return; - mmc_host_clk_hold(host); - mmc_pwrseq_pre_power_on(host); host->ios.vdd = fls(ocr) - 1; @@ -1749,8 +1648,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr) * time required to reach a stable voltage. */ mmc_delay(10); - - mmc_host_clk_release(host); } void mmc_power_off(struct mmc_host *host) @@ -1758,8 +1655,6 @@ void mmc_power_off(struct mmc_host *host) if (host->ios.power_mode == MMC_POWER_OFF) return; - mmc_host_clk_hold(host); - mmc_pwrseq_power_off(host); host->ios.clock = 0; @@ -1775,8 +1670,6 @@ void mmc_power_off(struct mmc_host *host) * can be successfully turned on again. 
*/ mmc_delay(1); - - mmc_host_clk_release(host); } void mmc_power_cycle(struct mmc_host *host, u32 ocr) @@ -1992,7 +1885,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, */ timeout_clks <<= 1; timeout_us += (timeout_clks * 1000) / - (mmc_host_clk_rate(card->host) / 1000); + (card->host->ios.clock / 1000); erase_timeout = timeout_us / 1000; @@ -2440,9 +2333,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host) { if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) return; - mmc_host_clk_hold(host); host->ops->hw_reset(host); - mmc_host_clk_release(host); } int mmc_hw_reset(struct mmc_host *host) diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 1a22a82209b2..09241e56d628 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -40,9 +40,6 @@ void mmc_init_erase(struct mmc_card *card); void mmc_set_chip_select(struct mmc_host *host, int mode); void mmc_set_clock(struct mmc_host *host, unsigned int hz); -void mmc_gate_clock(struct mmc_host *host); -void mmc_ungate_clock(struct mmc_host *host); -void mmc_set_ungated(struct mmc_host *host); void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); void mmc_set_bus_width(struct mmc_host *host, unsigned int width); u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index f4db93e03e88..154aced0b91b 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -255,11 +255,6 @@ void mmc_add_host_debugfs(struct mmc_host *host) &mmc_clock_fops)) goto err_node; -#ifdef CONFIG_MMC_CLKGATE - if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR), - root, &host->clk_delay)) - goto err_node; -#endif #ifdef CONFIG_FAIL_MMC_REQUEST if (fail_request) setup_fault_attr(&fail_default_attr, fail_request); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 5466f25f0281..970e6906930b 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -61,246 +61,6 @@ void mmc_unregister_host_class(void) class_unregister(&mmc_host_class); } -#ifdef CONFIG_MMC_CLKGATE -static ssize_t clkgate_delay_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct mmc_host *host = cls_dev_to_mmc_host(dev); - return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay); -} - -static ssize_t clkgate_delay_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct mmc_host *host = cls_dev_to_mmc_host(dev); - unsigned long flags, value; - - if (kstrtoul(buf, 0, &value)) - return -EINVAL; - - spin_lock_irqsave(&host->clk_lock, flags); - host->clkgate_delay = value; - spin_unlock_irqrestore(&host->clk_lock, flags); - return count; -} - -/* - * Enabling clock gating will make the core call out to the host - * once up and once down when it performs a request or card operation - * intermingled in any fashion. The driver will see this through - * set_ios() operations with ios.clock field set to 0 to gate (disable) - * the block clock, and to the old frequency to enable it again. - */ -static void mmc_host_clk_gate_delayed(struct mmc_host *host) -{ - unsigned long tick_ns; - unsigned long freq = host->ios.clock; - unsigned long flags; - - if (!freq) { - pr_debug("%s: frequency set to 0 in disable function, " - "this means the clock is already disabled.\n", - mmc_hostname(host)); - return; - } - /* - * New requests may have appeared while we were scheduling, - * then there is no reason to delay the check before - * clk_disable(). 
- */ - spin_lock_irqsave(&host->clk_lock, flags); - - /* - * Delay n bus cycles (at least 8 from MMC spec) before attempting - * to disable the MCI block clock. The reference count may have - * gone up again after this delay due to rescheduling! - */ - if (!host->clk_requests) { - spin_unlock_irqrestore(&host->clk_lock, flags); - tick_ns = DIV_ROUND_UP(1000000000, freq); - ndelay(host->clk_delay * tick_ns); - } else { - /* New users appeared while waiting for this work */ - spin_unlock_irqrestore(&host->clk_lock, flags); - return; - } - mutex_lock(&host->clk_gate_mutex); - spin_lock_irqsave(&host->clk_lock, flags); - if (!host->clk_requests) { - spin_unlock_irqrestore(&host->clk_lock, flags); - /* This will set host->ios.clock to 0 */ - mmc_gate_clock(host); - spin_lock_irqsave(&host->clk_lock, flags); - pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); - } - spin_unlock_irqrestore(&host->clk_lock, flags); - mutex_unlock(&host->clk_gate_mutex); -} - -/* - * Internal work. Work to disable the clock at some later point. - */ -static void mmc_host_clk_gate_work(struct work_struct *work) -{ - struct mmc_host *host = container_of(work, struct mmc_host, - clk_gate_work.work); - - mmc_host_clk_gate_delayed(host); -} - -/** - * mmc_host_clk_hold - ungate hardware MCI clocks - * @host: host to ungate. - * - * Makes sure the host ios.clock is restored to a non-zero value - * past this call. Increase clock reference count and ungate clock - * if we're the first user. - */ -void mmc_host_clk_hold(struct mmc_host *host) -{ - unsigned long flags; - - /* cancel any clock gating work scheduled by mmc_host_clk_release() */ - cancel_delayed_work_sync(&host->clk_gate_work); - mutex_lock(&host->clk_gate_mutex); - spin_lock_irqsave(&host->clk_lock, flags); - if (host->clk_gated) { - spin_unlock_irqrestore(&host->clk_lock, flags); - mmc_ungate_clock(host); - spin_lock_irqsave(&host->clk_lock, flags); - pr_debug("%s: ungated MCI clock\n", mmc_hostname(host)); - } - host->clk_requests++; - spin_unlock_irqrestore(&host->clk_lock, flags); - mutex_unlock(&host->clk_gate_mutex); -} - -/** - * mmc_host_may_gate_card - check if this card may be gated - * @card: card to check. - */ -static bool mmc_host_may_gate_card(struct mmc_card *card) -{ - /* If there is no card we may gate it */ - if (!card) - return true; - /* - * Don't gate SDIO cards! These need to be clocked at all times - * since they may be independent systems generating interrupts - * and other events. The clock requests counter from the core will - * go down to zero since the core does not need it, but we will not - * gate the clock, because there is somebody out there that may still - * be using it. - */ - return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING); -} - -/** - * mmc_host_clk_release - gate off hardware MCI clocks - * @host: host to gate. - * - * Calls the host driver with ios.clock set to zero as often as possible - * in order to gate off hardware MCI clocks. Decrease clock reference - * count and schedule disabling of clock. - */ -void mmc_host_clk_release(struct mmc_host *host) -{ - unsigned long flags; - - spin_lock_irqsave(&host->clk_lock, flags); - host->clk_requests--; - if (mmc_host_may_gate_card(host->card) && - !host->clk_requests) - schedule_delayed_work(&host->clk_gate_work, - msecs_to_jiffies(host->clkgate_delay)); - spin_unlock_irqrestore(&host->clk_lock, flags); -} - -/** - * mmc_host_clk_rate - get current clock frequency setting - * @host: host to get the clock frequency for. 
- * - * Returns current clock frequency regardless of gating. - */ -unsigned int mmc_host_clk_rate(struct mmc_host *host) -{ - unsigned long freq; - unsigned long flags; - - spin_lock_irqsave(&host->clk_lock, flags); - if (host->clk_gated) - freq = host->clk_old; - else - freq = host->ios.clock; - spin_unlock_irqrestore(&host->clk_lock, flags); - return freq; -} - -/** - * mmc_host_clk_init - set up clock gating code - * @host: host with potential clock to control - */ -static inline void mmc_host_clk_init(struct mmc_host *host) -{ - host->clk_requests = 0; - /* Hold MCI clock for 8 cycles by default */ - host->clk_delay = 8; - /* - * Default clock gating delay is 0ms to avoid wasting power. - * This value can be tuned by writing into sysfs entry. - */ - host->clkgate_delay = 0; - host->clk_gated = false; - INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); - spin_lock_init(&host->clk_lock); - mutex_init(&host->clk_gate_mutex); -} - -/** - * mmc_host_clk_exit - shut down clock gating code - * @host: host with potential clock to control - */ -static inline void mmc_host_clk_exit(struct mmc_host *host) -{ - /* - * Wait for any outstanding gate and then make sure we're - * ungated before exiting. - */ - if (cancel_delayed_work_sync(&host->clk_gate_work)) - mmc_host_clk_gate_delayed(host); - if (host->clk_gated) - mmc_host_clk_hold(host); - /* There should be only one user now */ - WARN_ON(host->clk_requests > 1); -} - -static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) -{ - host->clkgate_delay_attr.show = clkgate_delay_show; - host->clkgate_delay_attr.store = clkgate_delay_store; - sysfs_attr_init(&host->clkgate_delay_attr.attr); - host->clkgate_delay_attr.attr.name = "clkgate_delay"; - host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR; - if (device_create_file(&host->class_dev, &host->clkgate_delay_attr)) - pr_err("%s: Failed to create clkgate_delay sysfs entry\n", - mmc_hostname(host)); -} -#else - -static inline void mmc_host_clk_init(struct mmc_host *host) -{ -} - -static inline void mmc_host_clk_exit(struct mmc_host *host) -{ -} - -static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) -{ -} - -#endif - void mmc_retune_enable(struct mmc_host *host) { host->can_retune = 1; @@ -583,8 +343,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) return NULL; } - mmc_host_clk_init(host); - spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); @@ -633,7 +391,6 @@ int mmc_add_host(struct mmc_host *host) #ifdef CONFIG_DEBUG_FS mmc_add_host_debugfs(host); #endif - mmc_host_clk_sysfs_init(host); mmc_start_host(host); register_pm_notifier(&host->pm_notify); @@ -663,8 +420,6 @@ void mmc_remove_host(struct mmc_host *host) device_del(&host->class_dev); led_trigger_unregister_simple(host->led); - - mmc_host_clk_exit(host); } EXPORT_SYMBOL(mmc_remove_host); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index f6cd995dbe92..479b84a00bd7 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1931,14 +1931,12 @@ static int mmc_reset(struct mmc_host *host) if (!mmc_can_reset(card)) return -EOPNOTSUPP; - mmc_host_clk_hold(host); mmc_set_clock(host, host->f_init); host->ops->hw_reset(host); /* Set initial state and call mmc_set_ios */ mmc_set_initial_state(host); - mmc_host_clk_release(host); return mmc_init_card(host, card->ocr, card); } diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c index dd1d1e0fe322..fad660b95809 100644 --- 
a/drivers/mmc/core/quirks.c +++ b/drivers/mmc/core/quirks.c @@ -35,25 +35,7 @@ #define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 #endif -/* - * This hook just adds a quirk for all sdio devices - */ -static void add_quirk_for_sdio_devices(struct mmc_card *card, int data) -{ - if (mmc_card_sdio(card)) - card->quirks |= data; -} - static const struct mmc_fixup mmc_fixup_methods[] = { - /* by default sdio devices are considered CLK_GATING broken */ - /* good cards will be whitelisted as they are tested */ - SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID, - add_quirk_for_sdio_devices, - MMC_QUIRK_BROKEN_CLK_GATING), - - SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, - remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING), - SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, add_quirk, MMC_QUIRK_NONSTD_FUNC_IF), diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index e124db0fc178..b1b9200a4715 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -800,9 +800,7 @@ static int mmc_sd_get_ro(struct mmc_host *host) if (!host->ops->get_ro) return -1; - mmc_host_clk_hold(host); ro = host->ops->get_ro(host); - mmc_host_clk_release(host); return ro; } diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 95bc1014b7a2..16d838e6d623 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -956,13 +956,10 @@ static int mmc_sdio_resume(struct mmc_host *host) } if (!err && host->sdio_irqs) { - if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { + if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) wake_up_process(host->sdio_irq_thread); - } else if (host->caps & MMC_CAP_SDIO_IRQ) { - mmc_host_clk_hold(host); + else if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 1); - mmc_host_clk_release(host); - } } mmc_release_host(host); diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 09cc67d028f0..91bbbfb29f3f 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -168,21 +168,15 @@ static int sdio_irq_thread(void *_host) } set_current_state(TASK_INTERRUPTIBLE); - if (host->caps & MMC_CAP_SDIO_IRQ) { - mmc_host_clk_hold(host); + if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 1); - mmc_host_clk_release(host); - } if (!kthread_should_stop()) schedule_timeout(period); set_current_state(TASK_RUNNING); } while (!kthread_should_stop()); - if (host->caps & MMC_CAP_SDIO_IRQ) { - mmc_host_clk_hold(host); + if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 0); - mmc_host_clk_release(host); - } pr_debug("%s: IRQ thread exiting with code %d\n", mmc_hostname(host), ret); @@ -208,9 +202,7 @@ static int sdio_card_irq_get(struct mmc_card *card) return err; } } else if (host->caps & MMC_CAP_SDIO_IRQ) { - mmc_host_clk_hold(host); host->ops->enable_sdio_irq(host, 1); - mmc_host_clk_release(host); } } @@ -229,9 +221,7 @@ static int sdio_card_irq_put(struct mmc_card *card) atomic_set(&host->sdio_irq_thread_abort, 1); kthread_stop(host->sdio_irq_thread); } else if (host->caps & MMC_CAP_SDIO_IRQ) { - mmc_host_clk_hold(host); host->ops->enable_sdio_irq(host, 0); - mmc_host_clk_release(host); } } diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index fdd0779ccdfa..eb0151bac50c 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -269,7 +269,6 @@ struct mmc_card { /* for byte mode */ #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ /* (missing CIA registers) */ -#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make 
card fail */ #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 83b81fd865f3..cfb3c99a6b4b 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -292,18 +292,6 @@ struct mmc_host { mmc_pm_flag_t pm_caps; /* supported pm features */ -#ifdef CONFIG_MMC_CLKGATE - int clk_requests; /* internal reference counter */ - unsigned int clk_delay; /* number of MCI clk hold cycles */ - bool clk_gated; /* clock gated */ - struct delayed_work clk_gate_work; /* delayed clock gate */ - unsigned int clk_old; /* old clock value cache */ - spinlock_t clk_lock; /* lock for clk fields */ - struct mutex clk_gate_mutex; /* mutex for clock gating */ - struct device_attribute clkgate_delay_attr; - unsigned long clkgate_delay; -#endif - /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ unsigned short max_segs; /* see blk_queue_max_segments */ @@ -479,26 +467,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host) return host->caps2 & MMC_CAP2_PACKED_WR; } -#ifdef CONFIG_MMC_CLKGATE -void mmc_host_clk_hold(struct mmc_host *host); -void mmc_host_clk_release(struct mmc_host *host); -unsigned int mmc_host_clk_rate(struct mmc_host *host); - -#else -static inline void mmc_host_clk_hold(struct mmc_host *host) -{ -} - -static inline void mmc_host_clk_release(struct mmc_host *host) -{ -} - -static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) -{ - return host->ios.clock; -} -#endif - static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || -- cgit v1.2.3 From 2086f801cb2a796279e817e68255654c4cfd3be3 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Mon, 12 Oct 2015 14:48:25 +0200 Subject: mmc: core: Add mmc_regulator_set_vqmmc() This adds logic to the MMC core to set VQMMC. This is expected to be called by MMC drivers like dw_mmc as part of (or instead of) their start_signal_voltage_switch() callback. A few notes: * When setting the signal voltage to 3.3V we do our best to make VQMMC and VMMC match. It's been reported that this makes some old cards happy since they were tested back in the day before UHS when VQMMC and VMMC were provided by the same regulator. A nice side effect of this is that we don't end up on the hairy edge of VQMMC (2.7V), which some EEs claim is a little too close to the minimum for comfort. This is done in two steps. At first we try to find a VQMMC within a 0.3V tolerance of VMMC and if this is not supported by the supplying regulator we try to find a suitable voltage within the whole 2.7V-3.6V area of the spec. * The two step approach is currently necessary, as the used regulator_set_voltage_triplet(min, target, max) uses a simple implementation that just tries two basic steps: regulator_set_voltage(target, max); regulator_set_voltage(min, target); So with only one step with 2.7-3.6V borders, if a suitable voltage is a bit below VMMC, we would directly get the lowest 2.7V which some boards (like Rockchips) don't like at all. * When setting the signal voltage to 1.8V or 1.2V we aim for that specific voltage instead of picking the lowest one in the range. * We very purposely don't print errors in mmc_regulator_set_vqmmc(). 
There are cases where the MMC core will try several different voltages and we don't want to pollute the logs. Signed-off-by: Douglas Anderson Signed-off-by: Heiko Stuebner Signed-off-by: Ulf Hansson --- drivers/mmc/core/core.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mmc/host.h | 7 +++++ 2 files changed, 85 insertions(+) (limited to 'include/linux/mmc') diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7aa3b305b65d..5ae89e48fd85 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1394,6 +1394,84 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, } EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); +static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator, + int min_uV, int target_uV, + int max_uV) +{ + /* + * Check if supported first to avoid errors since we may try several + * signal levels during power up and don't want to show errors. + */ + if (!regulator_is_supported_voltage(regulator, min_uV, max_uV)) + return -EINVAL; + + return regulator_set_voltage_triplet(regulator, min_uV, target_uV, + max_uV); +} + +/** + * mmc_regulator_set_vqmmc - Set VQMMC as per the ios + * + * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible. + * That will match the behavior of old boards where VQMMC and VMMC were supplied + * by the same supply. The Bus Operating conditions for 3.3V signaling in the + * SD card spec also define VQMMC in terms of VMMC. + * If this is not possible we'll try the full 2.7-3.6V of the spec. + * + * For 1.2V and 1.8V signaling we'll try to get as close as possible to the + * requested voltage. This is definitely a good idea for UHS where there's a + * separate regulator on the card that's trying to make 1.8V and it's best if + * we match. + * + * This function is expected to be used by a controller's + * start_signal_voltage_switch() function. + */ +int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct device *dev = mmc_dev(mmc); + int ret, volt, min_uV, max_uV; + + /* If no vqmmc supply then we can't change the voltage */ + if (IS_ERR(mmc->supply.vqmmc)) + return -EINVAL; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_120: + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 1100000, 1200000, 1300000); + case MMC_SIGNAL_VOLTAGE_180: + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 1700000, 1800000, 1950000); + case MMC_SIGNAL_VOLTAGE_330: + ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV); + if (ret < 0) + return ret; + + dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n", + __func__, volt, max_uV); + + min_uV = max(volt - 300000, 2700000); + max_uV = min(max_uV + 200000, 3600000); + + /* + * Due to a limitation in the current implementation of + * regulator_set_voltage_triplet() which is taking the lowest + * voltage possible if below the target, search for a suitable + * voltage in two steps and try to stay close to vmmc + * with a 0.3V tolerance at first. 
+ */ + if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + min_uV, volt, max_uV)) + return 0; + + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 2700000, volt, 3600000); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc); + #endif /* CONFIG_REGULATOR */ int mmc_regulator_get_supply(struct mmc_host *mmc) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index cfb3c99a6b4b..8673ffe3d86e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -411,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); +int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); #else static inline int mmc_regulator_get_ocrmask(struct regulator *supply) { @@ -423,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, { return 0; } + +static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + return -EINVAL; +} #endif int mmc_regulator_get_supply(struct mmc_host *mmc); -- cgit v1.2.3 From 3fc7eaef44dbcbcd602b6bcd0ac6efba7a30b108 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Wed, 16 Sep 2015 14:41:23 +0800 Subject: mmc: dw_mmc: Add external dma interface support DesignWare MMC Controller can supports two types of DMA mode: external dma and internal dma. We get a RK312x platform integrated dw_mmc and ARM pl330 dma controller. This patch add edmac ops to support these platforms. I've tested it on RK31xx platform with edmac mode and RK3288 platform with idmac mode. Signed-off-by: Shawn Lin Signed-off-by: Jaehoon Chung Signed-off-by: Ulf Hansson --- drivers/mmc/host/Kconfig | 11 +- drivers/mmc/host/dw_mmc-pltfm.c | 2 + drivers/mmc/host/dw_mmc.c | 268 ++++++++++++++++++++++++++++++++-------- drivers/mmc/host/dw_mmc.h | 6 + include/linux/mmc/dw_mmc.h | 23 +++- 5 files changed, 246 insertions(+), 64 deletions(-) (limited to 'include/linux/mmc') diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 56f5b1487008..ef54084315cc 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -615,15 +615,7 @@ config MMC_DW help This selects support for the Synopsys DesignWare Mobile Storage IP block, this provides host support for SD and MMC interfaces, in both - PIO and external DMA modes. - -config MMC_DW_IDMAC - bool "Internal DMAC interface" - depends on MMC_DW - help - This selects support for the internal DMAC block within the Synopsys - Designware Mobile Storage IP block. This disables the external DMA - interface. + PIO, internal DMA mode and external DMA mode. config MMC_DW_PLTFM tristate "Synopsys Designware MCI Support as platform device" @@ -652,7 +644,6 @@ config MMC_DW_K3 tristate "K3 specific extensions for Synopsys DW Memory Card Interface" depends on MMC_DW select MMC_DW_PLTFM - select MMC_DW_IDMAC help This selects support for Hisilicon K3 SoC specific extensions to the Synopsys DesignWare Memory Card Interface driver. 
Select this option diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index ec6dbcdec693..7e1d13b68b06 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -59,6 +59,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev, host->pdata = pdev->dev.platform_data; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + /* Get registers' physical base address */ + host->phy_regs = (void *)(regs->start); host->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(host->regs)) return PTR_ERR(host->regs); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index b1b7e7fa072c..7fe0315142e6 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -56,7 +56,6 @@ #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ #define DW_MCI_FREQ_MIN 400000 /* unit: HZ */ -#ifdef CONFIG_MMC_DW_IDMAC #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ @@ -102,7 +101,6 @@ struct idmac_desc { /* Each descriptor can transfer up to 4KB of data in chained mode */ #define DW_MCI_DESC_DATA_LENGTH 0x1000 -#endif /* CONFIG_MMC_DW_IDMAC */ static bool dw_mci_reset(struct dw_mci *host); static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset); @@ -407,7 +405,6 @@ static int dw_mci_get_dma_dir(struct mmc_data *data) return DMA_FROM_DEVICE; } -#ifdef CONFIG_MMC_DW_IDMAC static void dw_mci_dma_cleanup(struct dw_mci *host) { struct mmc_data *data = host->data; @@ -445,12 +442,21 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) mci_writel(host, BMOD, temp); } -static void dw_mci_idmac_complete_dma(struct dw_mci *host) +static void dw_mci_dmac_complete_dma(void *arg) { + struct dw_mci *host = arg; struct mmc_data *data = host->data; dev_vdbg(host->dev, "DMA complete\n"); + if ((host->use_dma == TRANS_MODE_EDMAC) && + data && (data->flags & MMC_DATA_READ)) + /* Invalidate cache after read */ + dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc), + data->sg, + data->sg_len, + DMA_FROM_DEVICE); + host->dma_ops->cleanup(host); /* @@ -564,7 +570,7 @@ static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, wmb(); /* drain writebuffer */ } -static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) +static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) { u32 temp; @@ -589,6 +595,8 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) /* Start it running */ mci_writel(host, PLDMND, 1); + + return 0; } static int dw_mci_idmac_init(struct dw_mci *host) @@ -669,10 +677,112 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = { .init = dw_mci_idmac_init, .start = dw_mci_idmac_start_dma, .stop = dw_mci_idmac_stop_dma, - .complete = dw_mci_idmac_complete_dma, + .complete = dw_mci_dmac_complete_dma, + .cleanup = dw_mci_dma_cleanup, +}; + +static void dw_mci_edmac_stop_dma(struct dw_mci *host) +{ + dmaengine_terminate_all(host->dms->ch); +} + +static int dw_mci_edmac_start_dma(struct dw_mci *host, + unsigned int sg_len) +{ + struct dma_slave_config cfg; + struct dma_async_tx_descriptor *desc = NULL; + struct scatterlist *sgl = host->data->sg; + const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + u32 sg_elems = host->data->sg_len; + u32 fifoth_val; + u32 fifo_offset = host->fifo_reg - host->regs; + int ret = 0; + + /* Set external dma config: burst size, burst width */ + cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset); + 
cfg.src_addr = cfg.dst_addr; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + + /* Match burst msize with external dma config */ + fifoth_val = mci_readl(host, FIFOTH); + cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; + cfg.src_maxburst = cfg.dst_maxburst; + + if (host->data->flags & MMC_DATA_WRITE) + cfg.direction = DMA_MEM_TO_DEV; + else + cfg.direction = DMA_DEV_TO_MEM; + + ret = dmaengine_slave_config(host->dms->ch, &cfg); + if (ret) { + dev_err(host->dev, "Failed to config edmac.\n"); + return -EBUSY; + } + + desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, + sg_len, cfg.direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(host->dev, "Can't prepare slave sg.\n"); + return -EBUSY; + } + + /* Set dw_mci_dmac_complete_dma as callback */ + desc->callback = dw_mci_dmac_complete_dma; + desc->callback_param = (void *)host; + dmaengine_submit(desc); + + /* Flush cache before write */ + if (host->data->flags & MMC_DATA_WRITE) + dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl, + sg_elems, DMA_TO_DEVICE); + + dma_async_issue_pending(host->dms->ch); + + return 0; +} + +static int dw_mci_edmac_init(struct dw_mci *host) +{ + /* Request external dma channel */ + host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); + if (!host->dms) + return -ENOMEM; + + host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); + if (!host->dms->ch) { + dev_err(host->dev, + "Failed to get external DMA channel %d\n", + host->dms->ch->chan_id); + kfree(host->dms); + host->dms = NULL; + return -ENXIO; + } + + return 0; +} + +static void dw_mci_edmac_exit(struct dw_mci *host) +{ + if (host->dms) { + if (host->dms->ch) { + dma_release_channel(host->dms->ch); + host->dms->ch = NULL; + } + kfree(host->dms); + host->dms = NULL; + } +} + +static const struct dw_mci_dma_ops dw_mci_edmac_ops = { + .init = dw_mci_edmac_init, + .exit = dw_mci_edmac_exit, + .start = dw_mci_edmac_start_dma, + .stop = dw_mci_edmac_stop_dma, + .complete = dw_mci_dmac_complete_dma, .cleanup = dw_mci_dma_cleanup, }; -#endif /* CONFIG_MMC_DW_IDMAC */ static int dw_mci_pre_dma_transfer(struct dw_mci *host, struct mmc_data *data, @@ -752,7 +862,6 @@ static void dw_mci_post_req(struct mmc_host *mmc, static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) { -#ifdef CONFIG_MMC_DW_IDMAC unsigned int blksz = data->blksz; const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; u32 fifo_width = 1 << host->data_shift; @@ -760,6 +869,10 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; int idx = ARRAY_SIZE(mszs) - 1; + /* pio should ship this scenario */ + if (!host->use_dma) + return; + tx_wmark = (host->fifo_depth) / 2; tx_wmark_invers = host->fifo_depth - tx_wmark; @@ -788,7 +901,6 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) done: fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); mci_writel(host, FIFOTH, fifoth_val); -#endif } static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) @@ -850,10 +962,12 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) host->using_dma = 1; - dev_vdbg(host->dev, - "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", - (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, - sg_len); + if (host->use_dma == TRANS_MODE_IDMAC) + dev_vdbg(host->dev, + "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", + (unsigned long)host->sg_cpu, + (unsigned 
long)host->sg_dma, + sg_len); /* * Decide the MSIZE and RX/TX Watermark. @@ -875,7 +989,11 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) mci_writel(host, INTMASK, temp); spin_unlock_irqrestore(&host->irq_lock, irqflags); - host->dma_ops->start(host, sg_len); + if (host->dma_ops->start(host, sg_len)) { + /* We can't do DMA */ + dev_err(host->dev, "%s: failed to start DMA.\n", __func__); + return -ENODEV; + } return 0; } @@ -2338,15 +2456,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } -#ifdef CONFIG_MMC_DW_IDMAC - /* Handle DMA interrupts */ + if (host->use_dma != TRANS_MODE_IDMAC) + return IRQ_HANDLED; + + /* Handle IDMA interrupts */ if (host->dma_64bit_address == 1) { pending = mci_readl(host, IDSTS64); if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); - host->dma_ops->complete(host); + host->dma_ops->complete((void *)host); } } else { pending = mci_readl(host, IDSTS); @@ -2354,10 +2474,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); - host->dma_ops->complete(host); + host->dma_ops->complete((void *)host); } } -#endif return IRQ_HANDLED; } @@ -2466,13 +2585,21 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) goto err_host_allocated; /* Useful defaults if platform data is unset. */ - if (host->use_dma) { + if (host->use_dma == TRANS_MODE_IDMAC) { mmc->max_segs = host->ring_size; mmc->max_blk_size = 65536; mmc->max_seg_size = 0x1000; mmc->max_req_size = mmc->max_seg_size * host->ring_size; mmc->max_blk_count = mmc->max_req_size / 512; + } else if (host->use_dma == TRANS_MODE_EDMAC) { + mmc->max_segs = 64; + mmc->max_blk_size = 65536; + mmc->max_blk_count = 65535; + mmc->max_req_size = + mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; } else { + /* TRANS_MODE_PIO */ mmc->max_segs = 64; mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ mmc->max_blk_count = 512; @@ -2512,38 +2639,74 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) static void dw_mci_init_dma(struct dw_mci *host) { int addr_config; - /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */ - addr_config = (mci_readl(host, HCON) >> 27) & 0x01; - - if (addr_config == 1) { - /* host supports IDMAC in 64-bit address mode */ - host->dma_64bit_address = 1; - dev_info(host->dev, "IDMAC supports 64-bit address mode.\n"); - if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) - dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64)); - } else { - /* host supports IDMAC in 32-bit address mode */ - host->dma_64bit_address = 0; - dev_info(host->dev, "IDMAC supports 32-bit address mode.\n"); - } + struct device *dev = host->dev; + struct device_node *np = dev->of_node; - /* Alloc memory for sg translation */ - host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, - &host->sg_dma, GFP_KERNEL); - if (!host->sg_cpu) { - dev_err(host->dev, "%s: could not alloc DMA memory\n", - __func__); + /* + * Check tansfer mode from HCON[17:16] + * Clear the ambiguous description of dw_mmc databook: + * 2b'00: No DMA Interface -> Actually means using Internal DMA block + * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block + * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block + * 2b'11: Non DW DMA Interface -> pio only + * Compared to DesignWare DMA 
Interface, Generic DMA Interface has a + * simpler request/acknowledge handshake mechanism and both of them + * are regarded as external dma master for dw_mmc. + */ + host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); + if (host->use_dma == DMA_INTERFACE_IDMA) { + host->use_dma = TRANS_MODE_IDMAC; + } else if (host->use_dma == DMA_INTERFACE_DWDMA || + host->use_dma == DMA_INTERFACE_GDMA) { + host->use_dma = TRANS_MODE_EDMAC; + } else { goto no_dma; } /* Determine which DMA interface to use */ -#ifdef CONFIG_MMC_DW_IDMAC - host->dma_ops = &dw_mci_idmac_ops; - dev_info(host->dev, "Using internal DMA controller.\n"); -#endif + if (host->use_dma == TRANS_MODE_IDMAC) { + /* + * Check ADDR_CONFIG bit in HCON to find + * IDMAC address bus width + */ + addr_config = (mci_readl(host, HCON) >> 27) & 0x01; + + if (addr_config == 1) { + /* host supports IDMAC in 64-bit address mode */ + host->dma_64bit_address = 1; + dev_info(host->dev, + "IDMAC supports 64-bit address mode.\n"); + if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) + dma_set_coherent_mask(host->dev, + DMA_BIT_MASK(64)); + } else { + /* host supports IDMAC in 32-bit address mode */ + host->dma_64bit_address = 0; + dev_info(host->dev, + "IDMAC supports 32-bit address mode.\n"); + } - if (!host->dma_ops) - goto no_dma; + /* Alloc memory for sg translation */ + host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, + &host->sg_dma, GFP_KERNEL); + if (!host->sg_cpu) { + dev_err(host->dev, + "%s: could not alloc DMA memory\n", + __func__); + goto no_dma; + } + + host->dma_ops = &dw_mci_idmac_ops; + dev_info(host->dev, "Using internal DMA controller.\n"); + } else { + /* TRANS_MODE_EDMAC: check dma bindings again */ + if ((of_property_count_strings(np, "dma-names") < 0) || + (!of_find_property(np, "dmas", NULL))) { + goto no_dma; + } + host->dma_ops = &dw_mci_edmac_ops; + dev_info(host->dev, "Using external DMA controller.\n"); + } if (host->dma_ops->init && host->dma_ops->start && host->dma_ops->stop && host->dma_ops->cleanup) { @@ -2557,12 +2720,11 @@ static void dw_mci_init_dma(struct dw_mci *host) goto no_dma; } - host->use_dma = 1; return; no_dma: dev_info(host->dev, "Using PIO mode.\n"); - host->use_dma = 0; + host->use_dma = TRANS_MODE_PIO; } static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) @@ -2645,10 +2807,9 @@ static bool dw_mci_reset(struct dw_mci *host) } } -#if IS_ENABLED(CONFIG_MMC_DW_IDMAC) - /* It is also recommended that we reset and reprogram idmac */ - dw_mci_idmac_reset(host); -#endif + if (host->use_dma == TRANS_MODE_IDMAC) + /* It is also recommended that we reset and reprogram idmac */ + dw_mci_idmac_reset(host); ret = true; @@ -3062,6 +3223,9 @@ EXPORT_SYMBOL(dw_mci_remove); */ int dw_mci_suspend(struct dw_mci *host) { + if (host->use_dma && host->dma_ops->exit) + host->dma_ops->exit(host); + return 0; } EXPORT_SYMBOL(dw_mci_suspend); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 8ce4674730a6..811d4673a1c5 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -148,6 +148,12 @@ #define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ ((r) & 0xFFF) << 16 | \ ((t) & 0xFFF)) +/* HCON register defines */ +#define DMA_INTERFACE_IDMA (0x0) +#define DMA_INTERFACE_DWDMA (0x1) +#define DMA_INTERFACE_GDMA (0x2) +#define DMA_INTERFACE_NODMA (0x3) +#define SDMMC_GET_TRANS_MODE(x) (((x)>>16) & 0x3) /* Internal DMAC interrupt defines */ #define SDMMC_IDMAC_INT_AI BIT(9) #define SDMMC_IDMAC_INT_NI BIT(8) diff --git a/include/linux/mmc/dw_mmc.h 
b/include/linux/mmc/dw_mmc.h index 134c57422740..f67b2ec18e6d 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -16,6 +16,7 @@ #include #include +#include #define MAX_MCI_SLOTS 2 @@ -40,6 +41,17 @@ enum { struct mmc_data; +enum { + TRANS_MODE_PIO = 0, + TRANS_MODE_IDMAC, + TRANS_MODE_EDMAC +}; + +struct dw_mci_dma_slave { + struct dma_chan *ch; + enum dma_transfer_direction direction; +}; + /** * struct dw_mci - MMC controller state shared between all slots * @lock: Spinlock protecting the queue and associated data. @@ -154,7 +166,14 @@ struct dw_mci { dma_addr_t sg_dma; void *sg_cpu; const struct dw_mci_dma_ops *dma_ops; + /* For idmac */ unsigned int ring_size; + + /* For edmac */ + struct dw_mci_dma_slave *dms; + /* Registers's physical base address */ + void *phy_regs; + u32 cmd_status; u32 data_status; u32 stop_cmdr; @@ -208,8 +227,8 @@ struct dw_mci { struct dw_mci_dma_ops { /* DMA Ops */ int (*init)(struct dw_mci *host); - void (*start)(struct dw_mci *host, unsigned int sg_len); - void (*complete)(struct dw_mci *host); + int (*start)(struct dw_mci *host, unsigned int sg_len); + void (*complete)(void *host); void (*stop)(struct dw_mci *host); void (*cleanup)(struct dw_mci *host); void (*exit)(struct dw_mci *host); -- cgit v1.2.3 From 9979dbe5158899b556eb772b7335e29417ac0ddd Mon Sep 17 00:00:00 2001 From: Chaotian Jing Date: Tue, 27 Oct 2015 14:24:28 +0800 Subject: mmc: mmc: extend the mmc_send_tuning() mmc_execute_tuning() has already prepared the opcode, so there is no need to prepare it again in mmc_send_tuning(). Moreover, mmc_send_tuning() has a bug: it picks the opcode from the bus width, so for an eMMC device running HS200 in 4-bit mode it overwrites the opcode from CMD21 to CMD19 and the tuning command fails. In addition, extend mmc_send_tuning() with a "cmd_error" argument so callers can retrieve any command error seen while sending the tuning block.
Signed-off-by: Chaotian Jing [Ulf: Rebased patch] Signed-off-by: Ulf Hansson --- drivers/mmc/core/mmc_ops.c | 8 ++++---- drivers/mmc/host/dw_mmc-exynos.c | 4 ++-- drivers/mmc/host/dw_mmc-rockchip.c | 4 ++-- drivers/mmc/host/dw_mmc.c | 2 +- drivers/mmc/host/dw_mmc.h | 2 +- drivers/mmc/host/sdhci-esdhc-imx.c | 6 +++--- drivers/mmc/host/sdhci-msm.c | 2 +- drivers/mmc/host/sdhci-sirf.c | 2 +- include/linux/mmc/core.h | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include/linux/mmc') diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 57f3a21783e9..1f444269ebbe 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -588,7 +588,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, } EXPORT_SYMBOL_GPL(mmc_switch); -int mmc_send_tuning(struct mmc_host *host) +int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error) { struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; @@ -598,16 +598,13 @@ int mmc_send_tuning(struct mmc_host *host) const u8 *tuning_block_pattern; int size, err = 0; u8 *data_buf; - u32 opcode; if (ios->bus_width == MMC_BUS_WIDTH_8) { tuning_block_pattern = tuning_blk_pattern_8bit; size = sizeof(tuning_blk_pattern_8bit); - opcode = MMC_SEND_TUNING_BLOCK_HS200; } else if (ios->bus_width == MMC_BUS_WIDTH_4) { tuning_block_pattern = tuning_blk_pattern_4bit; size = sizeof(tuning_blk_pattern_4bit); - opcode = MMC_SEND_TUNING_BLOCK; } else return -EINVAL; @@ -638,6 +635,9 @@ int mmc_send_tuning(struct mmc_host *host) mmc_wait_for_req(host, &mrq); + if (cmd_error) + *cmd_error = cmd.error; + if (cmd.error) { err = cmd.error; goto out; diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 1e75309898b7..3a7e835a0033 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -446,7 +446,7 @@ out: return loc; } -static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot) +static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode) { struct dw_mci *host = slot->host; struct dw_mci_exynos_priv_data *priv = host->priv; @@ -461,7 +461,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot) mci_writel(host, TMOUT, ~0); smpl = dw_mci_exynos_move_next_clksmpl(host); - if (!mmc_send_tuning(mmc)) + if (!mmc_send_tuning(mmc, opcode, NULL)) candiates |= (1 << smpl); } while (start_smpl != smpl); diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index 4b3650f7d43f..9becebeeccd1 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -83,7 +83,7 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios) #define NUM_PHASES 360 #define TUNING_ITERATION_TO_PHASE(i) (DIV_ROUND_UP((i) * 360, NUM_PHASES)) -static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot) +static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode) { struct dw_mci *host = slot->host; struct dw_mci_rockchip_priv_data *priv = host->priv; @@ -114,7 +114,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot) for (i = 0; i < NUM_PHASES; ) { clk_set_phase(priv->sample_clk, TUNING_ITERATION_TO_PHASE(i)); - v = !mmc_send_tuning(mmc); + v = !mmc_send_tuning(mmc, opcode, NULL); if (i == 0) first_v = v; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 6e600e875328..63eefea9645e 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1540,7 +1540,7 @@ static int 
dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) int err = -EINVAL; if (drv_data && drv_data->execute_tuning) - err = drv_data->execute_tuning(slot); + err = drv_data->execute_tuning(slot, opcode); return err; } diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index f2a88d4bcbac..11cc848a40d4 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -290,7 +290,7 @@ struct dw_mci_drv_data { void (*prepare_command)(struct dw_mci *host, u32 *cmdr); void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); int (*parse_dt)(struct dw_mci *host); - int (*execute_tuning)(struct dw_mci_slot *slot); + int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode); int (*prepare_hs400_tuning)(struct dw_mci *host, struct mmc_ios *ios); int (*switch_voltage)(struct mmc_host *mmc, diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 886d230f41d0..1f1582f6cccb 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -759,7 +759,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) min = ESDHC_TUNE_CTRL_MIN; while (min < ESDHC_TUNE_CTRL_MAX) { esdhc_prepare_tuning(host, min); - if (!mmc_send_tuning(host->mmc)) + if (!mmc_send_tuning(host->mmc, opcode, NULL)) break; min += ESDHC_TUNE_CTRL_STEP; } @@ -768,7 +768,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) max = min + ESDHC_TUNE_CTRL_STEP; while (max < ESDHC_TUNE_CTRL_MAX) { esdhc_prepare_tuning(host, max); - if (mmc_send_tuning(host->mmc)) { + if (mmc_send_tuning(host->mmc, opcode, NULL)) { max -= ESDHC_TUNE_CTRL_STEP; break; } @@ -778,7 +778,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) /* use average delay to get the best timing */ avg = (min + max) / 2; esdhc_prepare_tuning(host, avg); - ret = mmc_send_tuning(host->mmc); + ret = mmc_send_tuning(host->mmc, opcode, NULL); esdhc_post_tuning(host); dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 4bcee033feda..4695bee203ea 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -373,7 +373,7 @@ retry: if (rc) return rc; - rc = mmc_send_tuning(mmc); + rc = mmc_send_tuning(mmc, opcode, NULL); if (!rc) { /* Tuning is successful at this tuning point */ tuned_phases[tuned_phase_cnt++] = phase; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index f5488c409fd1..34866f668dd7 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -98,7 +98,7 @@ retry: clock_setting | phase, SDHCI_CLK_DELAY_SETTING); - if (!mmc_send_tuning(mmc)) { + if (!mmc_send_tuning(mmc, opcode, NULL)) { /* Tuning is successful at this tuning point */ tuned_phase_cnt++; dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 79a31d3c3788..37967b6da03c 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -153,7 +153,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); -extern int mmc_send_tuning(struct mmc_host *host); +extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #define MMC_ERASE_ARG 0x00000000 -- cgit v1.2.3
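The last commit above changes the tuning helper to int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error), so host drivers pass through the opcode handed to their ->execute_tuning() callback and may optionally collect the command error. The following is a minimal sketch of how a driver's tuning loop could use the extended helper; the foo_* names, FOO_MAX_DELAY and the delay-line handling are invented for illustration and do not correspond to any driver in this series — only the mmc_send_tuning() call reflects the interface added here.

#include <linux/device.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

#define FOO_MAX_DELAY	32	/* made-up number of sample-delay taps */

/* Placeholder for a controller-specific delay-line write. */
static void foo_set_sample_delay(struct mmc_host *mmc, int delay)
{
	/* program 'delay' into the controller's tuning/delay register here */
}

static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	int delay, best = -1, cmd_error;

	for (delay = 0; delay < FOO_MAX_DELAY; delay++) {
		foo_set_sample_delay(mmc, delay);

		/* The opcode (CMD19 or CMD21) now comes from the core. */
		if (!mmc_send_tuning(mmc, opcode, &cmd_error)) {
			best = delay;	/* tuning block read back correctly */
			break;
		}

		if (cmd_error)
			dev_dbg(mmc_dev(mmc), "tuning cmd error: %d\n",
				cmd_error);
	}

	if (best < 0)
		return -EIO;

	foo_set_sample_delay(mmc, best);
	return 0;
}

A driver would hook this up as the .execute_tuning member of its struct mmc_host_ops; the core then calls it from mmc_execute_tuning() with the correct opcode for the card type, as shown in the core.c hunks earlier in this series.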