author     Linus Torvalds <torvalds@linux-foundation.org>  2023-11-02 15:13:50 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-11-02 15:13:50 -1000
commit     6ed92e559a2ea572ae2bac5cbeddd1dc8cb667b6 (patch)
tree       aecc082cb2371d6cf3bceef203342fee48957ae0 /drivers/ufs
parent     90a300dc0553c5c4a3324ca6de5877c834d27af7 (diff)
parent     a75a16c62a2540f11eeae4f2b50e95deefb652ea (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "Updates to the usual drivers (ufs, megaraid_sas, lpfc, target, ibmvfc,
  scsi_debug) plus the usual assorted minor fixes and updates.

  The major change this time around is a prep patch for rethreading of
  the driver reset handler API not to take a scsi_cmd structure which
  starts to reduce various drivers' dependence on scsi_cmd in error
  handling"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (132 commits)
  scsi: ufs: core: Leave space for '\0' in utf8 desc string
  scsi: ufs: core: Conversion to bool not necessary
  scsi: ufs: core: Fix race between force complete and ISR
  scsi: megaraid: Fix up debug message in megaraid_abort_and_reset()
  scsi: aic79xx: Fix up NULL command in ahd_done()
  scsi: message: fusion: Initialize return value in mptfc_bus_reset()
  scsi: mpt3sas: Fix loop logic
  scsi: snic: Remove useless code in snic_dr_clean_pending_req()
  scsi: core: Add comment to target_destroy in scsi_host_template
  scsi: core: Clean up scsi_dev_queue_ready()
  scsi: pmcraid: Add missing scsi_device_put() in pmcraid_eh_target_reset_handler()
  scsi: target: core: Fix kernel-doc comment
  scsi: pmcraid: Fix kernel-doc comment
  scsi: core: Handle depopulation and restoration in progress
  scsi: ufs: core: Add support for parsing OPP
  scsi: ufs: core: Add OPP support for scaling clocks and regulators
  scsi: ufs: dt-bindings: common: Add OPP table
  scsi: scsi_debug: Add param to control sdev's allow_restart
  scsi: scsi_debug: Add debugfs interface to fail target reset
  scsi: scsi_debug: Add new error injection type: Reset LUN failed
  ...
Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufshcd.c              268
-rw-r--r--  drivers/ufs/host/cdns-pltfrm.c           5
-rw-r--r--  drivers/ufs/host/tc-dwc-g210-pltfrm.c    6
-rw-r--r--  drivers/ufs/host/ti-j721e-ufs.c          6
-rw-r--r--  drivers/ufs/host/ufs-exynos.c           15
-rw-r--r--  drivers/ufs/host/ufs-hisi.c              5
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c          7
-rw-r--r--  drivers/ufs/host/ufs-qcom.c            216
-rw-r--r--  drivers/ufs/host/ufs-qcom.h             20
-rw-r--r--  drivers/ufs/host/ufs-renesas.c           6
-rw-r--r--  drivers/ufs/host/ufs-sprd.c              5
-rw-r--r--  drivers/ufs/host/ufshcd-pci.c            5
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.c        93
-rw-r--r--  drivers/ufs/host/ufshcd-pltfrm.h         2
14 files changed, 492 insertions, 167 deletions
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8382e8cfa414..8b1031fb0a44 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/iopoll.h>
@@ -274,8 +275,8 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
+static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
+ bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
@@ -447,8 +448,8 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
- trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
- doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
+ trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+ transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
@@ -1062,14 +1063,68 @@ out:
return ret;
}
+int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ unsigned long freq;
+ u8 idx = 0;
+ int ret;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ freq = dev_pm_opp_get_freq_indexed(opp, idx++);
+
+ /* Do not set rate for clocks having frequency as 0 */
+ if (!freq)
+ continue;
+
+ ret = clk_set_rate(clki->clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: %s clk set rate(%ldHz) failed, %d\n",
+ __func__, clki->name, freq, ret);
+ return ret;
+ }
+
+ trace_ufshcd_clk_scaling(dev_name(dev),
+ (scaling_down ? "scaled down" : "scaled up"),
+ clki->name, hba->clk_scaling.target_freq, freq);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks);
+
+static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ int ret;
+
+ opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
+ &freq, 0);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = dev_pm_opp_set_opp(hba->dev, opp);
+ dev_pm_opp_put(opp);
+
+ return ret;
+}
+
/**
* ufshcd_scale_clks - scale up or scale down UFS controller clocks
* @hba: per adapter instance
+ * @freq: frequency to scale
* @scale_up: True if scaling up and false if scaling down
*
* Return: 0 if successful; < 0 upon failure.
*/
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
+ bool scale_up)
{
int ret = 0;
ktime_t start = ktime_get();
@@ -1078,13 +1133,21 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
if (ret)
goto out;
- ret = ufshcd_set_clk_freq(hba, scale_up);
+ if (hba->use_pm_opp)
+ ret = ufshcd_opp_set_rate(hba, freq);
+ else
+ ret = ufshcd_set_clk_freq(hba, scale_up);
if (ret)
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
- if (ret)
- ufshcd_set_clk_freq(hba, !scale_up);
+ if (ret) {
+ if (hba->use_pm_opp)
+ ufshcd_opp_set_rate(hba,
+ hba->devfreq->previous_freq);
+ else
+ ufshcd_set_clk_freq(hba, !scale_up);
+ }
out:
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
@@ -1096,12 +1159,13 @@ out:
/**
* ufshcd_is_devfreq_scaling_required - check if scaling is required or not
* @hba: per adapter instance
+ * @freq: frequency to scale
* @scale_up: True if scaling up and false if scaling down
*
* Return: true if scaling is required, false otherwise.
*/
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
- bool scale_up)
+ unsigned long freq, bool scale_up)
{
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
@@ -1109,6 +1173,9 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
if (list_empty(head))
return false;
+ if (hba->use_pm_opp)
+ return freq != hba->clk_scaling.target_freq;
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
@@ -1304,12 +1371,14 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
/**
* ufshcd_devfreq_scale - scale up/down UFS clocks and gear
* @hba: per adapter instance
+ * @freq: frequency to scale
* @scale_up: True for scaling up and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
* for any other errors.
*/
-static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
+ bool scale_up)
{
int ret = 0;
@@ -1324,7 +1393,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
goto out_unprepare;
}
- ret = ufshcd_scale_clks(hba, scale_up);
+ ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) {
if (!scale_up)
ufshcd_scale_gear(hba, true);
@@ -1335,7 +1404,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
if (ret) {
- ufshcd_scale_clks(hba, false);
+ ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
+ false);
goto out_unprepare;
}
}
@@ -1357,9 +1427,10 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
return;
}
hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- __ufshcd_suspend_clkscaling(hba);
+ devfreq_suspend_device(hba->devfreq);
}
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
@@ -1393,15 +1464,35 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
- /* Override with the closest supported frequency */
- *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
+ if (hba->use_pm_opp) {
+ struct dev_pm_opp *opp;
+
+ /* Get the recommended frequency from OPP framework */
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ dev_pm_opp_put(opp);
+ } else {
+ /* Override with the closest supported frequency */
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
+ list);
+ *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
+ }
+
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return 0;
}
+ /* Skip scaling clock when clock scaling is suspended */
+ if (hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ dev_warn(hba->dev, "clock scaling is suspended, skip");
+ return 0;
+ }
+
if (!hba->clk_scaling.active_reqs)
sched_clk_scaling_suspend_work = true;
@@ -1410,12 +1501,17 @@ static int ufshcd_devfreq_target(struct device *dev,
goto out;
}
- /* Decide based on the rounded-off frequency and update */
- scale_up = *freq == clki->max_freq;
- if (!scale_up)
+ /* Decide based on the target or rounded-off frequency and update */
+ if (hba->use_pm_opp)
+ scale_up = *freq > hba->clk_scaling.target_freq;
+ else
+ scale_up = *freq == clki->max_freq;
+
+ if (!hba->use_pm_opp && !scale_up)
*freq = clki->min_freq;
+
/* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
goto out; /* no state change required */
@@ -1423,14 +1519,16 @@ static int ufshcd_devfreq_target(struct device *dev,
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
- ret = ufshcd_devfreq_scale(hba, scale_up);
+ ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
+ if (!ret)
+ hba->clk_scaling.target_freq = *freq;
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
out:
- if (sched_clk_scaling_suspend_work)
+ if (sched_clk_scaling_suspend_work && !scale_up)
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.suspend_work);
@@ -1443,8 +1541,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
@@ -1457,17 +1553,24 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
if (!scaling->window_start_t)
goto start_window;
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
/*
* If current frequency is 0, then the ondemand governor considers
* there's no initial frequency set. And it always requests to set
* to max. frequency.
*/
- stat->current_frequency = clki->curr_freq;
+ if (hba->use_pm_opp) {
+ stat->current_frequency = hba->clk_scaling.target_freq;
+ } else {
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ stat->current_frequency = clki->curr_freq;
+ }
+
if (scaling->is_busy_started)
scaling->tot_busy_t += ktime_us_delta(curr_t,
scaling->busy_start_t);
-
stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
stat->busy_time = scaling->tot_busy_t;
start_window:
@@ -1496,9 +1599,11 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
if (list_empty(clk_list))
return 0;
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
- dev_pm_opp_add(hba->dev, clki->min_freq, 0);
- dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+ if (!hba->use_pm_opp) {
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_add(hba->dev, clki->min_freq, 0);
+ dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+ }
ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
&hba->vps->ondemand_data);
@@ -1510,8 +1615,10 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
- dev_pm_opp_remove(hba->dev, clki->min_freq);
- dev_pm_opp_remove(hba->dev, clki->max_freq);
+ if (!hba->use_pm_opp) {
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+ }
return ret;
}
@@ -1523,7 +1630,6 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
if (!hba->devfreq)
return;
@@ -1531,19 +1637,13 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
devfreq_remove_device(hba->devfreq);
hba->devfreq = NULL;
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
- dev_pm_opp_remove(hba->dev, clki->min_freq);
- dev_pm_opp_remove(hba->dev, clki->max_freq);
-}
-
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
-{
- unsigned long flags;
+ if (!hba->use_pm_opp) {
+ struct ufs_clk_info *clki;
- devfreq_suspend_device(hba->devfreq);
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ dev_pm_opp_remove(hba->dev, clki->min_freq);
+ dev_pm_opp_remove(hba->dev, clki->max_freq);
+ }
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
@@ -1558,11 +1658,12 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
if (!hba->clk_scaling.is_suspended) {
suspend = true;
hba->clk_scaling.is_suspended = true;
+ hba->clk_scaling.window_start_t = 0;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
- __ufshcd_suspend_clkscaling(hba);
+ devfreq_suspend_device(hba->devfreq);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
@@ -1618,7 +1719,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
ufshcd_resume_clkscaling(hba);
} else {
ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, true);
+ err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
if (err)
dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
__func__, err);
@@ -2165,7 +2266,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
lrbp->compl_time_stamp = ktime_set(0, 0);
lrbp->compl_time_stamp_local_clock = 0;
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
- ufshcd_clk_scaling_start_busy(hba);
+ if (lrbp->cmd)
+ ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
@@ -2304,7 +2406,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
500, UIC_CMD_TIMEOUT * 1000, false, hba,
REG_CONTROLLER_STATUS);
- return ret == 0 ? true : false;
+ return ret == 0;
}
/**
@@ -2715,27 +2817,23 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
* for SCSI Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
- *
- * Return: 0 upon success; < 0 upon failure.
*/
-static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
+ struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
+ unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
- int ret = 0;
if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_SCSI;
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
- ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
- } else {
- ret = -EINVAL;
- }
-
- return ret;
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction, 0);
+ if (ioprio_class == IOPRIO_CLASS_RT)
+ upiu_flags |= UPIU_CMD_FLAGS_CP;
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
}
/**
@@ -2823,8 +2921,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int err = 0;
struct ufs_hw_queue *hwq = NULL;
- WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
-
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -3632,7 +3728,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
*/
ret = utf16s_to_utf8s(uc_str->uc,
uc_str->len - QUERY_DESC_HDR_SIZE,
- UTF16_BIG_ENDIAN, str, ascii_len);
+ UTF16_BIG_ENDIAN, str, ascii_len - 1);
/* replace non-printable or non-ASCII characters with spaces */
for (i = 0; i < ret; i++)
@@ -5098,8 +5194,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
- blk_queue_update_dma_alignment(q, SZ_4K - 1);
+
/*
* Block runtime-pm until all consumers are added.
* Refer ufshcd_setup_links().
@@ -5115,6 +5210,9 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
*/
sdev->silence_suspend = 1;
+ if (hba->vops && hba->vops->config_scsi_dev)
+ hba->vops->config_scsi_dev(sdev);
+
ufshcd_crypto_register(hba, q);
return 0;
@@ -5405,7 +5503,6 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
complete(hba->dev_cmd.complete);
- ufshcd_clk_scaling_update_busy(hba);
}
}
}
@@ -5518,13 +5615,13 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
* For those cmds of which the cqes are not present
* in the cq, complete them explicitly.
*/
+ spin_lock_irqsave(&hwq->cq_lock, flags);
if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
- spin_lock_irqsave(&hwq->cq_lock, flags);
set_host_byte(cmd, DID_REQUEUE);
ufshcd_release_scsi_cmd(hba, lrbp);
scsi_done(cmd);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
}
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
} else {
ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
@@ -6924,8 +7021,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_lock_irqsave(host->host_lock, flags);
task_tag = req->tag;
- WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
- task_tag);
hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.task_tag = task_tag;
@@ -7499,8 +7594,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
bool outstanding;
u32 reg;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
-
ufshcd_hold(hba);
if (!is_mcq_enabled(hba)) {
@@ -7627,7 +7720,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, true);
+ ufshcd_scale_clks(hba, ULONG_MAX, true);
err = ufshcd_hba_enable(hba);
@@ -7716,6 +7809,19 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba = shost_priv(cmd->device->host);
+ /*
+ * If runtime PM sent SSU and got a timeout, scsi_error_handler is
+ * stuck in this function waiting for flush_work(&hba->eh_work). And
+ * ufshcd_err_handler(eh_work) is stuck waiting for runtime PM. Do
+ * ufshcd_link_recovery instead of eh_work to prevent deadlock.
+ */
+ if (hba->pm_op_in_progress) {
+ if (ufshcd_link_recovery(hba))
+ err = FAILED;
+
+ return err;
+ }
+
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
@@ -8723,7 +8829,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (ret)
goto out;
- if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
+ if (!hba->pm_op_in_progress &&
+ (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
ufshcd_hba_stop(hba);
@@ -9159,6 +9266,17 @@ static int ufshcd_init_clocks(struct ufs_hba *hba)
dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
clki->name, clk_get_rate(clki->clk));
}
+
+ /* Set Max. frequency for all clocks */
+ if (hba->use_pm_opp) {
+ ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
+ if (ret) {
+ dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
+ ret);
+ goto out;
+ }
+ }
+
out:
return ret;
}
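
The ULONG_MAX requests used above (ufshcd_scale_clks(hba, ULONG_MAX, true) during host reset and ufshcd_opp_set_rate(hba, ULONG_MAX) in ufshcd_init_clocks()) rely on dev_pm_opp_find_freq_floor_indexed() rounding the request down to the highest entry in the OPP table. A minimal sketch of that pattern, assuming a device whose OPP table is already registered; example_scale_to_max() is a hypothetical helper, not part of this series:

#include <linux/err.h>
#include <linux/limits.h>
#include <linux/pm_opp.h>

/* Hypothetical sketch, not part of this series. */
static int example_scale_to_max(struct device *dev)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;
	int ret;

	/* The floor of ULONG_MAX is the highest frequency in the table */
	opp = dev_pm_opp_find_freq_floor_indexed(dev, &freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Program the clocks (and any regulators) tied to that OPP */
	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}

The floor lookup also writes the selected rate back through the freq pointer, so a caller can log or store the frequency that was actually chosen.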
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index 2491e7e87028..bb30267da471 100644
--- a/drivers/ufs/host/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -305,12 +305,11 @@ static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
*
* Return: 0 (success).
*/
-static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
+static void cdns_ufs_pltfrm_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
ufshcd_remove(hba);
- return 0;
}
static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
@@ -322,7 +321,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
static struct platform_driver cdns_ufs_pltfrm_driver = {
.probe = cdns_ufs_pltfrm_probe,
- .remove = cdns_ufs_pltfrm_remove,
+ .remove_new = cdns_ufs_pltfrm_remove,
.driver = {
.name = "cdns-ufshcd",
.pm = &cdns_ufs_dev_pm_ops,

diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
index 4d5389dd9585..a3877592604d 100644
--- a/drivers/ufs/host/tc-dwc-g210-pltfrm.c
+++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c
@@ -74,14 +74,12 @@ static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
* @pdev: pointer to platform device structure
*
*/
-static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
+static void tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
-
- return 0;
}
static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
@@ -91,7 +89,7 @@ static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
static struct platform_driver tc_dwc_g210_pltfm_driver = {
.probe = tc_dwc_g210_pltfm_probe,
- .remove = tc_dwc_g210_pltfm_remove,
+ .remove_new = tc_dwc_g210_pltfm_remove,
.driver = {
.name = "tc-dwc-g210-pltfm",
.pm = &tc_dwc_g210_pltfm_pm_ops,
diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 117eb7da92ac..250c22df000d 100644
--- a/drivers/ufs/host/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
@@ -65,13 +65,11 @@ disable_pm:
return ret;
}
-static int ti_j721e_ufs_remove(struct platform_device *pdev)
+static void ti_j721e_ufs_remove(struct platform_device *pdev)
{
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id ti_j721e_ufs_of_match[] = {
@@ -85,7 +83,7 @@ MODULE_DEVICE_TABLE(of, ti_j721e_ufs_of_match);
static struct platform_driver ti_j721e_ufs_driver = {
.probe = ti_j721e_ufs_probe,
- .remove = ti_j721e_ufs_remove,
+ .remove_new = ti_j721e_ufs_remove,
.driver = {
.name = "ti-j721e-ufs",
.of_match_table = ti_j721e_ufs_of_match,
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 3396e0388512..71bd6dbc0547 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1511,6 +1511,11 @@ static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
return 0;
}
+static void exynos_ufs_config_scsi_dev(struct scsi_device *sdev)
+{
+ blk_queue_update_dma_alignment(sdev->request_queue, SZ_4K - 1);
+}
+
static int fsd_ufs_post_link(struct exynos_ufs *ufs)
{
int i;
@@ -1579,6 +1584,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.hibern8_notify = exynos_ufs_hibern8_notify,
.suspend = exynos_ufs_suspend,
.resume = exynos_ufs_resume,
+ .config_scsi_dev = exynos_ufs_config_scsi_dev,
};
static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
@@ -1605,7 +1611,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
return err;
}
-static int exynos_ufs_remove(struct platform_device *pdev)
+static void exynos_ufs_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1615,8 +1621,6 @@ static int exynos_ufs_remove(struct platform_device *pdev)
phy_power_off(ufs->phy);
phy_exit(ufs->phy);
-
- return 0;
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -1680,8 +1684,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
- UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
- UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
@@ -1756,7 +1759,7 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = {
static struct platform_driver exynos_ufs_pltform = {
.probe = exynos_ufs_probe,
- .remove = exynos_ufs_remove,
+ .remove_new = exynos_ufs_remove,
.driver = {
.name = "exynos-ufshc",
.pm = &exynos_ufs_pm_ops,
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 5b3060cd0ab8..0229ac0a8dbe 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -575,12 +575,11 @@ static int ufs_hisi_probe(struct platform_device *pdev)
return ufshcd_pltfrm_init(pdev, of_id->data);
}
-static int ufs_hisi_remove(struct platform_device *pdev)
+static void ufs_hisi_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
ufshcd_remove(hba);
- return 0;
}
static const struct dev_pm_ops ufs_hisi_pm_ops = {
@@ -592,7 +591,7 @@ static const struct dev_pm_ops ufs_hisi_pm_ops = {
static struct platform_driver ufs_hisi_pltform = {
.probe = ufs_hisi_probe,
- .remove = ufs_hisi_remove,
+ .remove_new = ufs_hisi_remove,
.driver = {
.name = "ufshcd-hisi",
.pm = &ufs_hisi_pm_ops,
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 2383ecd88f1c..fc61790d289b 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -806,7 +806,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
return 0;
}
- err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
+ err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
if (err)
return err;
@@ -1748,13 +1748,12 @@ out:
*
* Always return 0
*/
-static int ufs_mtk_remove(struct platform_device *pdev)
+static void ufs_mtk_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1818,7 +1817,7 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = {
static struct platform_driver ufs_mtk_pltform = {
.probe = ufs_mtk_probe,
- .remove = ufs_mtk_remove,
+ .remove_new = ufs_mtk_remove,
.driver = {
.name = "ufshcd-mtk",
.pm = &ufs_mtk_pm_ops,
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index d1149b1c3ed5..96cb8b5b4e66 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -93,8 +93,7 @@ static const struct __ufs_qcom_bw_table {
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -460,7 +459,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
return ret;
}
- phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
+ phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->phy_gear);
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
@@ -528,11 +527,20 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
return err;
}
-/*
+/**
+ * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
+ *
+ * @hba: host controller instance
+ * @gear: Current operating gear
+ * @hs: current power mode
+ * @rate: current operating rate (A or B)
+ * @update_link_startup_timer: indicate if link_start ongoing
+ * @is_pre_scale_up: true if invoked before scaling up the clocks
* Return: zero for success and non-zero in case of a failure.
*/
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer)
+ u32 hs, u32 rate, bool update_link_startup_timer,
+ bool is_pre_scale_up)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
@@ -563,11 +571,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
/*
* The Qunipro controller does not use following registers:
* SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
- * UFS_REG_PA_LINK_STARTUP_TIMER
- * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
+ * UFS_REG_PA_LINK_STARTUP_TIMER.
+ * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
* Aggregation logic.
- */
- if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+ * It is mandatory to write SYS1CLK_1US_REG register on UFS host
+ * controller V4.0.0 onwards.
+ */
+ if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
+ !ufshcd_is_intr_aggr_allowed(hba))
return 0;
if (gear == 0) {
@@ -576,8 +587,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
+ if (!strcmp(clki->name, "core_clk")) {
+ if (is_pre_scale_up)
+ core_clk_rate = clki->max_freq;
+ else
+ core_clk_rate = clk_get_rate(clki->clk);
+ break;
+ }
+
}
/* If frequency is smaller than 1MHz, set to 1MHz */
@@ -679,20 +696,17 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true)) {
+ 0, true, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- if (ufs_qcom_cap_qunipro(host))
- /*
- * set unipro core clock cycles to 150 & clear clock
- * divider
- */
- err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
- 150);
-
+ if (ufs_qcom_cap_qunipro(host)) {
+ err = ufs_qcom_set_core_clk_ctrl(hba, true);
+ if (err)
+ dev_err(hba->dev, "cfg core clk ctrl failed\n");
+ }
/*
* Some UFS devices (and may be host) have issues if LCC is
* enabled. So we are setting PA_Local_TX_LCC_Enable to 0
@@ -909,8 +923,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
return ret;
}
- /* Use the agreed gear */
- host->hs_gear = dev_req_params->gear_tx;
+ /*
+ * Update phy_gear only when the gears are scaled to a higher value. This is
+ * because, the PHY gear settings are backwards compatible and we only need to
+ * change the PHY gear settings while scaling to higher gears.
+ */
+ if (dev_req_params->gear_tx > host->phy_gear)
+ host->phy_gear = dev_req_params->gear_tx;
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
@@ -926,7 +945,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
dev_req_params->pwr_rx,
- dev_req_params->hs_rate, false)) {
+ dev_req_params->hs_rate, false, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
/*
@@ -1277,7 +1296,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
* Power up the PHY using the minimum supported gear (UFS_HS_G2).
* Switching to max gear will be performed during reinit if supported.
*/
- host->hs_gear = UFS_HS_G2;
+ host->phy_gear = UFS_HS_G2;
return 0;
@@ -1296,14 +1315,96 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
phy_exit(host->generic_phy);
}
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles)
+/**
+ * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles
+ *
+ * @hba: host controller instance
+ * @cycles_in_1us: No of cycles in 1us to be configured
+ *
+ * Returns error if dme get/set configuration for 40ns fails
+ * and returns zero on success.
+ */
+static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
+ u32 cycles_in_1us)
{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 cycles_in_40ns;
+ u32 reg;
int err;
- u32 core_clk_ctrl_reg;
- if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
+ /*
+ * UFS host controller V4.0.0 onwards needs to program
+ * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed
+ * frequency of unipro core clk of UFS host controller.
+ */
+ if (host->hw_ver.major < 4)
+ return 0;
+
+ /*
+ * Generic formulae for cycles_in_40ns = (freq_unipro/25) is not
+ * applicable for all frequencies. For ex: ceil(37.5 MHz/25) will
+ * be 2 and ceil(403 MHZ/25) will be 17 whereas Hardware
+ * specification expect to be 16. Hence use exact hardware spec
+ * mandated value for cycles_in_40ns instead of calculating using
+ * generic formulae.
+ */
+ switch (cycles_in_1us) {
+ case UNIPRO_CORE_CLK_FREQ_403_MHZ:
+ cycles_in_40ns = 16;
+ break;
+ case UNIPRO_CORE_CLK_FREQ_300_MHZ:
+ cycles_in_40ns = 12;
+ break;
+ case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
+ cycles_in_40ns = 8;
+ break;
+ case UNIPRO_CORE_CLK_FREQ_150_MHZ:
+ cycles_in_40ns = 6;
+ break;
+ case UNIPRO_CORE_CLK_FREQ_100_MHZ:
+ cycles_in_40ns = 4;
+ break;
+ case UNIPRO_CORE_CLK_FREQ_75_MHZ:
+ cycles_in_40ns = 3;
+ break;
+ case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
+ cycles_in_40ns = 2;
+ break;
+ default:
+ dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
+ cycles_in_1us);
return -EINVAL;
+ }
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), &reg);
+ if (err)
+ return err;
+
+ reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
+ reg |= cycles_in_40ns;
+
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
+}
+
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
+ u32 cycles_in_1us;
+ u32 core_clk_ctrl_reg;
+ int err;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) &&
+ !strcmp(clki->name, "core_clk_unipro")) {
+ if (is_scale_up)
+ cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
+ else
+ cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
+ break;
+ }
+ }
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
@@ -1311,32 +1412,54 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
if (err)
return err;
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
- core_clk_ctrl_reg |= clk_cycles;
+ /* Bit mask is different for UFS host controller V4.0.0 onwards */
+ if (host->hw_ver.major >= 4) {
+ if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
+ return -ERANGE;
+ core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
+ core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
+ } else {
+ if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us))
+ return -ERANGE;
+ core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
+ core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
+ }
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- return ufshcd_dme_set(hba,
+ err = ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
-}
+ if (err)
+ return err;
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
-{
- /* nothing to do as of now */
- return 0;
+ /* Configure unipro core clk 40ns attribute */
+ return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
}
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr *attr = &host->dev_req_params;
+ int ret;
if (!ufs_qcom_cap_qunipro(host))
return 0;
- /* set unipro core clock cycles to 150 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+ ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+ attr->hs_rate, false, true);
+ if (ret) {
+ dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
+ return ret;
+ }
+ /* set unipro core clock attributes and clear clock divider */
+ return ufs_qcom_set_core_clk_ctrl(hba, true);
+}
+
+static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+{
+ return 0;
}
static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
@@ -1371,15 +1494,14 @@ static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
if (!ufs_qcom_cap_qunipro(host))
return 0;
- /* set unipro core clock cycles to 75 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+ /* set unipro core clock attributes and clear clock divider */
+ return ufs_qcom_set_core_clk_ctrl(hba, false);
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
int err = 0;
/* check the host controller state before sending hibern8 cmd */
@@ -1409,11 +1531,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
return err;
}
- ufs_qcom_cfg_timers(hba,
- dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate,
- false);
ufs_qcom_icc_update_bw(host);
ufshcd_uic_hibern8_exit(hba);
}
@@ -1910,14 +2027,13 @@ static int ufs_qcom_probe(struct platform_device *pdev)
*
* Always returns 0
*/
-static int ufs_qcom_remove(struct platform_device *pdev)
+static void ufs_qcom_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
platform_msi_domain_free_irqs(hba->dev);
- return 0;
}
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
@@ -1949,7 +2065,7 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = {
static struct platform_driver ufs_qcom_pltform = {
.probe = ufs_qcom_probe,
- .remove = ufs_qcom_remove,
+ .remove_new = ufs_qcom_remove,
.driver = {
.name = "ufshcd-qcom",
.pm = &ufs_qcom_pm_ops,
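
ufs_qcom_set_core_clk_ctrl() above packs cycles_in_1us into bits 27:16 of DME_VS_CORE_CLK_CTRL on controller V4.0.0 and newer using the helpers from <linux/bitfield.h>. A minimal sketch of that read-modify-write; example_pack_1us_cycles() is a hypothetical helper and its mask mirrors CLK_1US_CYCLES_MASK_V4 from ufs-qcom.h below:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Mirrors CLK_1US_CYCLES_MASK_V4 in ufs-qcom.h: a 12-bit field at bits 27:16 */
#define EXAMPLE_CLK_1US_CYCLES_MASK_V4	GENMASK(27, 16)

/* Hypothetical sketch, not part of this series. */
static int example_pack_1us_cycles(u32 *reg, u32 cycles_in_1us)
{
	/* Reject values wider than the field, as the driver does with -ERANGE */
	if (!FIELD_FIT(EXAMPLE_CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
		return -ERANGE;

	/* Clear the field, then shift the new value into bits 27:16 */
	*reg &= ~EXAMPLE_CLK_1US_CYCLES_MASK_V4;
	*reg |= FIELD_PREP(EXAMPLE_CLK_1US_CYCLES_MASK_V4, cycles_in_1us);

	return 0;
}

For a 403 MHz core_clk_unipro, cycles_in_1us is 403, comfortably within the 4095 maximum a 12-bit field can hold.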
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index d6f8e74bd538..9950a0089475 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -129,8 +129,21 @@ enum {
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
-#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
+#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16)
+#define CLK_1US_CYCLES_MASK GENMASK(7, 0)
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
+#define PA_VS_CORE_CLK_40NS_CYCLES 0x9007
+#define PA_VS_CORE_CLK_40NS_CYCLES_MASK GENMASK(6, 0)
+
+
+/* QCOM UFS host controller core clk frequencies */
+#define UNIPRO_CORE_CLK_FREQ_37_5_MHZ 38
+#define UNIPRO_CORE_CLK_FREQ_75_MHZ 75
+#define UNIPRO_CORE_CLK_FREQ_100_MHZ 100
+#define UNIPRO_CORE_CLK_FREQ_150_MHZ 150
+#define UNIPRO_CORE_CLK_FREQ_300_MHZ 300
+#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
+#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
@@ -227,7 +240,7 @@ struct ufs_qcom_host {
struct gpio_desc *device_reset;
- u32 hs_gear;
+ u32 phy_gear;
bool esi_enabled;
};
@@ -244,6 +257,7 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+#define ceil(freq, div) ((freq) % (div) == 0 ? ((freq)/(div)) : ((freq)/(div) + 1))
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
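
A worked example (not from this series) of the ceil() macro above, which ufs_qcom_set_core_clk_ctrl() uses to turn core_clk_unipro rates into the cycles_in_1us values matched by ufs_qcom_set_clk_40ns_cycles():

/*
 * cycles_in_1us = ceil(clock_rate_in_hz, 1000 * 1000), so:
 *
 *	ceil(403000000, 1000000) == 403   exact division, no rounding
 *	ceil(201500000, 1000000) == 202   201.5 MHz rounds up, hence the 202 define
 *	ceil(37500000,  1000000) == 38    37.5 MHz rounds up, hence the 38 define
 *
 * The matching 40ns cycle counts come from the switch in
 * ufs_qcom_set_clk_40ns_cycles() rather than from ceil(freq, 25): for
 * example, 403 MHz uses 16 cycles even though ceil(403, 25) would give 17.
 */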
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index cc94970b86c9..8711e5cbc968 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -388,18 +388,16 @@ static int ufs_renesas_probe(struct platform_device *pdev)
return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
}
-static int ufs_renesas_remove(struct platform_device *pdev)
+static void ufs_renesas_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
ufshcd_remove(hba);
-
- return 0;
}
static struct platform_driver ufs_renesas_platform = {
.probe = ufs_renesas_probe,
- .remove = ufs_renesas_remove,
+ .remove_new = ufs_renesas_remove,
.driver = {
.name = "ufshcd-renesas",
.of_match_table = of_match_ptr(ufs_renesas_of_match),
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
index 2bad75dd6d58..d8b165908809 100644
--- a/drivers/ufs/host/ufs-sprd.c
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -425,13 +425,12 @@ static int ufs_sprd_probe(struct platform_device *pdev)
return err;
}
-static int ufs_sprd_remove(struct platform_device *pdev)
+static void ufs_sprd_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
- return 0;
}
static const struct dev_pm_ops ufs_sprd_pm_ops = {
@@ -443,7 +442,7 @@ static const struct dev_pm_ops ufs_sprd_pm_ops = {
static struct platform_driver ufs_sprd_pltform = {
.probe = ufs_sprd_probe,
- .remove = ufs_sprd_remove,
+ .remove_new = ufs_sprd_remove,
.driver = {
.name = "ufshcd-sprd",
.pm = &ufs_sprd_pm_ops,
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 248a49e5e7f3..0aca666d2199 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -58,11 +58,12 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
int err = 0;
size_t len;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
+ ACPI_TYPE_BUFFER);
if (!obj)
return -EOPNOTSUPP;
- if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
+ if (obj->buffer.length < 1) {
err = -EINVAL;
goto out;
}
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 797a4dfe45d9..da2558e274b4 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
@@ -121,7 +122,7 @@ static bool phandle_exists(const struct device_node *np,
#define MAX_PROP_SIZE 32
int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg)
+ struct ufs_vreg **out_vreg, bool skip_current)
{
char prop_name[MAX_PROP_SIZE];
struct ufs_vreg *vreg = NULL;
@@ -147,6 +148,11 @@ int ufshcd_populate_vreg(struct device *dev, const char *name,
if (!vreg->name)
return -ENOMEM;
+ if (skip_current) {
+ vreg->max_uA = 0;
+ goto out;
+ }
+
snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
@@ -175,19 +181,19 @@ static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct ufs_vreg_info *info = &hba->vreg_info;
- err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
+ err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba, true);
if (err)
goto out;
- err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
+ err = ufshcd_populate_vreg(dev, "vcc", &info->vcc, false);
if (err)
goto out;
- err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
+ err = ufshcd_populate_vreg(dev, "vccq", &info->vccq, false);
if (err)
goto out;
- err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
+ err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2, false);
out:
return err;
}
@@ -207,6 +213,77 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
}
}
+static int ufshcd_parse_operating_points(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct device_node *np = dev->of_node;
+ struct dev_pm_opp_config config = {};
+ struct ufs_clk_info *clki;
+ const char **clk_names;
+ int cnt, i, ret;
+
+ if (!of_find_property(np, "operating-points-v2", NULL))
+ return 0;
+
+ if (of_find_property(np, "freq-table-hz", NULL)) {
+ dev_err(dev, "%s: operating-points and freq-table-hz are incompatible\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cnt = of_property_count_strings(np, "clock-names");
+ if (cnt <= 0) {
+ dev_err(dev, "%s: Missing clock-names\n", __func__);
+ return -ENODEV;
+ }
+
+ /* OPP expects clk_names to be NULL terminated */
+ clk_names = devm_kcalloc(dev, cnt + 1, sizeof(*clk_names), GFP_KERNEL);
+ if (!clk_names)
+ return -ENOMEM;
+
+ /*
+ * We still need to get reference to all clocks as the UFS core uses
+ * them separately.
+ */
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_string_index(np, "clock-names", i,
+ &clk_names[i]);
+ if (ret)
+ return ret;
+
+ clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+ if (!clki)
+ return -ENOMEM;
+
+ clki->name = devm_kstrdup(dev, clk_names[i], GFP_KERNEL);
+ if (!clki->name)
+ return -ENOMEM;
+
+ if (!strcmp(clk_names[i], "ref_clk"))
+ clki->keep_link_active = true;
+
+ list_add_tail(&clki->list, &hba->clk_list_head);
+ }
+
+ config.clk_names = clk_names,
+ config.config_clks = ufshcd_opp_config_clks;
+
+ ret = devm_pm_opp_set_config(dev, &config);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret) {
+ dev_err(dev, "Failed to add OPP table: %d\n", ret);
+ return ret;
+ }
+
+ hba->use_pm_opp = true;
+
+ return 0;
+}
+
/**
* ufshcd_get_pwr_dev_param - get finally agreed attributes for
* power mode change
@@ -373,6 +450,12 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
ufshcd_init_lanes_per_dir(hba);
+ err = ufshcd_parse_operating_points(hba);
+ if (err) {
+ dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
+ goto dealloc_host;
+ }
+
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err_probe(dev, err, "Initialization failed with error %d\n",
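
ufshcd_parse_operating_points() above is what ties the OPP core to the driver's own clock handler. A minimal sketch of the same registration pattern; example_register_opp() is a hypothetical helper, and clk_names must be a NULL-terminated array, which is why the function above allocates cnt + 1 zeroed entries with devm_kcalloc():

#include <linux/device.h>
#include <linux/pm_opp.h>
#include <ufs/ufshcd.h>		/* assumed to declare ufshcd_opp_config_clks() */

/* Hypothetical sketch, not part of this series. */
static int example_register_opp(struct device *dev, const char **clk_names)
{
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,			/* NULL-terminated */
		.config_clks = ufshcd_opp_config_clks,	/* exported by the UFS core above */
	};
	int ret;

	ret = devm_pm_opp_set_config(dev, &config);
	if (ret)
		return ret;

	/* Populate OPPs from the device's operating-points-v2 table */
	return devm_pm_opp_of_add_table(dev);
}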
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index 2df108f4ac13..a86a3ada4bef 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -32,6 +32,6 @@ void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg);
+ struct ufs_vreg **out_vreg, bool skip_current);
#endif /* UFSHCD_PLTFRM_H_ */