Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 4
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 4
-rw-r--r--  drivers/clk/clk-composite.c | 9
-rw-r--r--  drivers/clk/clk-mux.c | 2
-rw-r--r--  drivers/clk/clk-s2mps11.c | 2
-rw-r--r--  drivers/clk/clk.c | 42
-rw-r--r--  drivers/clk/hisilicon/clk-hi3620.c | 72
-rw-r--r--  drivers/clk/mmp/Makefile | 7
-rw-r--r--  drivers/clk/mmp/clk-frac.c | 74
-rw-r--r--  drivers/clk/mmp/clk-gate.c | 133
-rw-r--r--  drivers/clk/mmp/clk-mix.c | 513
-rw-r--r--  drivers/clk/mmp/clk-mmp2.c | 6
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 334
-rw-r--r--  drivers/clk/mmp/clk-of-pxa168.c | 279
-rw-r--r--  drivers/clk/mmp/clk-of-pxa910.c | 301
-rw-r--r--  drivers/clk/mmp/clk-pxa168.c | 6
-rw-r--r--  drivers/clk/mmp/clk-pxa910.c | 6
-rw-r--r--  drivers/clk/mmp/clk.c | 192
-rw-r--r--  drivers/clk/mmp/clk.h | 226
-rw-r--r--  drivers/clk/mmp/reset.c | 99
-rw-r--r--  drivers/clk/mmp/reset.h | 31
-rw-r--r--  drivers/clk/pxa/Makefile | 1
-rw-r--r--  drivers/clk/pxa/clk-pxa.c | 45
-rw-r--r--  drivers/clk/pxa/clk-pxa.h | 9
-rw-r--r--  drivers/clk/pxa/clk-pxa25x.c | 273
-rw-r--r--  drivers/clk/pxa/clk-pxa27x.c | 9
-rw-r--r--  drivers/clk/qcom/clk-pll.c | 2
-rw-r--r--  drivers/clk/qcom/clk-rcg.c | 20
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 28
-rw-r--r--  drivers/clk/rockchip/Makefile | 1
-rw-r--r--  drivers/clk/rockchip/clk-mmc-phase.c | 154
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 81
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 79
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 246
-rw-r--r--  drivers/clk/rockchip/clk.c | 20
-rw-r--r--  drivers/clk/rockchip/clk.h | 45
-rw-r--r--  drivers/clk/samsung/Makefile | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 33
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos4415.c | 1144
-rw-r--r--  drivers/clk/samsung/clk-exynos5260.c | 185
-rw-r--r--  drivers/clk/samsung/clk-exynos7.c | 743
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 25
-rw-r--r--  drivers/clk/samsung/clk-pll.h | 4
-rw-r--r--  drivers/clk/samsung/clk.c | 102
-rw-r--r--  drivers/clk/samsung/clk.h | 43
-rw-r--r--  drivers/clk/shmobile/clk-div6.c | 113
-rw-r--r--  drivers/clk/sunxi/Makefile | 1
-rw-r--r--  drivers/clk/sunxi/clk-a20-gmac.c | 7
-rw-r--r--  drivers/clk/sunxi/clk-factors.c | 6
-rw-r--r--  drivers/clk/sunxi/clk-factors.h | 3
-rw-r--r--  drivers/clk/sunxi/clk-mod0.c | 1
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-ar100.c | 4
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-mbus.c | 1
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-core.c | 271
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 85
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 328
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 112
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 1599
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 80
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 1
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 10
-rw-r--r--  drivers/macintosh/Kconfig | 10
-rw-r--r--  drivers/macintosh/Makefile | 1
-rw-r--r--  drivers/macintosh/therm_pm72.c | 2278
-rw-r--r--  drivers/macintosh/therm_pm72.h | 326
-rw-r--r--  drivers/scsi/53c700.c | 41
-rw-r--r--  drivers/scsi/Kconfig | 17
-rw-r--r--  drivers/scsi/advansys.c | 8
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 6
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 5
-rw-r--r--  drivers/scsi/esas2r/esas2r_flash.c | 4
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 1
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 1
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 116
-rw-r--r--  drivers/scsi/ipr.h | 4
-rw-r--r--  drivers/scsi/isci/init.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 1
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 5
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r--  drivers/scsi/pmcraid.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 12
-rw-r--r--  drivers/scsi/scsi.c | 22
-rw-r--r--  drivers/scsi/scsi_debug.c | 62
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 30
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r--  drivers/scsi/storvsc_drv.c | 7
-rw-r--r--  drivers/spi/spi-img-spfi.c | 4
-rw-r--r--  drivers/spi/spi-meson-spifc.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/include/linux/lustre_compat25.h | 24
-rw-r--r--  drivers/staging/lustre/lustre/llite/dir.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_lib.c | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 15
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 11
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_transport.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 26
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 71
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 7
-rw-r--r--  drivers/target/sbp/sbp_target.c | 2
-rw-r--r--  drivers/target/target_core_configfs.c | 344
-rw-r--r--  drivers/target/target_core_device.c | 90
-rw-r--r--  drivers/target/target_core_file.c | 42
-rw-r--r--  drivers/target/target_core_hba.c | 7
-rw-r--r--  drivers/target/target_core_iblock.c | 42
-rw-r--r--  drivers/target/target_core_internal.h | 28
-rw-r--r--  drivers/target/target_core_pr.c | 125
-rw-r--r--  drivers/target/target_core_pscsi.c | 28
-rw-r--r--  drivers/target/target_core_rd.c | 41
-rw-r--r--  drivers/target/target_core_sbc.c | 2
-rw-r--r--  drivers/target/target_core_spc.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 16
-rw-r--r--  drivers/target/target_core_user.c | 42
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 8
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 14
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.c | 10
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 2
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c | 2
-rw-r--r--  drivers/xen/xen-scsiback.c | 2
134 files changed, 7367 insertions(+), 4923 deletions(-)
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 62e2509f9df1..bbdb1b985c91 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -57,7 +57,7 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
static long clk_programmable_determine_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_clk)
+ struct clk_hw **best_parent_hw)
{
struct clk *parent = NULL;
long best_rate = -EINVAL;
@@ -84,7 +84,7 @@ static long clk_programmable_determine_rate(struct clk_hw *hw,
if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
best_rate = tmp_rate;
*best_parent_rate = parent_rate;
- *best_parent_clk = parent;
+ *best_parent_hw = __clk_get_hw(parent);
}
if (!best_rate)
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 95af2e665dd3..1c06f6f3a8c5 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1032,7 +1032,7 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
}
static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *best_parent_rate, struct clk **best_parent)
+ unsigned long *best_parent_rate, struct clk_hw **best_parent)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
struct clk *clk = hw->clk;
@@ -1075,7 +1075,7 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
if (delta < best_delta) {
best_delta = delta;
best_rate = other_rate;
- *best_parent = parent;
+ *best_parent = __clk_get_hw(parent);
*best_parent_rate = parent_rate;
}
}
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index b9355daf8065..4386697236a7 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -57,7 +57,7 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_p)
+ struct clk_hw **best_parent_p)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
@@ -80,8 +80,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
*best_parent_p = NULL;
if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
- *best_parent_p = clk_get_parent(mux_hw->clk);
- *best_parent_rate = __clk_get_rate(*best_parent_p);
+ parent = clk_get_parent(mux_hw->clk);
+ *best_parent_p = __clk_get_hw(parent);
+ *best_parent_rate = __clk_get_rate(parent);
return rate_ops->round_rate(rate_hw, rate,
best_parent_rate);
@@ -103,7 +104,7 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
if (!rate_diff || !*best_parent_p
|| best_rate_diff > rate_diff) {
- *best_parent_p = parent;
+ *best_parent_p = __clk_get_hw(parent);
*best_parent_rate = parent_rate;
best_rate_diff = rate_diff;
best_rate = tmp_rate;
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 4f96ff3ba728..6e1ecf94bf58 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -77,7 +77,7 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
else {
if (mux->flags & CLK_MUX_INDEX_BIT)
- index = (1 << ffs(index));
+ index = 1 << index;
if (mux->flags & CLK_MUX_INDEX_ONE)
index++;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 87a41038237d..bfa1e64e267d 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -218,7 +218,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Invalid device type\n");
return -EINVAL;
- };
+ }
/* Store clocks of_node in first element of s2mps11_clks array */
s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, clks_init);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4896ae9e23da..f4963b7d4e17 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -240,7 +240,6 @@ static const struct file_operations clk_dump_fops = {
.release = single_release,
};
-/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
struct dentry *d;
@@ -354,13 +353,13 @@ out:
mutex_unlock(&clk_debug_lock);
}
-struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
void *data, const struct file_operations *fops)
{
struct dentry *d = NULL;
- if (clk->dentry)
- d = debugfs_create_file(name, mode, clk->dentry, data, fops);
+ if (hw->clk->dentry)
+ d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
return d;
}
@@ -574,11 +573,6 @@ unsigned int __clk_get_enable_count(struct clk *clk)
return !clk ? 0 : clk->enable_count;
}
-unsigned int __clk_get_prepare_count(struct clk *clk)
-{
- return !clk ? 0 : clk->prepare_count;
-}
-
unsigned long __clk_get_rate(struct clk *clk)
{
unsigned long ret;
@@ -601,7 +595,7 @@ out:
}
EXPORT_SYMBOL_GPL(__clk_get_rate);
-unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk *clk)
{
if (!clk)
return 0;
@@ -707,7 +701,7 @@ struct clk *__clk_lookup(const char *name)
*/
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_p)
+ struct clk_hw **best_parent_p)
{
struct clk *clk = hw->clk, *parent, *best_parent = NULL;
int i, num_parents;
@@ -743,7 +737,7 @@ long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
out:
if (best_parent)
- *best_parent_p = best_parent;
+ *best_parent_p = best_parent->hw;
*best_parent_rate = best;
return best;
@@ -951,6 +945,7 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long parent_rate = 0;
struct clk *parent;
+ struct clk_hw *parent_hw;
if (!clk)
return 0;
@@ -959,10 +954,11 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
if (parent)
parent_rate = parent->rate;
- if (clk->ops->determine_rate)
+ if (clk->ops->determine_rate) {
+ parent_hw = parent ? parent->hw : NULL;
return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
- &parent);
- else if (clk->ops->round_rate)
+ &parent_hw);
+ } else if (clk->ops->round_rate)
return clk->ops->round_rate(clk->hw, rate, &parent_rate);
else if (clk->flags & CLK_SET_RATE_PARENT)
return __clk_round_rate(clk->parent, rate);
@@ -1350,6 +1346,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
struct clk *old_parent, *parent;
+ struct clk_hw *parent_hw;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
int p_index = 0;
@@ -1365,9 +1362,11 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
/* find the closest rate and parent clk/rate */
if (clk->ops->determine_rate) {
+ parent_hw = parent ? parent->hw : NULL;
new_rate = clk->ops->determine_rate(clk->hw, rate,
&best_parent_rate,
- &parent);
+ &parent_hw);
+ parent = parent_hw->clk;
} else if (clk->ops->round_rate) {
new_rate = clk->ops->round_rate(clk->hw, rate,
&best_parent_rate);
@@ -1614,7 +1613,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
if (clk->num_parents == 1) {
if (IS_ERR_OR_NULL(clk->parent))
- ret = clk->parent = __clk_lookup(clk->parent_names[0]);
+ clk->parent = __clk_lookup(clk->parent_names[0]);
ret = clk->parent;
goto out;
}
@@ -1944,7 +1943,6 @@ int __clk_init(struct device *dev, struct clk *clk)
else
clk->rate = 0;
- clk_debug_register(clk);
/*
* walk the list of orphan clocks and reparent any that are children of
* this clock
@@ -1979,6 +1977,9 @@ int __clk_init(struct device *dev, struct clk *clk)
out:
clk_prepare_unlock();
+ if (!ret)
+ clk_debug_register(clk);
+
return ret;
}
@@ -2273,14 +2274,17 @@ int __clk_get(struct clk *clk)
void __clk_put(struct clk *clk)
{
+ struct module *owner;
+
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_prepare_lock();
+ owner = clk->owner;
kref_put(&clk->ref, __clk_release);
clk_prepare_unlock();
- module_put(clk->owner);
+ module_put(owner);
}
/*** clk rate change notifiers ***/
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 339945d2503b..007144f81f50 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -38,44 +38,44 @@
#include "clk.h"
/* clock parent list */
-static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *uart0_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart1_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart2_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart3_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart4_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
/* share axi parent */
-static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", };
-static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", };
-static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
+static const char *pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *sd_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
+static const char *g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *venc_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
"armpll3", "armpll5", };
-static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
"armpll3", "armpll5", };
-static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", };
-static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
+static const char *mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
/* fixed rate clocks */
@@ -296,7 +296,7 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_p)
+ struct clk_hw **best_parent_p)
{
struct clk_mmc *mclk = to_mmc(hw);
unsigned long best = 0;
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 392d78044ce3..3caaf7cc169c 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -2,7 +2,12 @@
# Makefile for mmp specific clk
#
-obj-y += clk-apbc.o clk-apmu.o clk-frac.o
+obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
+
+obj-$(CONFIG_RESET_CONTROLLER) += reset.o
+
+obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
+obj-$(CONFIG_MACH_MMP2_DT) += clk-of-mmp2.o
obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 23a56f561812..584a9927993b 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -22,19 +22,12 @@
* numerator/denominator = Fin / (Fout * factor)
*/
-#define to_clk_factor(hw) container_of(hw, struct clk_factor, hw)
-struct clk_factor {
- struct clk_hw hw;
- void __iomem *base;
- struct clk_factor_masks *masks;
- struct clk_factor_tbl *ftbl;
- unsigned int ftbl_cnt;
-};
+#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
unsigned long *prate)
{
- struct clk_factor *factor = to_clk_factor(hw);
+ struct mmp_clk_factor *factor = to_clk_factor(hw);
unsigned long rate = 0, prev_rate;
int i;
@@ -58,8 +51,8 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct clk_factor *factor = to_clk_factor(hw);
- struct clk_factor_masks *masks = factor->masks;
+ struct mmp_clk_factor *factor = to_clk_factor(hw);
+ struct mmp_clk_factor_masks *masks = factor->masks;
unsigned int val, num, den;
val = readl_relaxed(factor->base);
@@ -81,11 +74,12 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
unsigned long prate)
{
- struct clk_factor *factor = to_clk_factor(hw);
- struct clk_factor_masks *masks = factor->masks;
+ struct mmp_clk_factor *factor = to_clk_factor(hw);
+ struct mmp_clk_factor_masks *masks = factor->masks;
int i;
unsigned long val;
unsigned long prev_rate, rate = 0;
+ unsigned long flags = 0;
for (i = 0; i < factor->ftbl_cnt; i++) {
prev_rate = rate;
@@ -97,6 +91,9 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
if (i > 0)
i--;
+ if (factor->lock)
+ spin_lock_irqsave(factor->lock, flags);
+
val = readl_relaxed(factor->base);
val &= ~(masks->num_mask << masks->num_shift);
@@ -107,21 +104,65 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(val, factor->base);
+ if (factor->lock)
+ spin_unlock_irqrestore(factor->lock, flags);
+
return 0;
}
+static void clk_factor_init(struct clk_hw *hw)
+{
+ struct mmp_clk_factor *factor = to_clk_factor(hw);
+ struct mmp_clk_factor_masks *masks = factor->masks;
+ u32 val, num, den;
+ int i;
+ unsigned long flags = 0;
+
+ if (factor->lock)
+ spin_lock_irqsave(factor->lock, flags);
+
+ val = readl(factor->base);
+
+ /* calculate numerator */
+ num = (val >> masks->num_shift) & masks->num_mask;
+
+ /* calculate denominator */
+ den = (val >> masks->den_shift) & masks->den_mask;
+
+ for (i = 0; i < factor->ftbl_cnt; i++)
+ if (den == factor->ftbl[i].den && num == factor->ftbl[i].num)
+ break;
+
+ if (i >= factor->ftbl_cnt) {
+ val &= ~(masks->num_mask << masks->num_shift);
+ val |= (factor->ftbl[0].num & masks->num_mask) <<
+ masks->num_shift;
+
+ val &= ~(masks->den_mask << masks->den_shift);
+ val |= (factor->ftbl[0].den & masks->den_mask) <<
+ masks->den_shift;
+
+ writel(val, factor->base);
+ }
+
+ if (factor->lock)
+ spin_unlock_irqrestore(factor->lock, flags);
+}
+
static struct clk_ops clk_factor_ops = {
.recalc_rate = clk_factor_recalc_rate,
.round_rate = clk_factor_round_rate,
.set_rate = clk_factor_set_rate,
+ .init = clk_factor_init,
};
struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
unsigned long flags, void __iomem *base,
- struct clk_factor_masks *masks, struct clk_factor_tbl *ftbl,
- unsigned int ftbl_cnt)
+ struct mmp_clk_factor_masks *masks,
+ struct mmp_clk_factor_tbl *ftbl,
+ unsigned int ftbl_cnt, spinlock_t *lock)
{
- struct clk_factor *factor;
+ struct mmp_clk_factor *factor;
struct clk_init_data init;
struct clk *clk;
@@ -142,6 +183,7 @@ struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
factor->ftbl = ftbl;
factor->ftbl_cnt = ftbl_cnt;
factor->hw.init = &init;
+ factor->lock = lock;
init.name = name;
init.ops = &clk_factor_ops;
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
new file mode 100644
index 000000000000..adbd9d64ded2
--- /dev/null
+++ b/drivers/clk/mmp/clk-gate.c
@@ -0,0 +1,133 @@
+/*
+ * mmp gate clock operation source file
+ *
+ * Copyright (C) 2014 Marvell
+ * Chao Xie <chao.xie@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "clk.h"
+
+/*
+ * Some clocks have multiple bits to enable the clock, and the
+ * bits that disable the clock are not the same as the enabling bits.
+ */
+
+#define to_clk_mmp_gate(hw) container_of(hw, struct mmp_clk_gate, hw)
+
+static int mmp_clk_gate_enable(struct clk_hw *hw)
+{
+ struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+ struct clk *clk = hw->clk;
+ unsigned long flags = 0;
+ unsigned long rate;
+ u32 tmp;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ tmp = readl(gate->reg);
+ tmp &= ~gate->mask;
+ tmp |= gate->val_enable;
+ writel(tmp, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
+ rate = __clk_get_rate(clk);
+ /* Need to delay for 2 clock cycles, i.e. 2000000/rate microseconds. */
+ udelay(2000000/rate);
+ }
+
+ return 0;
+}
+
+static void mmp_clk_gate_disable(struct clk_hw *hw)
+{
+ struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+ unsigned long flags = 0;
+ u32 tmp;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ tmp = readl(gate->reg);
+ tmp &= ~gate->mask;
+ tmp |= gate->val_disable;
+ writel(tmp, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+ unsigned long flags = 0;
+ u32 tmp;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ tmp = readl(gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return (tmp & gate->mask) == gate->val_enable;
+}
+
+const struct clk_ops mmp_clk_gate_ops = {
+ .enable = mmp_clk_gate_enable,
+ .disable = mmp_clk_gate_disable,
+ .is_enabled = mmp_clk_gate_is_enabled,
+};
+
+struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
+ unsigned int gate_flags, spinlock_t *lock)
+{
+ struct mmp_clk_gate *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the gate */
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ pr_err("%s:%s could not allocate gate clk\n", __func__, name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &mmp_clk_gate_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_gate assignments */
+ gate->reg = reg;
+ gate->mask = mask;
+ gate->val_enable = val_enable;
+ gate->val_disable = val_disable;
+ gate->flags = gate_flags;
+ gate->lock = lock;
+ gate->hw.init = &init;
+
+ clk = clk_register(dev, &gate->hw);
+
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
new file mode 100644
index 000000000000..48fa53c7ce5e
--- /dev/null
+++ b/drivers/clk/mmp/clk-mix.c
@@ -0,0 +1,513 @@
+/*
+ * mmp mix(div and mux) clock operation source file
+ *
+ * Copyright (C) 2014 Marvell
+ * Chao Xie <chao.xie@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
+/*
+ * The mix clock is a clock that combines a mux and a divider.
+ * Because the div field and the mux field need to be set at the
+ * same time, it cannot be split into two separate clock types.
+ */
+
+#define to_clk_mix(hw) container_of(hw, struct mmp_clk_mix, hw)
+
+static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
+{
+ unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
+ unsigned int maxdiv = 0;
+ struct clk_div_table *clkt;
+
+ if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+ return div_mask;
+ if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << div_mask;
+ if (mix->div_table) {
+ for (clkt = mix->div_table; clkt->div; clkt++)
+ if (clkt->div > maxdiv)
+ maxdiv = clkt->div;
+ return maxdiv;
+ }
+ return div_mask + 1;
+}
+
+static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
+{
+ struct clk_div_table *clkt;
+
+ if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+ return val;
+ if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << val;
+ if (mix->div_table) {
+ for (clkt = mix->div_table; clkt->div; clkt++)
+ if (clkt->val == val)
+ return clkt->div;
+ if (clkt->div == 0)
+ return 0;
+ }
+ return val + 1;
+}
+
+static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
+{
+ int num_parents = __clk_get_num_parents(mix->hw.clk);
+ int i;
+
+ if (mix->mux_flags & CLK_MUX_INDEX_BIT)
+ return ffs(val) - 1;
+ if (mix->mux_flags & CLK_MUX_INDEX_ONE)
+ return val - 1;
+ if (mix->mux_table) {
+ for (i = 0; i < num_parents; i++)
+ if (mix->mux_table[i] == val)
+ return i;
+ if (i == num_parents)
+ return 0;
+ }
+
+ return val;
+}
+static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
+{
+ struct clk_div_table *clkt;
+
+ if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+ return div;
+ if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+ return __ffs(div);
+ if (mix->div_table) {
+ for (clkt = mix->div_table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return clkt->val;
+ if (clkt->div == 0)
+ return 0;
+ }
+
+ return div - 1;
+}
+
+static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
+{
+ if (mix->mux_table)
+ return mix->mux_table[mux];
+
+ return mux;
+}
+
+static void _filter_clk_table(struct mmp_clk_mix *mix,
+ struct mmp_clk_mix_clk_table *table,
+ unsigned int table_size)
+{
+ int i;
+ struct mmp_clk_mix_clk_table *item;
+ struct clk *parent, *clk;
+ unsigned long parent_rate;
+
+ clk = mix->hw.clk;
+
+ for (i = 0; i < table_size; i++) {
+ item = &table[i];
+ parent = clk_get_parent_by_index(clk, item->parent_index);
+ parent_rate = __clk_get_rate(parent);
+ if (parent_rate % item->rate) {
+ item->valid = 0;
+ } else {
+ item->divisor = parent_rate / item->rate;
+ item->valid = 1;
+ }
+ }
+}
+
+static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
+ unsigned int change_mux, unsigned int change_div)
+{
+ struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+ u8 width, shift;
+ u32 mux_div, fc_req;
+ int ret, timeout = 50;
+ unsigned long flags = 0;
+
+ if (!change_mux && !change_div)
+ return -EINVAL;
+
+ if (mix->lock)
+ spin_lock_irqsave(mix->lock, flags);
+
+ if (mix->type == MMP_CLK_MIX_TYPE_V1
+ || mix->type == MMP_CLK_MIX_TYPE_V2)
+ mux_div = readl(ri->reg_clk_ctrl);
+ else
+ mux_div = readl(ri->reg_clk_sel);
+
+ if (change_div) {
+ width = ri->width_div;
+ shift = ri->shift_div;
+ mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
+ mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
+ }
+
+ if (change_mux) {
+ width = ri->width_mux;
+ shift = ri->shift_mux;
+ mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
+ mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
+ }
+
+ if (mix->type == MMP_CLK_MIX_TYPE_V1) {
+ writel(mux_div, ri->reg_clk_ctrl);
+ } else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
+ mux_div |= (1 << ri->bit_fc);
+ writel(mux_div, ri->reg_clk_ctrl);
+
+ do {
+ fc_req = readl(ri->reg_clk_ctrl);
+ timeout--;
+ if (!(fc_req & (1 << ri->bit_fc)))
+ break;
+ } while (timeout);
+
+ if (timeout == 0) {
+ pr_err("%s:%s cannot do frequency change\n",
+ __func__, __clk_get_name(mix->hw.clk));
+ ret = -EBUSY;
+ goto error;
+ }
+ } else {
+ fc_req = readl(ri->reg_clk_ctrl);
+ fc_req |= 1 << ri->bit_fc;
+ writel(fc_req, ri->reg_clk_ctrl);
+ writel(mux_div, ri->reg_clk_sel);
+ fc_req &= ~(1 << ri->bit_fc);
+ }
+
+ ret = 0;
+error:
+ if (mix->lock)
+ spin_unlock_irqrestore(mix->lock, flags);
+
+ return ret;
+}
+
+static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_clk)
+{
+ struct mmp_clk_mix *mix = to_clk_mix(hw);
+ struct mmp_clk_mix_clk_table *item;
+ struct clk *parent, *parent_best, *mix_clk;
+ unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
+ unsigned long gap, gap_best;
+ u32 div_val_max;
+ unsigned int div;
+ int i, j;
+
+ mix_clk = hw->clk;
+
+ parent = NULL;
+ mix_rate_best = 0;
+ parent_rate_best = 0;
+ gap_best = rate;
+ parent_best = NULL;
+
+ if (mix->table) {
+ for (i = 0; i < mix->table_size; i++) {
+ item = &mix->table[i];
+ if (item->valid == 0)
+ continue;
+ parent = clk_get_parent_by_index(mix_clk,
+ item->parent_index);
+ parent_rate = __clk_get_rate(parent);
+ mix_rate = parent_rate / item->divisor;
+ gap = abs(mix_rate - rate);
+ if (parent_best == NULL || gap < gap_best) {
+ parent_best = parent;
+ parent_rate_best = parent_rate;
+ mix_rate_best = mix_rate;
+ gap_best = gap;
+ if (gap_best == 0)
+ goto found;
+ }
+ }
+ } else {
+ for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
+ parent = clk_get_parent_by_index(mix_clk, i);
+ parent_rate = __clk_get_rate(parent);
+ div_val_max = _get_maxdiv(mix);
+ for (j = 0; j < div_val_max; j++) {
+ div = _get_div(mix, j);
+ mix_rate = parent_rate / div;
+ gap = abs(mix_rate - rate);
+ if (parent_best == NULL || gap < gap_best) {
+ parent_best = parent;
+ parent_rate_best = parent_rate;
+ mix_rate_best = mix_rate;
+ gap_best = gap;
+ if (gap_best == 0)
+ goto found;
+ }
+ }
+ }
+ }
+
+found:
+ *best_parent_rate = parent_rate_best;
+ *best_parent_clk = __clk_get_hw(parent_best);
+
+ return mix_rate_best;
+}
+
+static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index)
+{
+ struct mmp_clk_mix *mix = to_clk_mix(hw);
+ unsigned int div;
+ u32 div_val, mux_val;
+
+ div = parent_rate / rate;
+ div_val = _get_div_val(mix, div);
+ mux_val = _get_mux_val(mix, index);
+
+ return _set_rate(mix, mux_val, div_val, 1, 1);
+}
+
+static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
+{
+ struct mmp_clk_mix *mix = to_clk_mix(hw);
+ struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+ unsigned long flags = 0;
+ u32 mux_div = 0;
+ u8 width, shift;
+ u32 mux_val;
+
+ if (mix->lock)
+ spin_lock_irqsave(mix->lock, flags);
+
+ if (mix->type == MMP_CLK_MIX_TYPE_V1
+ || mix->type == MMP_CLK_MIX_TYPE_V2)
+ mux_div = readl(ri->reg_clk_ctrl);
+ else
+ mux_div = readl(ri->reg_clk_sel);
+
+ if (mix->lock)
+ spin_unlock_irqrestore(mix->lock, flags);
+
+ width = mix->reg_info.width_mux;
+ shift = mix->reg_info.shift_mux;
+
+ mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);
+
+ return _get_mux(mix, mux_val);
+}
+
+static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mmp_clk_mix *mix = to_clk_mix(hw);
+ struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+ unsigned long flags = 0;
+ u32 mux_div = 0;
+ u8 width, shift;
+ unsigned int div;
+
+ if (mix->lock)
+ spin_lock_irqsave(mix->lock, flags);
+
+ if (mix->type == MMP_CLK_MIX_TYPE_V1
+ || mix->type == MMP_CLK_MIX_TYPE_V2)
+ mux_div = readl(ri->reg_clk_ctrl);
+ else
+ mux_div = readl(ri->reg_clk_sel);
+
+ if (mix->lock)
+ spin_unlock_irqrestore(mix->lock, flags);
+
+ width = mix->reg_info.width_div;
+ shift = mix->reg_info.shift_div;
+
+ div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));
+
+ return parent_rate / div;
+}
+
+static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct mmp_clk_mix *mix = to_clk_mix(hw);
+ struct mmp_clk_mix_clk_table *item;
+ int i;
+ u32 div_val, mux_val;
+
+ if (mix->table) {
+ for (i = 0; i < mix->table_size; i++) {
+ item = &mix->table[i];
+ if (item->valid == 0)
+ continue;
+ if (item->parent_index == index)
+ break;
+ }
+ if (i < mix->table_size) {
+ div_val = _get_div_val(mix, item->divisor);
+ mux_val = _get_mux_val(mix, item->parent_index);
+ } else
+ return -EINVAL;
+ } else {
+ mux_val = _get_mux_val(mix, index);
+ div_val = 0;
+ }
+
+ return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
+}
+
+static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long best_parent_rate)
+{
+ struct mmp_clk_mix *mix = to_clk_mix(hw);
+ struct mmp_clk_mix_clk_table *item;
+ unsigned long parent_rate;
+ unsigned int best_divisor;
+ struct clk *mix_clk, *parent;
+ int i;
+
+ best_divisor = best_parent_rate / rate;
+
+ mix_clk = hw->clk;
+ if (mix->table) {
+ for (i = 0; i < mix->table_size; i++) {
+ item = &mix->table[i];
+ if (item->valid == 0)
+ continue;
+ parent = clk_get_parent_by_index(mix_clk,
+ item->parent_index);
+ parent_rate = __clk_get_rate(parent);
+ if (parent_rate == best_parent_rate
+ && item->divisor == best_divisor)
+ break;
+ }
+ if (i < mix->table_size)
+ return _set_rate(mix,
+ _get_mux_val(mix, item->parent_index),
+ _get_div_val(mix, item->divisor),
+ 1, 1);
+ else
+ return -EINVAL;
+ } else {
+ for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
+ parent = clk_get_parent_by_index(mix_clk, i);
+ parent_rate = __clk_get_rate(parent);
+ if (parent_rate == best_parent_rate)
+ break;
+ }
+ if (i < __clk_get_num_parents(mix_clk))
+ return _set_rate(mix, _get_mux_val(mix, i),
+ _get_div_val(mix, best_divisor), 1, 1);
+ else
+ return -EINVAL;
+ }
+}
+
+static void mmp_clk_mix_init(struct clk_hw *hw)
+{
+ struct mmp_clk_mix *mix = to_clk_mix(hw);
+
+ if (mix->table)
+ _filter_clk_table(mix, mix->table, mix->table_size);
+}
+
+const struct clk_ops mmp_clk_mix_ops = {
+ .determine_rate = mmp_clk_mix_determine_rate,
+ .set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
+ .set_rate = mmp_clk_set_rate,
+ .set_parent = mmp_clk_set_parent,
+ .get_parent = mmp_clk_mix_get_parent,
+ .recalc_rate = mmp_clk_mix_recalc_rate,
+ .init = mmp_clk_mix_init,
+};
+
+struct clk *mmp_clk_register_mix(struct device *dev,
+ const char *name,
+ const char **parent_names,
+ u8 num_parents,
+ unsigned long flags,
+ struct mmp_clk_mix_config *config,
+ spinlock_t *lock)
+{
+ struct mmp_clk_mix *mix;
+ struct clk *clk;
+ struct clk_init_data init;
+ size_t table_bytes;
+
+ mix = kzalloc(sizeof(*mix), GFP_KERNEL);
+ if (!mix) {
+ pr_err("%s:%s: could not allocate mmp mix clk\n",
+ __func__, name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.flags = flags | CLK_GET_RATE_NOCACHE;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.ops = &mmp_clk_mix_ops;
+
+ memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
+ if (config->table) {
+ table_bytes = sizeof(*config->table) * config->table_size;
+ mix->table = kzalloc(table_bytes, GFP_KERNEL);
+ if (!mix->table) {
+ pr_err("%s:%s: could not allocate mmp mix table\n",
+ __func__, name);
+ kfree(mix);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(mix->table, config->table, table_bytes);
+ mix->table_size = config->table_size;
+ }
+
+ if (config->mux_table) {
+ table_bytes = sizeof(u32) * num_parents;
+ mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
+ if (!mix->mux_table) {
+ pr_err("%s:%s: could not allocate mmp mix mux-table\n",
+ __func__, name);
+ kfree(mix->table);
+ kfree(mix);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(mix->mux_table, config->mux_table, table_bytes);
+ }
+
+ mix->div_flags = config->div_flags;
+ mix->mux_flags = config->mux_flags;
+ mix->lock = lock;
+ mix->hw.init = &init;
+
+ if (config->reg_info.bit_fc >= 32)
+ mix->type = MMP_CLK_MIX_TYPE_V1;
+ else if (config->reg_info.reg_clk_sel)
+ mix->type = MMP_CLK_MIX_TYPE_V3;
+ else
+ mix->type = MMP_CLK_MIX_TYPE_V2;
+ clk = clk_register(dev, &mix->hw);
+
+ if (IS_ERR(clk)) {
+ kfree(mix->mux_table);
+ kfree(mix->table);
+ kfree(mix);
+ }
+
+ return clk;
+}
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index b2721cae257a..5c90a4230fa3 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -54,7 +54,7 @@
static DEFINE_SPINLOCK(clk_lock);
-static struct clk_factor_masks uart_factor_masks = {
+static struct mmp_clk_factor_masks uart_factor_masks = {
.factor = 2,
.num_mask = 0x1fff,
.den_mask = 0x1fff,
@@ -62,7 +62,7 @@ static struct clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct clk_factor_tbl uart_factor_tbl[] = {
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
{.num = 14634, .den = 2165}, /*14.745MHZ */
{.num = 3521, .den = 689}, /*19.23MHZ */
{.num = 9679, .den = 5728}, /*58.9824MHZ */
@@ -191,7 +191,7 @@ void __init mmp2_clk_init(void)
clk = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
mpmu_base + MPMU_UART_PLL,
&uart_factor_masks, uart_factor_tbl,
- ARRAY_SIZE(uart_factor_tbl));
+ ARRAY_SIZE(uart_factor_tbl), &clk_lock);
clk_set_rate(clk, 14745600);
clk_register_clkdev(clk, "uart_pll", NULL);
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
new file mode 100644
index 000000000000..2cbc2b43ae52
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -0,0 +1,334 @@
+/*
+ * mmp2 clock framework source file
+ *
+ * Copyright (C) 2012 Marvell
+ * Chao Xie <xiechao.mail@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/marvell,mmp2.h>
+
+#include "clk.h"
+#include "reset.h"
+
+#define APBC_RTC 0x0
+#define APBC_TWSI0 0x4
+#define APBC_TWSI1 0x8
+#define APBC_TWSI2 0xc
+#define APBC_TWSI3 0x10
+#define APBC_TWSI4 0x7c
+#define APBC_TWSI5 0x80
+#define APBC_KPC 0x18
+#define APBC_UART0 0x2c
+#define APBC_UART1 0x30
+#define APBC_UART2 0x34
+#define APBC_UART3 0x88
+#define APBC_GPIO 0x38
+#define APBC_PWM0 0x3c
+#define APBC_PWM1 0x40
+#define APBC_PWM2 0x44
+#define APBC_PWM3 0x48
+#define APBC_SSP0 0x50
+#define APBC_SSP1 0x54
+#define APBC_SSP2 0x58
+#define APBC_SSP3 0x5c
+#define APMU_SDH0 0x54
+#define APMU_SDH1 0x58
+#define APMU_SDH2 0xe8
+#define APMU_SDH3 0xec
+#define APMU_USB 0x5c
+#define APMU_DISP0 0x4c
+#define APMU_DISP1 0x110
+#define APMU_CCIC0 0x50
+#define APMU_CCIC1 0xf4
+#define MPMU_UART_PLL 0x14
+
+struct mmp2_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *mpmu_base;
+ void __iomem *apmu_base;
+ void __iomem *apbc_base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+ {MMP2_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
+ {MMP2_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
+ {MMP2_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 800000000},
+ {MMP2_CLK_PLL2, "pll2", NULL, CLK_IS_ROOT, 960000000},
+ {MMP2_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+ {MMP2_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
+ {MMP2_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
+ {MMP2_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
+ {MMP2_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
+ {MMP2_CLK_PLL1_20, "pll1_20", "pll1_4", 1, 5, 0},
+ {MMP2_CLK_PLL1_3, "pll1_3", "pll1", 1, 3, 0},
+ {MMP2_CLK_PLL1_6, "pll1_6", "pll1_3", 1, 2, 0},
+ {MMP2_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
+ {MMP2_CLK_PLL2_2, "pll2_2", "pll2", 1, 2, 0},
+ {MMP2_CLK_PLL2_4, "pll2_4", "pll2_2", 1, 2, 0},
+ {MMP2_CLK_PLL2_8, "pll2_8", "pll2_4", 1, 2, 0},
+ {MMP2_CLK_PLL2_16, "pll2_16", "pll2_8", 1, 2, 0},
+ {MMP2_CLK_PLL2_3, "pll2_3", "pll2", 1, 3, 0},
+ {MMP2_CLK_PLL2_6, "pll2_6", "pll2_3", 1, 2, 0},
+ {MMP2_CLK_PLL2_12, "pll2_12", "pll2_6", 1, 2, 0},
+ {MMP2_CLK_VCTCXO_2, "vctcxo_2", "vctcxo", 1, 2, 0},
+ {MMP2_CLK_VCTCXO_4, "vctcxo_4", "vctcxo_2", 1, 2, 0},
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+ .factor = 2,
+ .num_mask = 0x1fff,
+ .den_mask = 0x1fff,
+ .num_shift = 16,
+ .den_shift = 0,
+};
+
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
+ {.num = 14634, .den = 2165}, /*14.745MHZ */
+ {.num = 3521, .den = 689}, /*19.23MHZ */
+ {.num = 9679, .den = 5728}, /*58.9824MHZ */
+ {.num = 15850, .den = 9451}, /*59.429MHZ */
+};
+
+static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
+{
+ struct clk *clk;
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+ ARRAY_SIZE(fixed_rate_clks));
+
+ mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+ ARRAY_SIZE(fixed_factor_clks));
+
+ clk = mmp_clk_register_factor("uart_pll", "pll1_4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->mpmu_base + MPMU_UART_PLL,
+ &uart_factor_masks, uart_factor_tbl,
+ ARRAY_SIZE(uart_factor_tbl), NULL);
+ mmp_clk_add(unit, MMP2_CLK_UART_PLL, clk);
+}
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+static DEFINE_SPINLOCK(uart2_lock);
+static const char *uart_parent_names[] = {"uart_pll", "vctcxo"};
+
+static DEFINE_SPINLOCK(ssp0_lock);
+static DEFINE_SPINLOCK(ssp1_lock);
+static DEFINE_SPINLOCK(ssp2_lock);
+static DEFINE_SPINLOCK(ssp3_lock);
+static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
+
+static DEFINE_SPINLOCK(reset_lock);
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+ {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+ {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+ {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
+ {0, "uart3_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART3, 4, 3, 0, &uart2_lock},
+ {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
+ {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
+ {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
+ {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
+};
+
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+ {MMP2_CLK_TWSI0, "twsi0_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_TWSI1, "twsi1_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_TWSI2, "twsi2_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI2, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_TWSI3, "twsi3_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI3, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_TWSI4, "twsi4_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI4, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_TWSI5, "twsi5_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI5, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+ {MMP2_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x87, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+ {MMP2_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x7, 0x3, 0x0, 0, &reset_lock},
+ {MMP2_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x7, 0x3, 0x0, 0, &reset_lock},
+ /* These gate clocks have a mux parent. */
+ {MMP2_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x7, 0x3, 0x0, 0, &uart0_lock},
+ {MMP2_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x7, 0x3, 0x0, 0, &uart1_lock},
+ {MMP2_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x7, 0x3, 0x0, 0, &uart2_lock},
+ {MMP2_CLK_UART3, "uart3_clk", "uart3_mux", CLK_SET_RATE_PARENT, APBC_UART3, 0x7, 0x3, 0x0, 0, &uart2_lock},
+ {MMP2_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x7, 0x3, 0x0, 0, &ssp0_lock},
+ {MMP2_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x7, 0x3, 0x0, 0, &ssp1_lock},
+ {MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock},
+ {MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock},
+};
+
+static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(apbc_mux_clks));
+
+ mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(apbc_gate_clks));
+}
+
+static DEFINE_SPINLOCK(sdh_lock);
+static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
+static struct mmp_clk_mix_config sdh_mix_config = {
+ .reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32),
+};
+
+static DEFINE_SPINLOCK(usb_lock);
+
+static DEFINE_SPINLOCK(disp0_lock);
+static DEFINE_SPINLOCK(disp1_lock);
+static const char *disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};
+
+static DEFINE_SPINLOCK(ccic0_lock);
+static DEFINE_SPINLOCK(ccic1_lock);
+static const char *ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
+static struct mmp_clk_mix_config ccic0_mix_config = {
+ .reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
+};
+static struct mmp_clk_mix_config ccic1_mix_config = {
+ .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
+};
+
+static struct mmp_param_mux_clk apmu_mux_clks[] = {
+ {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
+ {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
+};
+
+static struct mmp_param_div_clk apmu_div_clks[] = {
+ {0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock},
+ {0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock},
+ {0, "disp1_div", "disp1_mux", CLK_SET_RATE_PARENT, APMU_DISP1, 8, 4, 0, &disp1_lock},
+ {0, "ccic0_sphy_div", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
+ {0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock},
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+ {MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+ /* These gate clocks have a mux parent. */
+ {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
+ {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
+ {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
+ {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
+ {MMP2_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
+ {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
+ {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
+ {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
+};
+
+static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
+{
+ struct clk *clk;
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_SDH0;
+ clk = mmp_clk_register_mix(NULL, "sdh_mix_clk", sdh_parent_names,
+ ARRAY_SIZE(sdh_parent_names),
+ CLK_SET_RATE_PARENT,
+ &sdh_mix_config, &sdh_lock);
+
+ ccic0_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC0;
+ clk = mmp_clk_register_mix(NULL, "ccic0_mix_clk", ccic_parent_names,
+ ARRAY_SIZE(ccic_parent_names),
+ CLK_SET_RATE_PARENT,
+ &ccic0_mix_config, &ccic0_lock);
+ mmp_clk_add(unit, MMP2_CLK_CCIC0_MIX, clk);
+
+ ccic1_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC1;
+ clk = mmp_clk_register_mix(NULL, "ccic1_mix_clk", ccic_parent_names,
+ ARRAY_SIZE(ccic_parent_names),
+ CLK_SET_RATE_PARENT,
+ &ccic1_mix_config, &ccic1_lock);
+ mmp_clk_add(unit, MMP2_CLK_CCIC1_MIX, clk);
+
+ mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_mux_clks));
+
+ mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_div_clks));
+
+ mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_gate_clks));
+}
+
+static void mmp2_clk_reset_init(struct device_node *np,
+ struct mmp2_clk_unit *pxa_unit)
+{
+ struct mmp_clk_reset_cell *cells;
+ int i, nr_resets;
+
+ nr_resets = ARRAY_SIZE(apbc_gate_clks);
+ cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
+ if (!cells)
+ return;
+
+ for (i = 0; i < nr_resets; i++) {
+ cells[i].clk_id = apbc_gate_clks[i].id;
+ cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
+ cells[i].flags = 0;
+ cells[i].lock = apbc_gate_clks[i].lock;
+ cells[i].bits = 0x4;
+ }
+
+ mmp_clk_reset_register(np, cells, nr_resets);
+}
+
+static void __init mmp2_clk_init(struct device_node *np)
+{
+ struct mmp2_clk_unit *pxa_unit;
+
+ pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return;
+
+ pxa_unit->mpmu_base = of_iomap(np, 0);
+ if (!pxa_unit->mpmu_base) {
+ pr_err("failed to map mpmu registers\n");
+ return;
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+ if (!pxa_unit->mpmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+
+ pxa_unit->apbc_base = of_iomap(np, 2);
+ if (!pxa_unit->apbc_base) {
+ pr_err("failed to map apbc registers\n");
+ return;
+ }
+
+ mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
+
+ mmp2_pll_init(pxa_unit);
+
+ mmp2_apb_periph_clk_init(pxa_unit);
+
+ mmp2_axi_periph_clk_init(pxa_unit);
+
+ mmp2_clk_reset_init(np, pxa_unit);
+}
+
+CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
new file mode 100644
index 000000000000..5b1810dc4bd2
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -0,0 +1,279 @@
+/*
+ * pxa168 clock framework source file
+ *
+ * Copyright (C) 2012 Marvell
+ * Chao Xie <xiechao.mail@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/marvell,pxa168.h>
+
+#include "clk.h"
+#include "reset.h"
+
+#define APBC_RTC 0x28
+#define APBC_TWSI0 0x2c
+#define APBC_KPC 0x30
+#define APBC_UART0 0x0
+#define APBC_UART1 0x4
+#define APBC_GPIO 0x8
+#define APBC_PWM0 0xc
+#define APBC_PWM1 0x10
+#define APBC_PWM2 0x14
+#define APBC_PWM3 0x18
+#define APBC_SSP0 0x81c
+#define APBC_SSP1 0x820
+#define APBC_SSP2 0x84c
+#define APBC_SSP3 0x858
+#define APBC_SSP4 0x85c
+#define APBC_TWSI1 0x6c
+#define APBC_UART2 0x70
+#define APMU_SDH0 0x54
+#define APMU_SDH1 0x58
+#define APMU_USB 0x5c
+#define APMU_DISP0 0x4c
+#define APMU_CCIC0 0x50
+#define APMU_DFC 0x60
+#define MPMU_UART_PLL 0x14
+
+struct pxa168_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *mpmu_base;
+ void __iomem *apmu_base;
+ void __iomem *apbc_base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+ {PXA168_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
+ {PXA168_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
+ {PXA168_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+ {PXA168_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
+ {PXA168_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
+ {PXA168_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
+ {PXA168_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
+ {PXA168_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
+ {PXA168_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
+ {PXA168_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
+ {PXA168_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
+ {PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
+ {PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
+ {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
+ {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
+ {PXA168_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+ .factor = 2,
+ .num_mask = 0x1fff,
+ .den_mask = 0x1fff,
+ .num_shift = 16,
+ .den_shift = 0,
+};
+
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
+ {.num = 8125, .den = 1536}, /* 14.7456 MHz */
+};
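The 8125/1536 entry is what puts the UART reference clock at 14.7456 MHz. Assuming the MMP factor clock computes the rate as roughly parent * den / (num * factor), with pll1_4 = 156 MHz as its parent, the arithmetic works out as follows (illustration only, not kernel code):

#include <stdio.h>

/* Worked example for the single uart_factor_tbl entry above. */
int main(void)
{
	unsigned long long parent = 624000000ULL / 4;	/* pll1_4 = 156 MHz */
	unsigned long num = 8125, den = 1536, factor = 2;
	unsigned long long rate = parent * den / (num * factor);

	printf("uart_pll = %llu Hz\n", rate);		/* 14745600 */
	return 0;
}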
+
+static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
+{
+ struct clk *clk;
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+ ARRAY_SIZE(fixed_rate_clks));
+
+ mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+ ARRAY_SIZE(fixed_factor_clks));
+
+ clk = mmp_clk_register_factor("uart_pll", "pll1_4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->mpmu_base + MPMU_UART_PLL,
+ &uart_factor_masks, uart_factor_tbl,
+ ARRAY_SIZE(uart_factor_tbl), NULL);
+ mmp_clk_add(unit, PXA168_CLK_UART_PLL, clk);
+}
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+static DEFINE_SPINLOCK(uart2_lock);
+static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
+
+static DEFINE_SPINLOCK(ssp0_lock);
+static DEFINE_SPINLOCK(ssp1_lock);
+static DEFINE_SPINLOCK(ssp2_lock);
+static DEFINE_SPINLOCK(ssp3_lock);
+static DEFINE_SPINLOCK(ssp4_lock);
+static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
+
+static DEFINE_SPINLOCK(reset_lock);
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+ {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+ {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+ {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
+ {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
+ {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
+ {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
+ {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
+ {0, "ssp4_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP4, 4, 3, 0, &ssp4_lock},
+};
+
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+ {PXA168_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA168_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA168_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA168_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA168_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA168_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA168_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA168_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
+ /* These gate clocks have a mux as their parent. */
+ {PXA168_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
+ {PXA168_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
+ {PXA168_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
+ {PXA168_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
+ {PXA168_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
+ {PXA168_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x3, 0x3, 0x0, 0, &ssp2_lock},
+ {PXA168_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x3, 0x3, 0x0, 0, &ssp3_lock},
+ {PXA168_CLK_SSP4, "ssp4_clk", "ssp4_mux", CLK_SET_RATE_PARENT, APBC_SSP4, 0x3, 0x3, 0x0, 0, &ssp4_lock},
+};
+
+static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(apbc_mux_clks));
+
+ mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(apbc_gate_clks));
+}
+
+static DEFINE_SPINLOCK(sdh0_lock);
+static DEFINE_SPINLOCK(sdh1_lock);
+static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
+
+static DEFINE_SPINLOCK(usb_lock);
+
+static DEFINE_SPINLOCK(disp0_lock);
+static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
+
+static DEFINE_SPINLOCK(ccic0_lock);
+static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
+static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
+
+static struct mmp_param_mux_clk apmu_mux_clks[] = {
+ {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
+ {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
+ {0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
+ {0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
+ {0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
+};
+
+static struct mmp_param_div_clk apmu_div_clks[] = {
+ {0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+ {PXA168_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
+ {PXA168_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+ {PXA168_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
+ /* These gate clocks have a mux as their parent. */
+ {PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
+ {PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
+ {PXA168_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+ {PXA168_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
+ {PXA168_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
+ {PXA168_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
+};
+
+static void pxa168_axi_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_mux_clks));
+
+ mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_div_clks));
+
+ mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_gate_clks));
+}
+
+static void pxa168_clk_reset_init(struct device_node *np,
+ struct pxa168_clk_unit *pxa_unit)
+{
+ struct mmp_clk_reset_cell *cells;
+ int i, nr_resets;
+
+ nr_resets = ARRAY_SIZE(apbc_gate_clks);
+ cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
+ if (!cells)
+ return;
+
+ for (i = 0; i < nr_resets; i++) {
+ cells[i].clk_id = apbc_gate_clks[i].id;
+ cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
+ cells[i].flags = 0;
+ cells[i].lock = apbc_gate_clks[i].lock;
+ cells[i].bits = 0x4;
+ }
+
+ mmp_clk_reset_register(np, cells, nr_resets);
+}
+
+static void __init pxa168_clk_init(struct device_node *np)
+{
+ struct pxa168_clk_unit *pxa_unit;
+
+ pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return;
+
+ pxa_unit->mpmu_base = of_iomap(np, 0);
+ if (!pxa_unit->mpmu_base) {
+ pr_err("failed to map mpmu registers\n");
+ return;
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+
+ pxa_unit->apbc_base = of_iomap(np, 2);
+ if (!pxa_unit->apbc_base) {
+ pr_err("failed to map apbc registers\n");
+ return;
+ }
+
+ mmp_clk_init(np, &pxa_unit->unit, PXA168_NR_CLKS);
+
+ pxa168_pll_init(pxa_unit);
+
+ pxa168_apb_periph_clk_init(pxa_unit);
+
+ pxa168_axi_periph_clk_init(pxa_unit);
+
+ pxa168_clk_reset_init(np, pxa_unit);
+}
+
+CLK_OF_DECLARE(pxa168_clk, "marvell,pxa168-clock", pxa168_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
new file mode 100644
index 000000000000..5e3c80dad336
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -0,0 +1,301 @@
+/*
+ * pxa910 clock framework source file
+ *
+ * Copyright (C) 2012 Marvell
+ * Chao Xie <xiechao.mail@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/marvell,pxa910.h>
+
+#include "clk.h"
+#include "reset.h"
+
+#define APBC_RTC 0x28
+#define APBC_TWSI0 0x2c
+#define APBC_KPC 0x18
+#define APBC_UART0 0x0
+#define APBC_UART1 0x4
+#define APBC_GPIO 0x8
+#define APBC_PWM0 0xc
+#define APBC_PWM1 0x10
+#define APBC_PWM2 0x14
+#define APBC_PWM3 0x18
+#define APBC_SSP0 0x1c
+#define APBC_SSP1 0x20
+#define APBC_SSP2 0x4c
+#define APBCP_TWSI1 0x28
+#define APBCP_UART2 0x1c
+#define APMU_SDH0 0x54
+#define APMU_SDH1 0x58
+#define APMU_USB 0x5c
+#define APMU_DISP0 0x4c
+#define APMU_CCIC0 0x50
+#define APMU_DFC 0x60
+#define MPMU_UART_PLL 0x14
+
+struct pxa910_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *mpmu_base;
+ void __iomem *apmu_base;
+ void __iomem *apbc_base;
+ void __iomem *apbcp_base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+ {PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
+ {PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
+ {PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+ {PXA910_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
+ {PXA910_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
+ {PXA910_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
+ {PXA910_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
+ {PXA910_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
+ {PXA910_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
+ {PXA910_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
+ {PXA910_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
+ {PXA910_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
+ {PXA910_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
+ {PXA910_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
+ {PXA910_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
+ {PXA910_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+ .factor = 2,
+ .num_mask = 0x1fff,
+ .den_mask = 0x1fff,
+ .num_shift = 16,
+ .den_shift = 0,
+};
+
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
+ {.num = 8125, .den = 1536}, /* 14.7456 MHz */
+};
+
+static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit)
+{
+ struct clk *clk;
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+ ARRAY_SIZE(fixed_rate_clks));
+
+ mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+ ARRAY_SIZE(fixed_factor_clks));
+
+ clk = mmp_clk_register_factor("uart_pll", "pll1_4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->mpmu_base + MPMU_UART_PLL,
+ &uart_factor_masks, uart_factor_tbl,
+ ARRAY_SIZE(uart_factor_tbl), NULL);
+ mmp_clk_add(unit, PXA910_CLK_UART_PLL, clk);
+}
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+static DEFINE_SPINLOCK(uart2_lock);
+static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
+
+static DEFINE_SPINLOCK(ssp0_lock);
+static DEFINE_SPINLOCK(ssp1_lock);
+static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
+
+static DEFINE_SPINLOCK(reset_lock);
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+ {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+ {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+ {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
+ {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
+};
+
+static struct mmp_param_mux_clk apbcp_mux_clks[] = {
+ {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock},
+};
+
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+ {PXA910_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA910_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA910_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA910_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA910_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA910_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA910_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
+ {PXA910_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
+ /* These gate clocks have a mux as their parent. */
+ {PXA910_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
+ {PXA910_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
+ {PXA910_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
+ {PXA910_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
+};
+
+static struct mmp_param_gate_clk apbcp_gate_clks[] = {
+ {PXA910_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBCP_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
+ /* These gate clocks have a mux as their parent. */
+ {PXA910_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
+};
+
+static void pxa910_apb_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(apbc_mux_clks));
+
+ mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->apbcp_base,
+ ARRAY_SIZE(apbcp_mux_clks));
+
+ mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(apbc_gate_clks));
+
+ mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->apbcp_base,
+ ARRAY_SIZE(apbcp_gate_clks));
+}
+
+static DEFINE_SPINLOCK(sdh0_lock);
+static DEFINE_SPINLOCK(sdh1_lock);
+static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
+
+static DEFINE_SPINLOCK(usb_lock);
+
+static DEFINE_SPINLOCK(disp0_lock);
+static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
+
+static DEFINE_SPINLOCK(ccic0_lock);
+static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
+static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
+
+static struct mmp_param_mux_clk apmu_mux_clks[] = {
+ {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
+ {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
+ {0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
+ {0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
+ {0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
+};
+
+static struct mmp_param_div_clk apmu_div_clks[] = {
+ {0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+ {PXA910_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
+ {PXA910_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+ {PXA910_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
+ /* These gate clocks have a mux as their parent. */
+ {PXA910_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
+ {PXA910_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
+ {PXA910_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+ {PXA910_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
+ {PXA910_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
+ {PXA910_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
+};
+
+static void pxa910_axi_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_mux_clks));
+
+ mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_div_clks));
+
+ mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(apmu_gate_clks));
+}
+
+static void pxa910_clk_reset_init(struct device_node *np,
+ struct pxa910_clk_unit *pxa_unit)
+{
+ struct mmp_clk_reset_cell *cells;
+ int i, base, nr_resets_apbc, nr_resets_apbcp, nr_resets;
+
+ nr_resets_apbc = ARRAY_SIZE(apbc_gate_clks);
+ nr_resets_apbcp = ARRAY_SIZE(apbcp_gate_clks);
+ nr_resets = nr_resets_apbc + nr_resets_apbcp;
+ cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
+ if (!cells)
+ return;
+
+ base = 0;
+ for (i = 0; i < nr_resets_apbc; i++) {
+ cells[base + i].clk_id = apbc_gate_clks[i].id;
+ cells[base + i].reg =
+ pxa_unit->apbc_base + apbc_gate_clks[i].offset;
+ cells[base + i].flags = 0;
+ cells[base + i].lock = apbc_gate_clks[i].lock;
+ cells[base + i].bits = 0x4;
+ }
+
+ base = nr_resets_apbc;
+ for (i = 0; i < nr_resets_apbcp; i++) {
+ cells[base + i].clk_id = apbcp_gate_clks[i].id;
+ cells[base + i].reg =
+ pxa_unit->apbcp_base + apbcp_gate_clks[i].offset;
+ cells[base + i].flags = 0;
+ cells[base + i].lock = apbcp_gate_clks[i].lock;
+ cells[base + i].bits = 0x4;
+ }
+
+ mmp_clk_reset_register(np, cells, nr_resets);
+}
+
+static void __init pxa910_clk_init(struct device_node *np)
+{
+ struct pxa910_clk_unit *pxa_unit;
+
+ pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return;
+
+ pxa_unit->mpmu_base = of_iomap(np, 0);
+ if (!pxa_unit->mpmu_base) {
+ pr_err("failed to map mpmu registers\n");
+ return;
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+
+ pxa_unit->apbc_base = of_iomap(np, 2);
+ if (!pxa_unit->apbc_base) {
+ pr_err("failed to map apbc registers\n");
+ return;
+ }
+
+ pxa_unit->apbcp_base = of_iomap(np, 3);
+ if (!pxa_unit->apbcp_base) {
+ pr_err("failed to map apbcp registers\n");
+ return;
+ }
+
+ mmp_clk_init(np, &pxa_unit->unit, PXA910_NR_CLKS);
+
+ pxa910_pll_init(pxa_unit);
+
+ pxa910_apb_periph_clk_init(pxa_unit);
+
+ pxa910_axi_periph_clk_init(pxa_unit);
+
+ pxa910_clk_reset_init(np, pxa_unit);
+}
+
+CLK_OF_DECLARE(pxa910_clk, "marvell,pxa910-clock", pxa910_clk_init);
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 014396b028a2..93e967c0f972 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -47,7 +47,7 @@
static DEFINE_SPINLOCK(clk_lock);
-static struct clk_factor_masks uart_factor_masks = {
+static struct mmp_clk_factor_masks uart_factor_masks = {
.factor = 2,
.num_mask = 0x1fff,
.den_mask = 0x1fff,
@@ -55,7 +55,7 @@ static struct clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct clk_factor_tbl uart_factor_tbl[] = {
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
{.num = 8125, .den = 1536}, /*14.745MHZ */
};
@@ -158,7 +158,7 @@ void __init pxa168_clk_init(void)
uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
mpmu_base + MPMU_UART_PLL,
&uart_factor_masks, uart_factor_tbl,
- ARRAY_SIZE(uart_factor_tbl));
+ ARRAY_SIZE(uart_factor_tbl), &clk_lock);
clk_set_rate(uart_pll, 14745600);
clk_register_clkdev(uart_pll, "uart_pll", NULL);
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 9efc6a47535d..993abcdb32cc 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -45,7 +45,7 @@
static DEFINE_SPINLOCK(clk_lock);
-static struct clk_factor_masks uart_factor_masks = {
+static struct mmp_clk_factor_masks uart_factor_masks = {
.factor = 2,
.num_mask = 0x1fff,
.den_mask = 0x1fff,
@@ -53,7 +53,7 @@ static struct clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct clk_factor_tbl uart_factor_tbl[] = {
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
{.num = 8125, .den = 1536}, /*14.745MHZ */
};
@@ -163,7 +163,7 @@ void __init pxa910_clk_init(void)
uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
mpmu_base + MPMU_UART_PLL,
&uart_factor_masks, uart_factor_tbl,
- ARRAY_SIZE(uart_factor_tbl));
+ ARRAY_SIZE(uart_factor_tbl), &clk_lock);
clk_set_rate(uart_pll, 14745600);
clk_register_clkdev(uart_pll, "uart_pll", NULL);
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
new file mode 100644
index 000000000000..cf038ef54c59
--- /dev/null
+++ b/drivers/clk/mmp/clk.c
@@ -0,0 +1,192 @@
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+
+void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
+ int nr_clks)
+{
+ struct clk **clk_table;
+
+ clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
+ if (!clk_table)
+ return;
+
+ unit->clk_table = clk_table;
+ unit->nr_clks = nr_clks;
+ unit->clk_data.clks = clk_table;
+ unit->clk_data.clk_num = nr_clks;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &unit->clk_data);
+}
+
+void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_fixed_rate_clk *clks,
+ int size)
+{
+ int i;
+ struct clk *clk;
+
+ for (i = 0; i < size; i++) {
+ clk = clk_register_fixed_rate(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ clks[i].fixed_rate);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
+
+void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_fixed_factor_clk *clks,
+ int size)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ clk = clk_register_fixed_factor(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags, clks[i].mult,
+ clks[i].div);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
+
+void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_general_gate_clk *clks,
+ void __iomem *base, int size)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ clk = clk_register_gate(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].bit_idx,
+ clks[i].gate_flags,
+ clks[i].lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
+
+void mmp_register_gate_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_gate_clk *clks,
+ void __iomem *base, int size)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ clk = mmp_clk_register_gate(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].mask,
+ clks[i].val_enable,
+ clks[i].val_disable,
+ clks[i].gate_flags,
+ clks[i].lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
+
+void mmp_register_mux_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_mux_clk *clks,
+ void __iomem *base, int size)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ clk = clk_register_mux(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].num_parents,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].shift,
+ clks[i].width,
+ clks[i].mux_flags,
+ clks[i].lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
+
+void mmp_register_div_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_div_clk *clks,
+ void __iomem *base, int size)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ clk = clk_register_divider(NULL, clks[i].name,
+ clks[i].parent_name,
+ clks[i].flags,
+ base + clks[i].offset,
+ clks[i].shift,
+ clks[i].width,
+ clks[i].div_flags,
+ clks[i].lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
+
+void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
+ struct clk *clk)
+{
+ if (IS_ERR_OR_NULL(clk)) {
+ pr_err("CLK %d has invalid pointer %p\n", id, clk);
+ return;
+ }
+ if (id >= unit->nr_clks) {
+ pr_err("CLK %d is invalid\n", id);
+ return;
+ }
+
+ unit->clk_table[id] = clk;
+}
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index ab86dd4a416a..adf9b711b037 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -7,19 +7,123 @@
#define APBC_NO_BUS_CTRL BIT(0)
#define APBC_POWER_CTRL BIT(1)
-struct clk_factor_masks {
- unsigned int factor;
- unsigned int num_mask;
- unsigned int den_mask;
- unsigned int num_shift;
- unsigned int den_shift;
+
+/* Clock type "factor" */
+struct mmp_clk_factor_masks {
+ unsigned int factor;
+ unsigned int num_mask;
+ unsigned int den_mask;
+ unsigned int num_shift;
+ unsigned int den_shift;
};
-struct clk_factor_tbl {
+struct mmp_clk_factor_tbl {
unsigned int num;
unsigned int den;
};
+struct mmp_clk_factor {
+ struct clk_hw hw;
+ void __iomem *base;
+ struct mmp_clk_factor_masks *masks;
+ struct mmp_clk_factor_tbl *ftbl;
+ unsigned int ftbl_cnt;
+ spinlock_t *lock;
+};
+
+extern struct clk *mmp_clk_register_factor(const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *base, struct mmp_clk_factor_masks *masks,
+ struct mmp_clk_factor_tbl *ftbl, unsigned int ftbl_cnt,
+ spinlock_t *lock);
+
+/* Clock type "mix" */
+#define MMP_CLK_BITS_MASK(width, shift) \
+ (((1 << (width)) - 1) << (shift))
+#define MMP_CLK_BITS_GET_VAL(data, width, shift) \
+ ((data & MMP_CLK_BITS_MASK(width, shift)) >> (shift))
+#define MMP_CLK_BITS_SET_VAL(val, width, shift) \
+ (((val) << (shift)) & MMP_CLK_BITS_MASK(width, shift))
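These are plain shift-and-mask helpers; the mix clock uses them to pull divider and mux fields out of a single control register. A small self-contained sketch, with a made-up register value and field geometry, shows the intended usage:

#include <stdio.h>

#define MMP_CLK_BITS_MASK(width, shift)		(((1 << (width)) - 1) << (shift))
#define MMP_CLK_BITS_GET_VAL(data, width, shift) \
	((data & MMP_CLK_BITS_MASK(width, shift)) >> (shift))
#define MMP_CLK_BITS_SET_VAL(val, width, shift) \
	(((val) << (shift)) & MMP_CLK_BITS_MASK(width, shift))

int main(void)
{
	unsigned int reg = 0x000000c0;	/* hypothetical control register */
	unsigned int mux;

	/* read a 3-bit mux field at bit 6 */
	mux = MMP_CLK_BITS_GET_VAL(reg, 3, 6);
	printf("mux = %u\n", mux);		/* 3 */

	/* select parent 1: clear the field, then OR in the new value */
	reg &= ~MMP_CLK_BITS_MASK(3, 6);
	reg |= MMP_CLK_BITS_SET_VAL(1, 3, 6);
	printf("reg = 0x%08x\n", reg);		/* 0x00000040 */
	return 0;
}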
+
+enum {
+ MMP_CLK_MIX_TYPE_V1,
+ MMP_CLK_MIX_TYPE_V2,
+ MMP_CLK_MIX_TYPE_V3,
+};
+
+/* The register layout */
+struct mmp_clk_mix_reg_info {
+ void __iomem *reg_clk_ctrl;
+ void __iomem *reg_clk_sel;
+ u8 width_div;
+ u8 shift_div;
+ u8 width_mux;
+ u8 shift_mux;
+ u8 bit_fc;
+};
+
+/* The clock table suggested by the caller. */
+struct mmp_clk_mix_clk_table {
+ unsigned long rate;
+ u8 parent_index;
+ unsigned int divisor;
+ unsigned int valid;
+};
+
+struct mmp_clk_mix_config {
+ struct mmp_clk_mix_reg_info reg_info;
+ struct mmp_clk_mix_clk_table *table;
+ unsigned int table_size;
+ u32 *mux_table;
+ struct clk_div_table *div_table;
+ u8 div_flags;
+ u8 mux_flags;
+};
+
+struct mmp_clk_mix {
+ struct clk_hw hw;
+ struct mmp_clk_mix_reg_info reg_info;
+ struct mmp_clk_mix_clk_table *table;
+ u32 *mux_table;
+ struct clk_div_table *div_table;
+ unsigned int table_size;
+ u8 div_flags;
+ u8 mux_flags;
+ unsigned int type;
+ spinlock_t *lock;
+};
+
+extern const struct clk_ops mmp_clk_mix_ops;
+extern struct clk *mmp_clk_register_mix(struct device *dev,
+ const char *name,
+ const char **parent_names,
+ u8 num_parents,
+ unsigned long flags,
+ struct mmp_clk_mix_config *config,
+ spinlock_t *lock);
+
+
+/* Clock type "gate". MMP private gate */
+#define MMP_CLK_GATE_NEED_DELAY BIT(0)
+
+struct mmp_clk_gate {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u32 mask;
+ u32 val_enable;
+ u32 val_disable;
+ unsigned int flags;
+ spinlock_t *lock;
+};
+
+extern const struct clk_ops mmp_clk_gate_ops;
+extern struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u32 mask, u32 val_enable,
+ u32 val_disable, unsigned int gate_flags,
+ spinlock_t *lock);
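Unlike the generic gate, this variant takes a mask together with explicit enable and disable values, so a single gate can drive several control bits at once (the 0x3/0x3/0x0 pattern used for the APBC gates above, or 0x1b for the APMU ones). The user-space sketch below shows the read-modify-write this implies while leaving unrelated bits untouched; it is an illustration, not the clk-gate.c added by this series:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for one APBC control register. */
static uint32_t fake_reg;

static void gate_update(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = *reg;		/* readl() in the driver */

	tmp &= ~mask;			/* drop the gate-owned bits */
	tmp |= val;			/* apply val_enable or val_disable */
	*reg = tmp;			/* writel() in the driver */
}

int main(void)
{
	fake_reg = 0x80;			/* unrelated bits stay untouched */
	gate_update(&fake_reg, 0x3, 0x3);	/* enable */
	printf("enabled:  0x%02x\n", fake_reg);	/* 0x83 */
	gate_update(&fake_reg, 0x3, 0x0);	/* disable */
	printf("disabled: 0x%02x\n", fake_reg);	/* 0x80 */
	return 0;
}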
+
+
extern struct clk *mmp_clk_register_pll2(const char *name,
const char *parent_name, unsigned long flags);
extern struct clk *mmp_clk_register_apbc(const char *name,
@@ -28,8 +132,108 @@ extern struct clk *mmp_clk_register_apbc(const char *name,
extern struct clk *mmp_clk_register_apmu(const char *name,
const char *parent_name, void __iomem *base, u32 enable_mask,
spinlock_t *lock);
-extern struct clk *mmp_clk_register_factor(const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *base, struct clk_factor_masks *masks,
- struct clk_factor_tbl *ftbl, unsigned int ftbl_cnt);
+
+struct mmp_clk_unit {
+ unsigned int nr_clks;
+ struct clk **clk_table;
+ struct clk_onecell_data clk_data;
+};
+
+struct mmp_param_fixed_rate_clk {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long fixed_rate;
+};
+void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_fixed_rate_clk *clks,
+ int size);
+
+struct mmp_param_fixed_factor_clk {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long mult;
+ unsigned long div;
+ unsigned long flags;
+};
+void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_fixed_factor_clk *clks,
+ int size);
+
+struct mmp_param_general_gate_clk {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 bit_idx;
+ u8 gate_flags;
+ spinlock_t *lock;
+};
+void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_general_gate_clk *clks,
+ void __iomem *base, int size);
+
+struct mmp_param_gate_clk {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u32 mask;
+ u32 val_enable;
+ u32 val_disable;
+ unsigned int gate_flags;
+ spinlock_t *lock;
+};
+void mmp_register_gate_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_gate_clk *clks,
+ void __iomem *base, int size);
+
+struct mmp_param_mux_clk {
+ unsigned int id;
+ char *name;
+ const char **parent_name;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 mux_flags;
+ spinlock_t *lock;
+};
+void mmp_register_mux_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_mux_clk *clks,
+ void __iomem *base, int size);
+
+struct mmp_param_div_clk {
+ unsigned int id;
+ char *name;
+ const char *parent_name;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u8 div_flags;
+ spinlock_t *lock;
+};
+void mmp_register_div_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_div_clk *clks,
+ void __iomem *base, int size);
+
+#define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc) \
+{ \
+ .width_div = (w_d), \
+ .shift_div = (s_d), \
+ .width_mux = (w_m), \
+ .shift_mux = (s_m), \
+ .bit_fc = (fc), \
+}
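For illustration, a mix clock description is normally a config built around this macro. The field widths, shifts and FC bit below are invented and do not describe real MMP hardware; real configs, such as sdh_mix_config in clk-of-mmp2.c above, also fill in reg_clk_ctrl at init time:

#include "clk.h"

/*
 * Hypothetical peripheral: 3-bit divider at bit 10, 2-bit mux at bit 6,
 * frequency-change request bit 31.  Illustrative values only.
 */
static struct mmp_clk_mix_config example_mix_config = {
	.reg_info = DEFINE_MIX_REG_INFO(3, 10, 2, 6, 31),
};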
+
+void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
+ int nr_clks);
+void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
+ struct clk *clk);
#endif
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
new file mode 100644
index 000000000000..b54da1fe73f0
--- /dev/null
+++ b/drivers/clk/mmp/reset.c
@@ -0,0 +1,99 @@
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/reset-controller.h>
+
+#include "reset.h"
+
+#define rcdev_to_unit(rcdev) container_of(rcdev, struct mmp_clk_reset_unit, rcdev)
+
+static int mmp_of_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
+ struct mmp_clk_reset_cell *cell;
+ int i;
+
+ if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
+ return -EINVAL;
+
+ for (i = 0; i < rcdev->nr_resets; i++) {
+ cell = &unit->cells[i];
+ if (cell->clk_id == reset_spec->args[0])
+ break;
+ }
+
+ if (i == rcdev->nr_resets)
+ return -EINVAL;
+
+ return i;
+}
+
+static int mmp_clk_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
+ struct mmp_clk_reset_cell *cell;
+ unsigned long flags = 0;
+ u32 val;
+
+ cell = &unit->cells[id];
+ if (cell->lock)
+ spin_lock_irqsave(cell->lock, flags);
+
+ val = readl(cell->reg);
+ val |= cell->bits;
+ writel(val, cell->reg);
+
+ if (cell->lock)
+ spin_unlock_irqrestore(cell->lock, flags);
+
+ return 0;
+}
+
+static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
+ struct mmp_clk_reset_cell *cell;
+ unsigned long flags = 0;
+ u32 val;
+
+ cell = &unit->cells[id];
+ if (cell->lock)
+ spin_lock_irqsave(cell->lock, flags);
+
+ val = readl(cell->reg);
+ val &= ~cell->bits;
+ writel(val, cell->reg);
+
+ if (cell->lock)
+ spin_unlock_irqrestore(cell->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops mmp_clk_reset_ops = {
+ .assert = mmp_clk_reset_assert,
+ .deassert = mmp_clk_reset_deassert,
+};
+
+void mmp_clk_reset_register(struct device_node *np,
+ struct mmp_clk_reset_cell *cells, int nr_resets)
+{
+ struct mmp_clk_reset_unit *unit;
+
+ unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+ if (!unit)
+ return;
+
+ unit->cells = cells;
+ unit->rcdev.of_reset_n_cells = 1;
+ unit->rcdev.nr_resets = nr_resets;
+ unit->rcdev.ops = &mmp_clk_reset_ops;
+ unit->rcdev.of_node = np;
+ unit->rcdev.of_xlate = mmp_of_reset_xlate;
+
+ reset_controller_register(&unit->rcdev);
+}
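The of_xlate hook above translates the clock ID carried in the device-tree reset specifier into an index into the cells array, so peripheral drivers can use the generic reset API unchanged. A hedged consumer sketch follows; the function name and the settle delay are hypothetical:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Sketch only: pulse the reset line of a peripheral bound to 'dev'. */
static int example_reset_peripheral(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* sets the 0x4 reset bit above */
	udelay(10);			/* hypothetical settle time */
	reset_control_deassert(rst);	/* clears it again */

	return 0;
}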
diff --git a/drivers/clk/mmp/reset.h b/drivers/clk/mmp/reset.h
new file mode 100644
index 000000000000..be8b1a7000f7
--- /dev/null
+++ b/drivers/clk/mmp/reset.h
@@ -0,0 +1,31 @@
+#ifndef __MACH_MMP_CLK_RESET_H
+#define __MACH_MMP_CLK_RESET_H
+
+#include <linux/reset-controller.h>
+
+#define MMP_RESET_INVERT 1
+
+struct mmp_clk_reset_cell {
+ unsigned int clk_id;
+ void __iomem *reg;
+ u32 bits;
+ unsigned int flags;
+ spinlock_t *lock;
+};
+
+struct mmp_clk_reset_unit {
+ struct reset_controller_dev rcdev;
+ struct mmp_clk_reset_cell *cells;
+};
+
+#ifdef CONFIG_RESET_CONTROLLER
+void mmp_clk_reset_register(struct device_node *np,
+ struct mmp_clk_reset_cell *cells, int nr_resets);
+#else
+static inline void mmp_clk_reset_register(struct device_node *np,
+ struct mmp_clk_reset_cell *cells, int nr_resets)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile
index 4ff2abcd500b..38e915344605 100644
--- a/drivers/clk/pxa/Makefile
+++ b/drivers/clk/pxa/Makefile
@@ -1,2 +1,3 @@
obj-y += clk-pxa.o
+obj-$(CONFIG_PXA25x) += clk-pxa25x.o
obj-$(CONFIG_PXA27x) += clk-pxa27x.o
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index ef3c05389c0a..4e834753ab09 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -26,12 +26,20 @@ static struct clk_onecell_data onecell_data = {
.clk_num = CLK_MAX,
};
-#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk_cken, hw)
+struct pxa_clk {
+ struct clk_hw hw;
+ struct clk_fixed_factor lp;
+ struct clk_fixed_factor hp;
+ struct clk_gate gate;
+ bool (*is_in_low_power)(void);
+};
+
+#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)
static unsigned long cken_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct pxa_clk_cken *pclk = to_pxa_clk(hw);
+ struct pxa_clk *pclk = to_pxa_clk(hw);
struct clk_fixed_factor *fix;
if (!pclk->is_in_low_power || pclk->is_in_low_power())
@@ -48,7 +56,7 @@ static struct clk_ops cken_rate_ops = {
static u8 cken_get_parent(struct clk_hw *hw)
{
- struct pxa_clk_cken *pclk = to_pxa_clk(hw);
+ struct pxa_clk *pclk = to_pxa_clk(hw);
if (!pclk->is_in_low_power)
return 0;
@@ -69,29 +77,32 @@ void __init clkdev_pxa_register(int ckid, const char *con_id,
clk_register_clkdev(clk, con_id, dev_id);
}
-int __init clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks)
+int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
{
int i;
- struct pxa_clk_cken *pclk;
+ struct pxa_clk *pxa_clk;
struct clk *clk;
for (i = 0; i < nb_clks; i++) {
- pclk = clks + i;
- pclk->gate.lock = &lock;
- clk = clk_register_composite(NULL, pclk->name,
- pclk->parent_names, 2,
- &pclk->hw, &cken_mux_ops,
- &pclk->hw, &cken_rate_ops,
- &pclk->gate.hw, &clk_gate_ops,
- pclk->flags);
- clkdev_pxa_register(pclk->ckid, pclk->con_id, pclk->dev_id,
- clk);
+ pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
+ pxa_clk->is_in_low_power = clks[i].is_in_low_power;
+ pxa_clk->lp = clks[i].lp;
+ pxa_clk->hp = clks[i].hp;
+ pxa_clk->gate = clks[i].gate;
+ pxa_clk->gate.lock = &lock;
+ clk = clk_register_composite(NULL, clks[i].name,
+ clks[i].parent_names, 2,
+ &pxa_clk->hw, &cken_mux_ops,
+ &pxa_clk->hw, &cken_rate_ops,
+ &pxa_clk->gate.hw, &clk_gate_ops,
+ clks[i].flags);
+ clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
+ clks[i].dev_id, clk);
}
return 0;
}
-static void __init pxa_dt_clocks_init(struct device_node *np)
+void __init clk_pxa_dt_common_init(struct device_node *np)
{
of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}
-CLK_OF_DECLARE(pxa_clks, "marvell,pxa-clocks", pxa_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index 5fe219d06b49..323965430111 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -25,7 +25,7 @@
static struct clk_ops name ## _rate_ops = { \
.recalc_rate = name ## _get_rate, \
}; \
- static struct clk *clk_register_ ## name(void) \
+ static struct clk * __init clk_register_ ## name(void) \
{ \
return clk_register_composite(NULL, clk_name, \
name ## _parents, \
@@ -40,7 +40,7 @@
static struct clk_ops name ## _rate_ops = { \
.recalc_rate = name ## _get_rate, \
}; \
- static struct clk *clk_register_ ## name(void) \
+ static struct clk * __init clk_register_ ## name(void) \
{ \
return clk_register_composite(NULL, clk_name, \
name ## _parents, \
@@ -66,7 +66,7 @@
* | Clock | --- | / div_hp |
* +------------+ +-----------+
*/
-struct pxa_clk_cken {
+struct desc_clk_cken {
struct clk_hw hw;
int ckid;
const char *name;
@@ -102,6 +102,7 @@ static int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
extern void clkdev_pxa_register(int ckid, const char *con_id,
const char *dev_id, struct clk *clk);
-extern int clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks);
+extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
+void clk_pxa_dt_common_init(struct device_node *np);
#endif
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
new file mode 100644
index 000000000000..6cd88d963a7f
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -0,0 +1,273 @@
+/*
+ * Marvell PXA25x family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/pxa25x.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
+ * should go away.
+ */
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <mach/pxa25x.h>
+#include <mach/pxa2xx-regs.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+ PXA_CORE_RUN = 0,
+ PXA_CORE_TURBO,
+};
+
+/*
+ * Various clock factors driven by the CCCR register.
+ */
+
+/* Crystal Frequency to Memory Frequency Multiplier (L) */
+static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, };
+
+/* Memory Frequency to Run Mode Frequency Multiplier (M) */
+static unsigned char M_clk_mult[4] = { 0, 1, 2, 4 };
+
+/* Run Mode Frequency to Turbo Mode Frequency Multiplier (N) */
+/* Note: we store the value N * 2 here. */
+static unsigned char N2_clk_mult[8] = { 0, 0, 2, 3, 4, 0, 6, 0 };
+
+static const char * const get_freq_khz[] = {
+ "core", "run", "cpll", "memory"
+};
+
+/*
+ * Get the clock frequency as reflected by CCCR and the turbo flag.
+ * We assume these values have been applied via a frequency change sequence (fcs).
+ * If info is not 0 we also display the current settings.
+ */
+unsigned int pxa25x_get_clk_frequency_khz(int info)
+{
+ struct clk *clk;
+ unsigned long clks[5];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(get_freq_khz); i++) {
+ clk = clk_get(NULL, get_freq_khz[i]);
+ if (IS_ERR(clk)) {
+ clks[i] = 0;
+ } else {
+ clks[i] = clk_get_rate(clk);
+ clk_put(clk);
+ }
+ }
+
+ if (info) {
+ pr_info("Run Mode clock: %ld.%02ldMHz\n",
+ clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+ pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+ clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+ pr_info("Memory clock: %ld.%02ldMHz\n",
+ clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+ }
+
+ return (unsigned int)clks[0];
+}
+
+static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long cccr = CCCR;
+ unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
+
+ return parent_rate / m;
+}
+PARENTS(clk_pxa25x_memory) = { "run" };
+RATE_RO_OPS(clk_pxa25x_memory, "memory");
+
+PARENTS(pxa25x_pbus95) = { "ppll_95_85mhz", "ppll_95_85mhz" };
+PARENTS(pxa25x_pbus147) = { "ppll_147_46mhz", "ppll_147_46mhz" };
+PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
+
+#define PXA25X_CKEN(dev_id, con_id, parents, mult, div, \
+ bit, is_lp, flags) \
+ PXA_CKEN(dev_id, con_id, bit, parents, mult, div, mult, div, \
+ is_lp, &CKEN, CKEN_ ## bit, flags)
+#define PXA25X_PBUS95_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
+ PXA25X_CKEN(dev_id, con_id, pxa25x_pbus95_parents, mult_hp, \
+ div_hp, bit, NULL, 0)
+#define PXA25X_PBUS147_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)\
+ PXA25X_CKEN(dev_id, con_id, pxa25x_pbus147_parents, mult_hp, \
+ div_hp, bit, NULL, 0)
+#define PXA25X_OSC3_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
+ PXA25X_CKEN(dev_id, con_id, pxa25x_osc3_parents, mult_hp, \
+ div_hp, bit, NULL, 0)
+
+#define PXA25X_CKEN_1RATE(dev_id, con_id, bit, parents, delay) \
+ PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
+ &CKEN, CKEN_ ## bit, 0)
+#define PXA25X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay) \
+ PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
+ &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+
+static struct desc_clk_cken pxa25x_clocks[] __initdata = {
+ PXA25X_PBUS95_CKEN("pxa2xx-mci.0", NULL, MMC, 1, 5, 0),
+ PXA25X_PBUS95_CKEN("pxa2xx-i2c.0", NULL, I2C, 1, 3, 0),
+ PXA25X_PBUS95_CKEN("pxa2xx-ir", "FICPCLK", FICP, 1, 2, 0),
+ PXA25X_PBUS95_CKEN("pxa25x-udc", NULL, USB, 1, 2, 5),
+ PXA25X_PBUS147_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 10, 1),
+ PXA25X_PBUS147_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 10, 1),
+ PXA25X_PBUS147_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 10, 1),
+ PXA25X_PBUS147_CKEN("pxa2xx-uart.3", NULL, HWUART, 1, 10, 1),
+ PXA25X_PBUS147_CKEN("pxa2xx-i2s", NULL, I2S, 1, 10, 0),
+ PXA25X_PBUS147_CKEN(NULL, "AC97CLK", AC97, 1, 12, 0),
+ PXA25X_OSC3_CKEN("pxa25x-ssp.0", NULL, SSP, 1, 1, 0),
+ PXA25X_OSC3_CKEN("pxa25x-nssp.1", NULL, NSSP, 1, 1, 0),
+ PXA25X_OSC3_CKEN("pxa25x-nssp.2", NULL, ASSP, 1, 1, 0),
+ PXA25X_OSC3_CKEN("pxa25x-pwm.0", NULL, PWM0, 1, 1, 0),
+ PXA25X_OSC3_CKEN("pxa25x-pwm.1", NULL, PWM1, 1, 1, 0),
+
+ PXA25X_CKEN_1RATE("pxa2xx-fb", NULL, LCD, clk_pxa25x_memory_parents, 0),
+ PXA25X_CKEN_1RATE_AO("pxa2xx-pcmcia", NULL, MEMC,
+ clk_pxa25x_memory_parents, 0),
+};
+
+static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
+{
+ unsigned long clkcfg;
+ unsigned int t;
+
+ asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+ t = clkcfg & (1 << 0);
+ if (t)
+ return PXA_CORE_TURBO;
+ return PXA_CORE_RUN;
+}
+
+static unsigned long clk_pxa25x_core_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate;
+}
+PARENTS(clk_pxa25x_core) = { "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
+
+static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long cccr = CCCR;
+ unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
+
+ return (parent_rate / n2) * 2;
+}
+PARENTS(clk_pxa25x_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa25x_run, "run");
+
+static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long clkcfg, cccr = CCCR;
+ unsigned int l, m, n2, t;
+
+ asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+ t = clkcfg & (1 << 0);
+ l = L_clk_mult[(cccr >> 0) & 0x1f];
+ m = M_clk_mult[(cccr >> 5) & 0x03];
+ n2 = N2_clk_mult[(cccr >> 7) & 0x07];
+
+ if (t)
+ return m * l * n2 * parent_rate / 2;
+ return m * l * parent_rate;
+}
+PARENTS(clk_pxa25x_cpll) = { "osc_3_6864mhz" };
+RATE_RO_OPS(clk_pxa25x_cpll, "cpll");
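As a sanity check of the multiplier tables, a worked example with L index 1 (multiplier 27), M index 1 (multiplier 1) and N2 index 2 (N*2 = 2) against the 3.6864 MHz oscillator reproduces the familiar 99.5 MHz PXA25x core clock (arithmetic only, not kernel code):

#include <stdio.h>

/* Worked example only (assumed CCCR encoding: L=27, M=1, N*2=2). */
int main(void)
{
	unsigned long osc = 3686400;	/* osc_3_6864mhz */
	unsigned int l = 27, m = 1, n2 = 2;
	unsigned long cpll, run;

	cpll = m * l * osc;		/* run-mode PLL, turbo bit clear */
	run  = (cpll / n2) * 2;		/* clk_pxa25x_run_get_rate() logic */

	printf("cpll = %lu Hz, run = %lu Hz\n", cpll, run);
	/* 99532800 Hz for both, i.e. the familiar 99.5 MHz PXA25x core */
	return 0;
}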
+
+static void __init pxa25x_register_core(void)
+{
+ clk_register_clk_pxa25x_cpll();
+ clk_register_clk_pxa25x_run();
+ clkdev_pxa_register(CLK_CORE, "core", NULL,
+ clk_register_clk_pxa25x_core());
+}
+
+static void __init pxa25x_register_plls(void)
+{
+ clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
+ CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ 3686400);
+ clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+ CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ 32768);
+ clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+ clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
+ 0, 26, 1);
+ clk_register_fixed_factor(NULL, "ppll_147_46mhz", "osc_3_6864mhz",
+ 0, 40, 1);
+}
+
+static void __init pxa25x_base_clocks_init(void)
+{
+ pxa25x_register_plls();
+ pxa25x_register_core();
+ clk_register_clk_pxa25x_memory();
+}
+
+#define DUMMY_CLK(_con_id, _dev_id, _parent) \
+ { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
+struct dummy_clk {
+ const char *con_id;
+ const char *dev_id;
+ const char *parent;
+};
+static struct dummy_clk dummy_clks[] __initdata = {
+ DUMMY_CLK(NULL, "pxa25x-gpio", "osc_32_768khz"),
+ DUMMY_CLK(NULL, "pxa26x-gpio", "osc_32_768khz"),
+ DUMMY_CLK("GPIO11_CLK", NULL, "osc_3_6864mhz"),
+ DUMMY_CLK("GPIO12_CLK", NULL, "osc_32_768khz"),
+ DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
+ DUMMY_CLK("OSTIMER0", NULL, "osc_32_768khz"),
+ DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
+};
+
+static void __init pxa25x_dummy_clocks_init(void)
+{
+ struct clk *clk;
+ struct dummy_clk *d;
+ const char *name;
+ int i;
+
+ /*
+ * All pinctrl logic has been wiped out of the clock driver, especially
+ * for gpio11 and gpio12 outputs. Machine code should ensure proper pin
+ * control (ie. pxa2xx_mfp_config() invocation).
+ */
+ for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
+ d = &dummy_clks[i];
+ name = d->dev_id ? d->dev_id : d->con_id;
+ clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
+ clk_register_clkdev(clk, d->con_id, d->dev_id);
+ }
+}
+
+int __init pxa25x_clocks_init(void)
+{
+ pxa25x_base_clocks_init();
+ pxa25x_dummy_clocks_init();
+ return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks));
+}
+
+static void __init pxa25x_dt_clocks_init(struct device_node *np)
+{
+ pxa25x_clocks_init();
+ clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa25x_clks, "marvell,pxa250-core-clocks",
+ pxa25x_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 88b9fe13fa44..5f9b54b024b9 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -111,7 +111,7 @@ PARENTS(pxa27x_membus) = { "lcd_base", "lcd_base" };
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
&CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
-static struct pxa_clk_cken pxa27x_clocks[] = {
+static struct desc_clk_cken pxa27x_clocks[] __initdata = {
PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1),
PXA27X_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 2, 42, 1),
PXA27X_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 2, 42, 1),
@@ -368,3 +368,10 @@ static int __init pxa27x_clocks_init(void)
return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
}
postcore_initcall(pxa27x_clocks_init);
+
+static void __init pxa27x_dt_clocks_init(struct device_node *np)
+{
+ pxa27x_clocks_init();
+ clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa270-clocks", pxa27x_dt_clocks_init);
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index b823bc3b6250..60873a7f45d9 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -141,7 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
static long
clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p)
{
struct clk_pll *pll = to_clk_pll(hw);
const struct pll_freq_tbl *f;
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index b6e6959e89aa..0b93972c8807 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -368,16 +368,17 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
static long _freq_tbl_determine_rate(struct clk_hw *hw,
const struct freq_tbl *f, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p_hw)
{
unsigned long clk_flags;
+ struct clk *p;
f = qcom_find_freq(f, rate);
if (!f)
return -EINVAL;
clk_flags = __clk_get_flags(hw->clk);
- *p = clk_get_parent_by_index(hw->clk, f->src);
+ p = clk_get_parent_by_index(hw->clk, f->src);
if (clk_flags & CLK_SET_RATE_PARENT) {
rate = rate * f->pre_div;
if (f->n) {
@@ -387,15 +388,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
rate = tmp;
}
} else {
- rate = __clk_get_rate(*p);
+ rate = __clk_get_rate(p);
}
+ *p_hw = __clk_get_hw(p);
*p_rate = rate;
return f->freq;
}
static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p)
{
struct clk_rcg *rcg = to_clk_rcg(hw);
@@ -403,7 +405,7 @@ static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
}
static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p)
{
struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
@@ -411,13 +413,15 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
}
static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p_hw)
{
struct clk_rcg *rcg = to_clk_rcg(hw);
const struct freq_tbl *f = rcg->freq_tbl;
+ struct clk *p;
- *p = clk_get_parent_by_index(hw->clk, f->src);
- *p_rate = __clk_round_rate(*p, rate);
+ p = clk_get_parent_by_index(hw->clk, f->src);
+ *p_hw = __clk_get_hw(p);
+ *p_rate = __clk_round_rate(p, rate);
return *p_rate;
}
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index cfa9eb4fe9ca..08b8b3729f53 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -175,16 +175,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
static long _freq_tbl_determine_rate(struct clk_hw *hw,
const struct freq_tbl *f, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p_hw)
{
unsigned long clk_flags;
+ struct clk *p;
f = qcom_find_freq(f, rate);
if (!f)
return -EINVAL;
clk_flags = __clk_get_flags(hw->clk);
- *p = clk_get_parent_by_index(hw->clk, f->src);
+ p = clk_get_parent_by_index(hw->clk, f->src);
if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
rate /= 2;
@@ -198,15 +199,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
rate = tmp;
}
} else {
- rate = __clk_get_rate(*p);
+ rate = __clk_get_rate(p);
}
+ *p_hw = __clk_get_hw(p);
*p_rate = rate;
return f->freq;
}
static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -359,7 +361,7 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
}
static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f = rcg->freq_tbl;
@@ -371,7 +373,7 @@ static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
u32 hid_div;
/* Force the correct parent */
- *p = clk_get_parent_by_index(hw->clk, f->src);
+ *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, f->src));
if (src_rate == 810000000)
frac = frac_table_810m;
@@ -410,18 +412,20 @@ const struct clk_ops clk_edp_pixel_ops = {
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p_hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f = rcg->freq_tbl;
unsigned long parent_rate, div;
u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk *p;
if (rate == 0)
return -EINVAL;
- *p = clk_get_parent_by_index(hw->clk, f->src);
- *p_rate = parent_rate = __clk_round_rate(*p, rate);
+ p = clk_get_parent_by_index(hw->clk, f->src);
+ *p_hw = __clk_get_hw(p);
+ *p_rate = parent_rate = __clk_round_rate(p, rate);
div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
div = min_t(u32, div, mask);
@@ -472,14 +476,16 @@ static const struct frac_entry frac_table_pixel[] = {
};
static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate, struct clk **p)
+ unsigned long *p_rate, struct clk_hw **p)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
unsigned long request, src_rate;
int delta = 100000;
const struct freq_tbl *f = rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- struct clk *parent = *p = clk_get_parent_by_index(hw->clk, f->src);
+ struct clk *parent = clk_get_parent_by_index(hw->clk, f->src);
+
+ *p = __clk_get_hw(parent);
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index bd8514d63634..2714097f90db 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,6 +6,7 @@ obj-y += clk-rockchip.o
obj-y += clk.o
obj-y += clk-pll.o
obj-y += clk-cpu.o
+obj-y += clk-mmc-phase.o
obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-y += clk-rk3188.o
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
new file mode 100644
index 000000000000..c842e3b60f21
--- /dev/null
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014 Google, Inc
+ * Author: Alexandru M Stan <amstan@chromium.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include "clk.h"
+
+struct rockchip_mmc_clock {
+ struct clk_hw hw;
+ void __iomem *reg;
+ int id;
+ int shift;
+};
+
+#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)
+
+#define RK3288_MMC_CLKGEN_DIV 2
+
+static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate / RK3288_MMC_CLKGEN_DIV;
+}
+
+#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
+#define ROCKCHIP_MMC_DEGREE_MASK 0x3
+#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
+#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
+
+#define PSECS_PER_SEC 1000000000000LL
+
+/*
+ * Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to
+ * simplify calculations. So 45degs could be anywhere between 33deg and 66deg.
+ */
+#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
+
+static int rockchip_mmc_get_phase(struct clk_hw *hw)
+{
+ struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
+ unsigned long rate = clk_get_rate(hw->clk);
+ u32 raw_value;
+ u16 degrees;
+ u32 delay_num = 0;
+
+ raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+
+ degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
+
+ if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
+ /* degrees/delaynum * 10000 */
+ unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
+ 36 * (rate / 1000000);
+
+ delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
+ delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
+ degrees += delay_num * factor / 10000;
+ }
+
+ return degrees % 360;
+}
+
+static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
+ unsigned long rate = clk_get_rate(hw->clk);
+ u8 nineties, remainder;
+ u8 delay_num;
+ u32 raw_value;
+ u64 delay;
+
+ /* allow 22 to be 22.5 */
+ degrees++;
+ /* floor to 22.5 increment */
+ degrees -= ((degrees) * 10 % 225) / 10;
+
+ nineties = degrees / 90;
+ /* 22.5 multiples */
+ remainder = (degrees % 90) / 22;
+
+ delay = PSECS_PER_SEC;
+ do_div(delay, rate);
+ /* / 360 / 22.5 */
+ do_div(delay, 16);
+ do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC);
+
+ delay *= remainder;
+ delay_num = (u8) min(delay, 255ULL);
+
+ raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
+ raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
+ raw_value |= nineties;
+ writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
+
+ pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
+ __clk_get_name(hw->clk), degrees, delay_num,
+ mmc_clock->reg, raw_value>>(mmc_clock->shift),
+ rockchip_mmc_get_phase(hw)
+ );
+
+ return 0;
+}
+
+static const struct clk_ops rockchip_mmc_clk_ops = {
+ .recalc_rate = rockchip_mmc_recalc,
+ .get_phase = rockchip_mmc_get_phase,
+ .set_phase = rockchip_mmc_set_phase,
+};
+
+struct clk *rockchip_clk_register_mmc(const char *name,
+ const char **parent_names, u8 num_parents,
+ void __iomem *reg, int shift)
+{
+ struct clk_init_data init;
+ struct rockchip_mmc_clock *mmc_clock;
+ struct clk *clk;
+
+ mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
+ if (!mmc_clock)
+ return NULL;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names;
+ init.ops = &rockchip_mmc_clk_ops;
+
+ mmc_clock->hw.init = &init;
+ mmc_clock->reg = reg;
+ mmc_clock->shift = shift;
+
+ if (name)
+ init.name = name;
+
+ clk = clk_register(NULL, &mmc_clock->hw);
+ if (IS_ERR(clk))
+ goto err_free;
+
+ return clk;
+
+err_free:
+ kfree(mmc_clock);
+ return NULL;
+}
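
As a sanity check on the arithmetic in rockchip_mmc_set_phase() above, here is one worked case, assuming a 150 MHz card clock and a requested phase of 45 degrees; the numbers follow directly from the code and the assumed 60 ps delay element:

/*
 * period            = 1000000000000 / 150000000 = 6666 ps
 * one 22.5 deg step = 6666 / 16                 = 416 ps
 * elements per step = 416 / 60                  = 6
 * 45 deg -> nineties = 0, remainder = 2 steps   -> delay_num = 6 * 2 = 12
 * raw_value = ROCKCHIP_MMC_DELAY_SEL | (12 << ROCKCHIP_MMC_DELAYNUM_OFFSET) | 0
 *           = 0x430
 *
 * HIWORD_UPDATE(0x430, 0x07ff, shift) then updates only the masked low
 * bits of the timing register, leaving neighbouring fields untouched.
 */
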
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index a3e886a38480..f8d3baf275b2 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -39,6 +39,7 @@ struct rockchip_clk_pll {
int lock_offset;
unsigned int lock_shift;
enum rockchip_pll_type type;
+ u8 flags;
const struct rockchip_pll_rate_table *rate_table;
unsigned int rate_count;
spinlock_t *lock;
@@ -257,6 +258,55 @@ static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3066_PLLCON3_PWRDOWN);
}
+static void rockchip_rk3066_pll_init(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+ unsigned int nf, nr, no, bwadj;
+ unsigned long drate;
+ u32 pllcon;
+
+ if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+ return;
+
+ drate = __clk_get_rate(hw->clk);
+ rate = rockchip_get_pll_settings(pll, drate);
+
+ /* when no rate setting for the current rate, rely on clk_set_rate */
+ if (!rate)
+ return;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
+ nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK) + 1;
+ no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK) + 1;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
+ nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
+ bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK;
+
+ pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n",
+ __func__, __clk_get_name(hw->clk), drate, rate->nr, nr,
+ rate->no, no, rate->nf, nf, rate->bwadj, bwadj);
+ if (rate->nr != nr || rate->no != no || rate->nf != nf
+ || rate->bwadj != bwadj) {
+ struct clk *parent = __clk_get_parent(hw->clk);
+ unsigned long prate;
+
+ if (!parent) {
+ pr_warn("%s: parent of %s not available\n",
+ __func__, __clk_get_name(hw->clk));
+ return;
+ }
+
+ pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
+ __func__, __clk_get_name(hw->clk));
+ prate = __clk_get_rate(parent);
+ rockchip_rk3066_pll_set_rate(hw, drate, prate);
+ }
+}
+
static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
.recalc_rate = rockchip_rk3066_pll_recalc_rate,
.enable = rockchip_rk3066_pll_enable,
@@ -271,6 +321,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.enable = rockchip_rk3066_pll_enable,
.disable = rockchip_rk3066_pll_disable,
.is_enabled = rockchip_rk3066_pll_is_enabled,
+ .init = rockchip_rk3066_pll_init,
};
/*
@@ -282,7 +333,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
void __iomem *base, int con_offset, int grf_lock_offset,
int lock_shift, int mode_offset, int mode_shift,
struct rockchip_pll_rate_table *rate_table,
- spinlock_t *lock)
+ u8 clk_pll_flags, spinlock_t *lock)
{
const char *pll_parents[3];
struct clk_init_data init;
@@ -345,8 +396,22 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
pll->reg_base = base + con_offset;
pll->lock_offset = grf_lock_offset;
pll->lock_shift = lock_shift;
+ pll->flags = clk_pll_flags;
pll->lock = lock;
+ /* create the mux on top of the real pll */
+ pll->pll_mux_ops = &clk_mux_ops;
+ pll_mux = &pll->pll_mux;
+ pll_mux->reg = base + mode_offset;
+ pll_mux->shift = mode_shift;
+ pll_mux->mask = PLL_MODE_MASK;
+ pll_mux->flags = 0;
+ pll_mux->lock = lock;
+ pll_mux->hw.init = &init;
+
+ if (pll_type == pll_rk3066)
+ pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
pll_clk = clk_register(NULL, &pll->hw);
if (IS_ERR(pll_clk)) {
pr_err("%s: failed to register pll clock %s : %ld\n",
@@ -355,10 +420,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
goto err_pll;
}
- /* create the mux on top of the real pll */
- pll->pll_mux_ops = &clk_mux_ops;
- pll_mux = &pll->pll_mux;
-
/* the actual muxing is xin24m, pll-output, xin32k */
pll_parents[0] = parent_names[0];
pll_parents[1] = pll_name;
@@ -370,16 +431,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
init.parent_names = pll_parents;
init.num_parents = ARRAY_SIZE(pll_parents);
- pll_mux->reg = base + mode_offset;
- pll_mux->shift = mode_shift;
- pll_mux->mask = PLL_MODE_MASK;
- pll_mux->flags = 0;
- pll_mux->lock = lock;
- pll_mux->hw.init = &init;
-
- if (pll_type == pll_rk3066)
- pll_mux->flags |= CLK_MUX_HIWORD_MASK;
-
mux_clk = clk_register(NULL, &pll_mux->hw);
if (IS_ERR(mux_clk))
goto err_mux;
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index beed49c79126..c54078960847 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -212,13 +212,13 @@ PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
- RK2928_MODE_CON, 0, 6, rk3188_pll_rates),
+ RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
[dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
- RK2928_MODE_CON, 4, 5, NULL),
+ RK2928_MODE_CON, 4, 5, 0, NULL),
[cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
- RK2928_MODE_CON, 8, 7, rk3188_pll_rates),
+ RK2928_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
[gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
- RK2928_MODE_CON, 12, 8, rk3188_pll_rates),
+ RK2928_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
};
#define MFLAGS CLK_MUX_HIWORD_MASK
@@ -257,9 +257,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
RK2928_CLKGATE_CON(3), 12, GFLAGS),
- GATE(0, "gpll_ddr", "gpll", 0,
+ GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 7, GFLAGS),
- COMPOSITE(0, "ddrphy", mux_ddrphy_p, 0,
+ COMPOSITE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(0), 2, GFLAGS),
@@ -270,10 +270,10 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 6, GFLAGS),
GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
RK2928_CLKGATE_CON(0), 5, GFLAGS),
- GATE(0, "hclk_cpu", "hclk_cpu_pre", 0,
+ GATE(0, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(0), 4, GFLAGS),
- COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
RK2928_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 0, GFLAGS),
COMPOSITE(0, "aclk_lcdc1_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -304,9 +304,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
* the 480m are generated inside the usb block from these clocks,
* but they are also a source for the hsicphy clock.
*/
- GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 5, GFLAGS),
- GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 6, GFLAGS),
COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -320,9 +320,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
RK2928_CLKGATE_CON(2), 6, GFLAGS),
- COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src",
+ COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
RK2928_CLKSEL_CON(23), 0,
- RK2928_CLKGATE_CON(2), 7, 0, GFLAGS),
+ RK2928_CLKGATE_CON(2), 7, GFLAGS),
MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
@@ -330,6 +330,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+ RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(0), 13, GFLAGS),
+ COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+ RK2928_CLKSEL_CON(9), 0,
+ RK2928_CLKGATE_CON(0), 14, GFLAGS),
+ MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+ RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
+
/*
* Clock-Architecture Diagram 4
*/
@@ -399,8 +408,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/* aclk_cpu gates */
GATE(ACLK_DMA1, "aclk_dma1", "aclk_cpu", 0, RK2928_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "aclk_intmem", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 12, GFLAGS),
- GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 10, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 12, GFLAGS),
+ GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 10, GFLAGS),
/* hclk_cpu gates */
GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
@@ -410,14 +419,14 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/* hclk_ahb2apb is part of a clk branch */
GATE(0, "hclk_vio_bus", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS),
- GATE(HCLK_LCDC1, "hclk_lcdc1", "aclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
+ GATE(HCLK_LCDC1, "hclk_lcdc1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
GATE(HCLK_CIF0, "hclk_cif0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS),
GATE(HCLK_IPP, "hclk_ipp", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 9, GFLAGS),
GATE(HCLK_RGA, "hclk_rga", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS),
/* hclk_peri gates */
- GATE(0, "hclk_peri_axi_matrix", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 0, GFLAGS),
- GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
+ GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
@@ -457,18 +466,18 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS),
GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS),
- GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 4, GFLAGS),
- GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 5, GFLAGS),
+ GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 5, GFLAGS),
/* aclk_peri */
GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS),
- GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 4, GFLAGS),
- GATE(0, "aclk_cpu_peri", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 2, GFLAGS),
- GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 3, GFLAGS),
+ GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 4, GFLAGS),
+ GATE(0, "aclk_cpu_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS),
+ GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 3, GFLAGS),
/* pclk_peri gates */
- GATE(0, "pclk_peri_axi_matrix", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 1, GFLAGS),
+ GATE(0, "pclk_peri_axi_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS),
GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS),
GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS),
@@ -511,7 +520,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
| CLK_DIVIDER_READ_ONLY,
RK2928_CLKGATE_CON(4), 9, GFLAGS),
- GATE(CORE_L2C, "core_l2c", "aclk_cpu", 0,
+ GATE(CORE_L2C, "core_l2c", "aclk_cpu", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(9), 4, GFLAGS),
COMPOSITE(0, "aclk_peri_pre", mux_pll_src_gpll_cpll_p, 0,
@@ -577,14 +586,6 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 12, GFLAGS),
MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
RK2928_CLKSEL_CON(4), 8, 2, MFLAGS),
- COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
- RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
- RK2928_CLKGATE_CON(0), 13, GFLAGS),
- COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
- RK2928_CLKSEL_CON(9), 0,
- RK2928_CLKGATE_CON(0), 14, GFLAGS),
- MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
- RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
@@ -618,7 +619,7 @@ PNAME(mux_hsicphy_p) = { "sclk_otgphy0", "sclk_otgphy1",
"gpll", "cpll" };
static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
- COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", 0,
+ COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS),
@@ -633,7 +634,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(1), 14, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK2928_CLKGATE_CON(4), 9, GFLAGS),
- GATE(CORE_L2C, "core_l2c", "armclk", 0,
+ GATE(CORE_L2C, "core_l2c", "armclk", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(9), 4, GFLAGS),
COMPOSITE(0, "aclk_peri_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -663,7 +664,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(30), 0, 2, DFLAGS,
RK2928_CLKGATE_CON(3), 6, GFLAGS),
DIV(0, "sclk_hsicphy_12m", "sclk_hsicphy_480m", 0,
- RK2928_CLKGATE_CON(11), 8, 6, DFLAGS),
+ RK2928_CLKSEL_CON(11), 8, 6, DFLAGS),
MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
@@ -675,14 +676,6 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 10, GFLAGS),
MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
- COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
- RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
- RK2928_CLKGATE_CON(13), 13, GFLAGS),
- COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
- RK2928_CLKSEL_CON(9), 0,
- RK2928_CLKGATE_CON(0), 14, GFLAGS),
- MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
- RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 23278291da44..ac6be7c0132d 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
#include <dt-bindings/clock/rk3288-cru.h>
#include "clk.h"
@@ -83,11 +84,13 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
RK3066_PLL_RATE( 742500000, 8, 495, 2),
RK3066_PLL_RATE( 696000000, 1, 58, 2),
RK3066_PLL_RATE( 600000000, 1, 50, 2),
- RK3066_PLL_RATE( 594000000, 2, 198, 4),
+ RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1),
RK3066_PLL_RATE( 552000000, 1, 46, 2),
RK3066_PLL_RATE( 504000000, 1, 84, 4),
+ RK3066_PLL_RATE( 500000000, 3, 125, 2),
RK3066_PLL_RATE( 456000000, 1, 76, 4),
RK3066_PLL_RATE( 408000000, 1, 68, 4),
+ RK3066_PLL_RATE( 400000000, 3, 100, 2),
RK3066_PLL_RATE( 384000000, 2, 128, 4),
RK3066_PLL_RATE( 360000000, 1, 60, 4),
RK3066_PLL_RATE( 312000000, 1, 52, 4),
@@ -173,14 +176,14 @@ PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu" };
PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" };
PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" };
-PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usb480m" };
+PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usbphy480m_src" };
+PNAME(mux_pll_src_cpll_gll_usb_npll_p) = { "cpll", "gpll", "usbphy480m_src", "npll" };
PNAME(mux_mmc_src_p) = { "cpll", "gpll", "xin24m", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" };
PNAME(mux_spdif_p) = { "spdif_pre", "spdif_frac", "xin12m" };
PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac", "xin12m" };
-PNAME(mux_uart0_pll_p) = { "cpll", "gpll", "usbphy_480m_src", "npll" };
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@@ -192,22 +195,22 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
-PNAME(mux_usbphy480m_p) = { "sclk_otgphy0", "sclk_otgphy1",
- "sclk_otgphy2" };
+PNAME(mux_usbphy480m_p) = { "sclk_otgphy1", "sclk_otgphy2",
+ "sclk_otgphy0" };
PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
PNAME(mux_hsicphy12m_p) = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
static struct rockchip_pll_clock rk3288_pll_clks[] __initdata = {
[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK3288_PLL_CON(0),
- RK3288_MODE_CON, 0, 6, rk3288_pll_rates),
+ RK3288_MODE_CON, 0, 6, 0, rk3288_pll_rates),
[dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3288_PLL_CON(4),
- RK3288_MODE_CON, 4, 5, NULL),
+ RK3288_MODE_CON, 4, 5, 0, NULL),
[cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3288_PLL_CON(8),
- RK3288_MODE_CON, 8, 7, rk3288_pll_rates),
+ RK3288_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
[gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3288_PLL_CON(12),
- RK3288_MODE_CON, 12, 8, rk3288_pll_rates),
+ RK3288_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
[npll] = PLL(pll_rk3066, PLL_NPLL, "npll", mux_pll_p, 0, RK3288_PLL_CON(16),
- RK3288_MODE_CON, 14, 9, rk3288_pll_rates),
+ RK3288_MODE_CON, 14, 9, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
};
static struct clk_div_table div_hclk_cpu_t[] = {
@@ -226,67 +229,67 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
* Clock-Architecture Diagram 1
*/
- GATE(0, "apll_core", "apll", 0,
+ GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 1, GFLAGS),
- GATE(0, "gpll_core", "gpll", 0,
+ GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 2, GFLAGS),
- COMPOSITE_NOMUX(0, "armcore0", "armclk", 0,
+ COMPOSITE_NOMUX(0, "armcore0", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(36), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 0, GFLAGS),
- COMPOSITE_NOMUX(0, "armcore1", "armclk", 0,
+ COMPOSITE_NOMUX(0, "armcore1", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(36), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 1, GFLAGS),
- COMPOSITE_NOMUX(0, "armcore2", "armclk", 0,
+ COMPOSITE_NOMUX(0, "armcore2", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(36), 8, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 2, GFLAGS),
- COMPOSITE_NOMUX(0, "armcore3", "armclk", 0,
+ COMPOSITE_NOMUX(0, "armcore3", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(36), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 3, GFLAGS),
- COMPOSITE_NOMUX(0, "l2ram", "armclk", 0,
+ COMPOSITE_NOMUX(0, "l2ram", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(37), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 4, GFLAGS),
- COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", 0,
+ COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(0), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 5, GFLAGS),
- COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", 0,
+ COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 6, GFLAGS),
COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 7, GFLAGS),
- COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", 0,
+ COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 8, GFLAGS),
GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
RK3288_CLKGATE_CON(12), 9, GFLAGS),
- GATE(0, "cs_dbg", "pclk_dbg_pre", 0,
+ GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(12), 10, GFLAGS),
GATE(0, "pclk_core_niu", "pclk_dbg_pre", 0,
RK3288_CLKGATE_CON(12), 11, GFLAGS),
- GATE(0, "dpll_ddr", "dpll", 0,
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 8, GFLAGS),
GATE(0, "gpll_ddr", "gpll", 0,
RK3288_CLKGATE_CON(0), 9, GFLAGS),
- COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, 0,
+ COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(26), 2, 1, MFLAGS, 0, 2,
DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
- GATE(0, "gpll_aclk_cpu", "gpll", 0,
+ GATE(0, "gpll_aclk_cpu", "gpll", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 10, GFLAGS),
- GATE(0, "cpll_aclk_cpu", "cpll", 0,
+ GATE(0, "cpll_aclk_cpu", "cpll", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 11, GFLAGS),
- COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0,
+ COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(1), 15, 1, MFLAGS, 3, 5, DFLAGS),
- DIV(0, "aclk_cpu_pre", "aclk_cpu_src", 0,
+ DIV(0, "aclk_cpu_pre", "aclk_cpu_src", CLK_SET_RATE_PARENT,
RK3288_CLKSEL_CON(1), 0, 3, DFLAGS),
- GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", 0,
+ GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 3, GFLAGS),
- COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", 0,
+ COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(1), 12, 3, DFLAGS,
RK3288_CLKGATE_CON(0), 5, GFLAGS),
- COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", 0,
+ COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(1), 8, 2, DFLAGS, div_hclk_cpu_t,
RK3288_CLKGATE_CON(0), 4, GFLAGS),
GATE(0, "c2c_host", "aclk_cpu_src", 0,
@@ -294,7 +297,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0,
RK3288_CLKSEL_CON(26), 6, 2, DFLAGS,
RK3288_CLKGATE_CON(5), 4, GFLAGS),
- GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", 0,
+ GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 7, GFLAGS),
COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
@@ -305,7 +308,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKGATE_CON(4), 2, GFLAGS),
MUX(0, "i2s_pre", mux_i2s_pre_p, CLK_SET_RATE_PARENT,
RK3288_CLKSEL_CON(4), 8, 2, MFLAGS),
- COMPOSITE_NODIV(0, "i2s0_clkout", mux_i2s_clkout_p, CLK_SET_RATE_PARENT,
+ COMPOSITE_NODIV(SCLK_I2S0_OUT, "i2s0_clkout", mux_i2s_clkout_p, 0,
RK3288_CLKSEL_CON(4), 12, 1, MFLAGS,
RK3288_CLKGATE_CON(4), 0, GFLAGS),
GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", CLK_SET_RATE_PARENT,
@@ -325,7 +328,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0,
RK3288_CLKSEL_CON(40), 0, 7, DFLAGS,
RK3288_CLKGATE_CON(4), 7, GFLAGS),
- COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", 0,
+ COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_pre", 0,
RK3288_CLKSEL_CON(41), 0,
RK3288_CLKGATE_CON(4), 8, GFLAGS),
COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
@@ -373,12 +376,12 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 1, GFLAGS),
- COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, 0,
+ COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 0, GFLAGS),
DIV(0, "hclk_vio", "aclk_vio0", 0,
RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
- COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, 0,
+ COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 2, GFLAGS),
@@ -436,24 +439,24 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
DIV(0, "pclk_pd_alive", "gpll", 0,
RK3288_CLKSEL_CON(33), 8, 5, DFLAGS),
- COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", 0,
+ COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(33), 0, 5, DFLAGS,
RK3288_CLKGATE_CON(5), 8, GFLAGS),
- COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+ COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gll_usb_npll_p, 0,
RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3288_CLKGATE_CON(5), 7, GFLAGS),
- COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
RK3288_CLKGATE_CON(2), 0, GFLAGS),
COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK3288_CLKGATE_CON(2), 3, GFLAGS),
- COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
+ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
RK3288_CLKGATE_CON(2), 2, GFLAGS),
- GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
+ GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(2), 1, GFLAGS),
/*
@@ -483,6 +486,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS,
RK3288_CLKGATE_CON(13), 3, GFLAGS),
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3288_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3288_SDMMC_CON1, 0),
+
+ MMC(SCLK_SDIO0_DRV, "sdio0_drv", "sclk_sdio0", RK3288_SDIO0_CON0, 1),
+ MMC(SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3288_SDIO0_CON1, 0),
+
+ MMC(SCLK_SDIO1_DRV, "sdio1_drv", "sclk_sdio1", RK3288_SDIO1_CON0, 1),
+ MMC(SCLK_SDIO1_SAMPLE, "sdio1_sample", "sclk_sdio1", RK3288_SDIO1_CON1, 0),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3288_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3288_EMMC_CON1, 0),
+
COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0,
RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(4), 11, GFLAGS),
@@ -490,13 +505,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3288_CLKGATE_CON(4), 10, GFLAGS),
- GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 4, GFLAGS),
- GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 5, GFLAGS),
- GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", 0,
+ GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 6, GFLAGS),
- GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", 0,
+ GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 7, GFLAGS),
COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
@@ -517,7 +532,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(5), 6, GFLAGS),
- COMPOSITE(0, "uart0_src", mux_uart0_pll_p, 0,
+ COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
RK3288_CLKGATE_CON(1), 8, GFLAGS),
COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
@@ -585,7 +600,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
- RK3288_CLKGATE_CON(5), 15, GFLAGS),
+ RK3288_CLKGATE_CON(5), 14, GFLAGS),
COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
RK3288_CLKGATE_CON(3), 6, GFLAGS),
@@ -601,19 +616,19 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
*/
/* aclk_cpu gates */
- GATE(0, "sclk_intmem0", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 5, GFLAGS),
- GATE(0, "sclk_intmem1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 6, GFLAGS),
- GATE(0, "sclk_intmem2", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(0, "sclk_intmem0", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 5, GFLAGS),
+ GATE(0, "sclk_intmem1", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 6, GFLAGS),
+ GATE(0, "sclk_intmem2", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 7, GFLAGS),
GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS),
- GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 13, GFLAGS),
- GATE(0, "aclk_intmem", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 13, GFLAGS),
+ GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 4, GFLAGS),
GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS),
GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS),
/* hclk_cpu gates */
GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS),
GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS),
- GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 9, GFLAGS),
+ GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 9, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS),
GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS),
@@ -622,42 +637,42 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
- GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
- GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
- GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
- GATE(0, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
+ GATE(PCLK_DDRUPCTL0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
+ GATE(PCLK_PUBL0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
+ GATE(PCLK_DDRUPCTL1, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
+ GATE(PCLK_PUBL1, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS),
GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
/* ddrctrl [DDR Controller PHY clock] gates */
- GATE(0, "nclk_ddrupctl0", "ddrphy", 0, RK3288_CLKGATE_CON(11), 4, GFLAGS),
- GATE(0, "nclk_ddrupctl1", "ddrphy", 0, RK3288_CLKGATE_CON(11), 5, GFLAGS),
+ GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
+ GATE(0, "nclk_ddrupctl1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 5, GFLAGS),
/* ddrphy gates */
- GATE(0, "sclk_ddrphy0", "ddrphy", 0, RK3288_CLKGATE_CON(4), 12, GFLAGS),
- GATE(0, "sclk_ddrphy1", "ddrphy", 0, RK3288_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(0, "sclk_ddrphy0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 12, GFLAGS),
+ GATE(0, "sclk_ddrphy1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 13, GFLAGS),
/* aclk_peri gates */
- GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 2, GFLAGS),
+ GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 2, GFLAGS),
GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS),
- GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK3288_CLKGATE_CON(7), 11, GFLAGS),
- GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(8), 12, GFLAGS),
GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS),
GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS),
/* hclk_peri gates */
- GATE(0, "hclk_peri_matrix", "hclk_peri", 0, RK3288_CLKGATE_CON(6), 0, GFLAGS),
- GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 4, GFLAGS),
GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS),
- GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 7, GFLAGS),
GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS),
- GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 9, GFLAGS),
- GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 10, GFLAGS),
- GATE(0, "hclk_emem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 12, GFLAGS),
- GATE(0, "hclk_mem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 13, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 9, GFLAGS),
+ GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 10, GFLAGS),
+ GATE(0, "hclk_emem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 12, GFLAGS),
+ GATE(0, "hclk_mem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 13, GFLAGS),
GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS),
GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS),
GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS),
@@ -669,7 +684,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS),
/* pclk_peri gates */
- GATE(0, "pclk_peri_matrix", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 1, GFLAGS),
GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS),
GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS),
GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS),
@@ -705,48 +720,48 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS),
GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS),
GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS),
- GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 11, GFLAGS),
- GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS),
+ GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 12, GFLAGS),
/* pclk_pd_pmu gates */
- GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 0, GFLAGS),
- GATE(0, "pclk_intmem1", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 1, GFLAGS),
- GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 2, GFLAGS),
- GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 0, GFLAGS),
+ GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 1, GFLAGS),
+ GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 3, GFLAGS),
GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS),
/* hclk_vio gates */
GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS),
GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS),
GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS),
- GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 9, GFLAGS),
- GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 10, GFLAGS),
+ GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 9, GFLAGS),
+ GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 10, GFLAGS),
GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS),
GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS),
GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS),
- GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 10, GFLAGS),
+ GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 10, GFLAGS),
GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS),
GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS),
GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS),
GATE(PCLK_LVDS_PHY, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS),
- GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 8, GFLAGS),
GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS),
- GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 11, GFLAGS),
+ GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 11, GFLAGS),
/* aclk_vio0 gates */
GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS),
GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS),
- GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 11, GFLAGS),
+ GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 11, GFLAGS),
GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS),
/* aclk_vio1 gates */
GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS),
GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS),
- GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 12, GFLAGS),
+ GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 12, GFLAGS),
/* aclk_rga_pre gates */
GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS),
- GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 13, GFLAGS),
+ GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 13, GFLAGS),
/*
* Other ungrouped clocks.
@@ -762,6 +777,64 @@ static const char *rk3288_critical_clocks[] __initconst = {
"hclk_peri",
};
+#ifdef CONFIG_PM_SLEEP
+static void __iomem *rk3288_cru_base;
+
+/* Some CRU registers will be reset in maskrom when the system
+ * wakes up from fastboot.
+ * So save them before suspend, restore them after resume.
+ */
+static const int rk3288_saved_cru_reg_ids[] = {
+ RK3288_MODE_CON,
+ RK3288_CLKSEL_CON(0),
+ RK3288_CLKSEL_CON(1),
+ RK3288_CLKSEL_CON(10),
+ RK3288_CLKSEL_CON(33),
+ RK3288_CLKSEL_CON(37),
+};
+
+static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
+
+static int rk3288_clk_suspend(void)
+{
+ int i, reg_id;
+
+ for (i = 0; i < ARRAY_SIZE(rk3288_saved_cru_reg_ids); i++) {
+ reg_id = rk3288_saved_cru_reg_ids[i];
+
+ rk3288_saved_cru_regs[i] =
+ readl_relaxed(rk3288_cru_base + reg_id);
+ }
+ return 0;
+}
+
+static void rk3288_clk_resume(void)
+{
+ int i, reg_id;
+
+ for (i = ARRAY_SIZE(rk3288_saved_cru_reg_ids) - 1; i >= 0; i--) {
+ reg_id = rk3288_saved_cru_reg_ids[i];
+
+ writel_relaxed(rk3288_saved_cru_regs[i] | 0xffff0000,
+ rk3288_cru_base + reg_id);
+ }
+}
+
+static struct syscore_ops rk3288_clk_syscore_ops = {
+ .suspend = rk3288_clk_suspend,
+ .resume = rk3288_clk_resume,
+};
+
+static void rk3288_clk_sleep_init(void __iomem *reg_base)
+{
+ rk3288_cru_base = reg_base;
+ register_syscore_ops(&rk3288_clk_syscore_ops);
+}
+
+#else /* CONFIG_PM_SLEEP */
+static void rk3288_clk_sleep_init(void __iomem *reg_base) {}
+#endif
+
static void __init rk3288_clk_init(struct device_node *np)
{
void __iomem *reg_base;
@@ -810,5 +883,6 @@ static void __init rk3288_clk_init(struct device_node *np)
ROCKCHIP_SOFTRST_HIWORD_MASK);
rockchip_register_restart_notifier(RK3288_GLB_SRST_FST);
+ rk3288_clk_sleep_init(reg_base);
}
CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
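
The suspend/resume hook added above saves a handful of CRU registers and restores them on resume by writing the saved value OR-ed with 0xffff0000. These registers use the same hiword-mask scheme seen elsewhere in the series (HIWORD_UPDATE(), CLK_MUX_HIWORD_MASK): bits [31:16] are per-bit write enables for bits [15:0], so setting the whole high half lets every saved low bit take effect in a single write, with no read-modify-write. A small illustrative helper (the name is mine, not part of the patch):

static void rk3288_restore_hiword_reg(void __iomem *cru_base, int reg_id,
				      u32 saved)
{
	/* enable all low-half bits for writing, then restore them at once */
	writel_relaxed(saved | 0xffff0000, cru_base + reg_id);
}
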
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 880a266f0143..20e05bbb3a67 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -197,7 +197,8 @@ void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
list->parent_names, list->num_parents,
reg_base, list->con_offset, grf_lock_offset,
list->lock_shift, list->mode_offset,
- list->mode_shift, list->rate_table, &clk_lock);
+ list->mode_shift, list->rate_table,
+ list->pll_flags, &clk_lock);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
@@ -244,9 +245,6 @@ void __init rockchip_clk_register_branches(
list->div_flags, &clk_lock);
break;
case branch_fraction_divider:
- /* keep all gates untouched for now */
- flags |= CLK_IGNORE_UNUSED;
-
clk = rockchip_clk_register_frac_branch(list->name,
list->parent_names, list->num_parents,
reg_base, list->muxdiv_offset, list->div_flags,
@@ -256,18 +254,12 @@ void __init rockchip_clk_register_branches(
case branch_gate:
flags |= CLK_SET_RATE_PARENT;
- /* keep all gates untouched for now */
- flags |= CLK_IGNORE_UNUSED;
-
clk = clk_register_gate(NULL, list->name,
list->parent_names[0], flags,
reg_base + list->gate_offset,
list->gate_shift, list->gate_flags, &clk_lock);
break;
case branch_composite:
- /* keep all gates untouched for now */
- flags |= CLK_IGNORE_UNUSED;
-
clk = rockchip_clk_register_branch(list->name,
list->parent_names, list->num_parents,
reg_base, list->muxdiv_offset, list->mux_shift,
@@ -277,6 +269,14 @@ void __init rockchip_clk_register_branches(
list->gate_offset, list->gate_shift,
list->gate_flags, flags, &clk_lock);
break;
+ case branch_mmc:
+ clk = rockchip_clk_register_mmc(
+ list->name,
+ list->parent_names, list->num_parents,
+ reg_base + list->muxdiv_offset,
+ list->div_shift
+ );
+ break;
}
/* none of the cases above matched */
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ca009ab0a33a..58d2e3bdf22f 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -48,6 +48,14 @@
#define RK3288_GLB_SRST_SND 0x1b4
#define RK3288_SOFTRST_CON(x) (x * 0x4 + 0x1b8)
#define RK3288_MISC_CON 0x1e8
+#define RK3288_SDMMC_CON0 0x200
+#define RK3288_SDMMC_CON1 0x204
+#define RK3288_SDIO0_CON0 0x208
+#define RK3288_SDIO0_CON1 0x20c
+#define RK3288_SDIO1_CON0 0x210
+#define RK3288_SDIO1_CON1 0x214
+#define RK3288_EMMC_CON0 0x218
+#define RK3288_EMMC_CON1 0x21c
enum rockchip_pll_type {
pll_rk3066,
@@ -62,6 +70,15 @@ enum rockchip_pll_type {
.bwadj = (_nf >> 1), \
}
+#define RK3066_PLL_RATE_BWADJ(_rate, _nr, _nf, _no, _bw) \
+{ \
+ .rate = _rate##U, \
+ .nr = _nr, \
+ .nf = _nf, \
+ .no = _no, \
+ .bwadj = _bw, \
+}
+
struct rockchip_pll_rate_table {
unsigned long rate;
unsigned int nr;
@@ -81,7 +98,12 @@ struct rockchip_pll_rate_table {
* @mode_shift: offset inside the mode-register for the mode of this pll.
* @lock_shift: offset inside the lock register for the lock status.
* @type: Type of PLL to be registered.
+ * @pll_flags: hardware-specific flags
* @rate_table: Table of usable pll rates
+ *
+ * Flags:
+ * ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
+ * rate_table parameters and adjust them if necessary.
*/
struct rockchip_pll_clock {
unsigned int id;
@@ -94,11 +116,14 @@ struct rockchip_pll_clock {
int mode_shift;
int lock_shift;
enum rockchip_pll_type type;
+ u8 pll_flags;
struct rockchip_pll_rate_table *rate_table;
};
+#define ROCKCHIP_PLL_SYNC_RATE BIT(0)
+
#define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift, \
- _lshift, _rtable) \
+ _lshift, _pflags, _rtable) \
{ \
.id = _id, \
.type = _type, \
@@ -110,6 +135,7 @@ struct rockchip_pll_clock {
.mode_offset = _mode, \
.mode_shift = _mshift, \
.lock_shift = _lshift, \
+ .pll_flags = _pflags, \
.rate_table = _rtable, \
}
@@ -118,7 +144,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
void __iomem *base, int con_offset, int grf_lock_offset,
int lock_shift, int reg_mode, int mode_shift,
struct rockchip_pll_rate_table *rate_table,
- spinlock_t *lock);
+ u8 clk_pll_flags, spinlock_t *lock);
struct rockchip_cpuclk_clksel {
int reg;
@@ -152,6 +178,10 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
const struct rockchip_cpuclk_rate_table *rates,
int nrates, void __iomem *reg_base, spinlock_t *lock);
+struct clk *rockchip_clk_register_mmc(const char *name,
+ const char **parent_names, u8 num_parents,
+ void __iomem *reg, int shift);
+
#define PNAME(x) static const char *x[] __initconst
enum rockchip_clk_branch_type {
@@ -160,6 +190,7 @@ enum rockchip_clk_branch_type {
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_mmc,
};
struct rockchip_clk_branch {
@@ -352,6 +383,16 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define MMC(_id, cname, pname, offset, shift) \
+ { \
+ .id = _id, \
+ .branch_type = branch_mmc, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .muxdiv_offset = offset, \
+ .div_shift = shift, \
+ }
void rockchip_clk_init(struct device_node *np, void __iomem *base,
unsigned long nr_clks);
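
Tying the clk.h pieces together: an MMC() entry in a branch table sets branch_type to branch_mmc, which rockchip_clk_register_branches() dispatches to rockchip_clk_register_mmc(), using muxdiv_offset as the phase CON register and div_shift as the bit offset of the phase field. Taking one rk3288 entry from earlier in the patch, the table line and a roughly equivalent direct call look like this (illustration only):

	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3288_SDMMC_CON1, 0),

	/* ... is handled by the branch_mmc case roughly as: */
	rockchip_clk_register_mmc("sdmmc_sample",
				  (const char *[]){ "sclk_sdmmc" }, 1,
				  reg_base + RK3288_SDMMC_CON1, 0);
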
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 6fb4bc602e8a..006c6f294310 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o
obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
+obj-$(CONFIG_SOC_EXYNOS4415) += clk-exynos4415.o
obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
+obj-$(CONFIG_ARCH_EXYNOS7) += clk-exynos7.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index acce708ace18..f2c2ccce49bb 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -29,6 +29,13 @@ static DEFINE_SPINLOCK(lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
+/*
+ * On Exynos5420 this will be a clock which has to be enabled before any
+ * access to audss registers. Typically a child of EPLL.
+ *
+ * On other platforms this will be -ENODEV.
+ */
+static struct clk *epll;
#define ASS_CLK_SRC 0x0
#define ASS_CLK_DIV 0x4
@@ -98,6 +105,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to map audss registers\n");
return PTR_ERR(reg_base);
}
+ /* EPLL doesn't have to be enabled for boards other than Exynos5420 */
+ epll = ERR_PTR(-ENODEV);
clk_table = devm_kzalloc(&pdev->dev,
sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
@@ -115,8 +124,20 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
pll_in = devm_clk_get(&pdev->dev, "pll_in");
if (!IS_ERR(pll_ref))
mout_audss_p[0] = __clk_get_name(pll_ref);
- if (!IS_ERR(pll_in))
+ if (!IS_ERR(pll_in)) {
mout_audss_p[1] = __clk_get_name(pll_in);
+
+ if (variant == TYPE_EXYNOS5420) {
+ epll = pll_in;
+
+ ret = clk_prepare_enable(epll);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to prepare the epll clock\n");
+ return ret;
+ }
+ }
+ }
clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
CLK_SET_RATE_NO_REPARENT,
@@ -203,6 +224,9 @@ unregister:
clk_unregister(clk_table[i]);
}
+ if (!IS_ERR(epll))
+ clk_disable_unprepare(epll);
+
return ret;
}
@@ -210,6 +234,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
{
int i;
+#ifdef CONFIG_PM_SLEEP
+ unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
+#endif
+
of_clk_del_provider(pdev->dev.of_node);
for (i = 0; i < clk_data.clk_num; i++) {
@@ -217,6 +245,9 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
clk_unregister(clk_table[i]);
}
+ if (!IS_ERR(epll))
+ clk_disable_unprepare(epll);
+
return 0;
}
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 940f02837b82..88e8c6bbd77f 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -505,7 +505,7 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
- FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000),
FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
};
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
new file mode 100644
index 000000000000..2123fc251e0f
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -0,0 +1,1144 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos4415 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clock/exynos4415.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#define SRC_LEFTBUS 0x4200
+#define DIV_LEFTBUS 0x4500
+#define GATE_IP_LEFTBUS 0x4800
+#define GATE_IP_IMAGE 0x4930
+#define SRC_RIGHTBUS 0x8200
+#define DIV_RIGHTBUS 0x8500
+#define GATE_IP_RIGHTBUS 0x8800
+#define GATE_IP_PERIR 0x8960
+#define EPLL_LOCK 0xc010
+#define G3D_PLL_LOCK 0xc020
+#define DISP_PLL_LOCK 0xc030
+#define ISP_PLL_LOCK 0xc040
+#define EPLL_CON0 0xc110
+#define EPLL_CON1 0xc114
+#define EPLL_CON2 0xc118
+#define G3D_PLL_CON0 0xc120
+#define G3D_PLL_CON1 0xc124
+#define G3D_PLL_CON2 0xc128
+#define ISP_PLL_CON0 0xc130
+#define ISP_PLL_CON1 0xc134
+#define ISP_PLL_CON2 0xc138
+#define DISP_PLL_CON0 0xc140
+#define DISP_PLL_CON1 0xc144
+#define DISP_PLL_CON2 0xc148
+#define SRC_TOP0 0xc210
+#define SRC_TOP1 0xc214
+#define SRC_CAM 0xc220
+#define SRC_TV 0xc224
+#define SRC_MFC 0xc228
+#define SRC_G3D 0xc22c
+#define SRC_LCD 0xc234
+#define SRC_ISP 0xc238
+#define SRC_MAUDIO 0xc23c
+#define SRC_FSYS 0xc240
+#define SRC_PERIL0 0xc250
+#define SRC_PERIL1 0xc254
+#define SRC_CAM1 0xc258
+#define SRC_TOP_ISP0 0xc25c
+#define SRC_TOP_ISP1 0xc260
+#define SRC_MASK_TOP 0xc310
+#define SRC_MASK_CAM 0xc320
+#define SRC_MASK_TV 0xc324
+#define SRC_MASK_LCD 0xc334
+#define SRC_MASK_ISP 0xc338
+#define SRC_MASK_MAUDIO 0xc33c
+#define SRC_MASK_FSYS 0xc340
+#define SRC_MASK_PERIL0 0xc350
+#define SRC_MASK_PERIL1 0xc354
+#define DIV_TOP 0xc510
+#define DIV_CAM 0xc520
+#define DIV_TV 0xc524
+#define DIV_MFC 0xc528
+#define DIV_G3D 0xc52c
+#define DIV_LCD 0xc534
+#define DIV_ISP 0xc538
+#define DIV_MAUDIO 0xc53c
+#define DIV_FSYS0 0xc540
+#define DIV_FSYS1 0xc544
+#define DIV_FSYS2 0xc548
+#define DIV_PERIL0 0xc550
+#define DIV_PERIL1 0xc554
+#define DIV_PERIL2 0xc558
+#define DIV_PERIL3 0xc55c
+#define DIV_PERIL4 0xc560
+#define DIV_PERIL5 0xc564
+#define DIV_CAM1 0xc568
+#define DIV_TOP_ISP1 0xc56c
+#define DIV_TOP_ISP0 0xc570
+#define CLKDIV2_RATIO 0xc580
+#define GATE_SCLK_CAM 0xc820
+#define GATE_SCLK_TV 0xc824
+#define GATE_SCLK_MFC 0xc828
+#define GATE_SCLK_G3D 0xc82c
+#define GATE_SCLK_LCD 0xc834
+#define GATE_SCLK_MAUDIO 0xc83c
+#define GATE_SCLK_FSYS 0xc840
+#define GATE_SCLK_PERIL 0xc850
+#define GATE_IP_CAM 0xc920
+#define GATE_IP_TV 0xc924
+#define GATE_IP_MFC 0xc928
+#define GATE_IP_G3D 0xc92c
+#define GATE_IP_LCD 0xc934
+#define GATE_IP_FSYS 0xc940
+#define GATE_IP_PERIL 0xc950
+#define GATE_BLOCK 0xc970
+#define APLL_LOCK 0x14000
+#define APLL_CON0 0x14100
+#define SRC_CPU 0x14200
+#define DIV_CPU0 0x14500
+#define DIV_CPU1 0x14504
+
+enum exynos4415_plls {
+ apll, epll, g3d_pll, isp_pll, disp_pll,
+ nr_plls,
+};
+
+static struct samsung_clk_provider *exynos4415_ctx;
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos4415_clk_regs;
+
+static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
+ SRC_LEFTBUS,
+ DIV_LEFTBUS,
+ GATE_IP_LEFTBUS,
+ GATE_IP_IMAGE,
+ SRC_RIGHTBUS,
+ DIV_RIGHTBUS,
+ GATE_IP_RIGHTBUS,
+ GATE_IP_PERIR,
+ EPLL_LOCK,
+ G3D_PLL_LOCK,
+ DISP_PLL_LOCK,
+ ISP_PLL_LOCK,
+ EPLL_CON0,
+ EPLL_CON1,
+ EPLL_CON2,
+ G3D_PLL_CON0,
+ G3D_PLL_CON1,
+ G3D_PLL_CON2,
+ ISP_PLL_CON0,
+ ISP_PLL_CON1,
+ ISP_PLL_CON2,
+ DISP_PLL_CON0,
+ DISP_PLL_CON1,
+ DISP_PLL_CON2,
+ SRC_TOP0,
+ SRC_TOP1,
+ SRC_CAM,
+ SRC_TV,
+ SRC_MFC,
+ SRC_G3D,
+ SRC_LCD,
+ SRC_ISP,
+ SRC_MAUDIO,
+ SRC_FSYS,
+ SRC_PERIL0,
+ SRC_PERIL1,
+ SRC_CAM1,
+ SRC_TOP_ISP0,
+ SRC_TOP_ISP1,
+ SRC_MASK_TOP,
+ SRC_MASK_CAM,
+ SRC_MASK_TV,
+ SRC_MASK_LCD,
+ SRC_MASK_ISP,
+ SRC_MASK_MAUDIO,
+ SRC_MASK_FSYS,
+ SRC_MASK_PERIL0,
+ SRC_MASK_PERIL1,
+ DIV_TOP,
+ DIV_CAM,
+ DIV_TV,
+ DIV_MFC,
+ DIV_G3D,
+ DIV_LCD,
+ DIV_ISP,
+ DIV_MAUDIO,
+ DIV_FSYS0,
+ DIV_FSYS1,
+ DIV_FSYS2,
+ DIV_PERIL0,
+ DIV_PERIL1,
+ DIV_PERIL2,
+ DIV_PERIL3,
+ DIV_PERIL4,
+ DIV_PERIL5,
+ DIV_CAM1,
+ DIV_TOP_ISP1,
+ DIV_TOP_ISP0,
+ CLKDIV2_RATIO,
+ GATE_SCLK_CAM,
+ GATE_SCLK_TV,
+ GATE_SCLK_MFC,
+ GATE_SCLK_G3D,
+ GATE_SCLK_LCD,
+ GATE_SCLK_MAUDIO,
+ GATE_SCLK_FSYS,
+ GATE_SCLK_PERIL,
+ GATE_IP_CAM,
+ GATE_IP_TV,
+ GATE_IP_MFC,
+ GATE_IP_G3D,
+ GATE_IP_LCD,
+ GATE_IP_FSYS,
+ GATE_IP_PERIL,
+ GATE_BLOCK,
+ APLL_LOCK,
+ APLL_CON0,
+ SRC_CPU,
+ DIV_CPU0,
+ DIV_CPU1,
+};
+
+static int exynos4415_clk_suspend(void)
+{
+ samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
+ ARRAY_SIZE(exynos4415_cmu_clk_regs));
+
+ return 0;
+}
+
+static void exynos4415_clk_resume(void)
+{
+ samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
+ ARRAY_SIZE(exynos4415_cmu_clk_regs));
+}
+
+static struct syscore_ops exynos4415_clk_syscore_ops = {
+ .suspend = exynos4415_clk_suspend,
+ .resume = exynos4415_clk_resume,
+};
+
+static void exynos4415_clk_sleep_init(void)
+{
+ exynos4415_clk_regs =
+ samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
+ ARRAY_SIZE(exynos4415_cmu_clk_regs));
+ if (!exynos4415_clk_regs) {
+ pr_warn("%s: Failed to allocate sleep save data\n", __func__);
+ return;
+ }
+
+ register_syscore_ops(&exynos4415_clk_syscore_ops);
+}
+#else
+static inline void exynos4415_clk_sleep_init(void) { }
+#endif
+
+/* list of all parent clock lists */
+PNAME(mout_g3d_pllsrc_p) = { "fin_pll", };
+
+PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
+PNAME(mout_g3d_pll_p) = { "fin_pll", "fout_g3d_pll", };
+PNAME(mout_isp_pll_p) = { "fin_pll", "fout_isp_pll", };
+PNAME(mout_disp_pll_p) = { "fin_pll", "fout_disp_pll", };
+
+PNAME(mout_mpll_user_p) = { "fin_pll", "div_mpll_pre", };
+PNAME(mout_epll_p) = { "fin_pll", "fout_epll", };
+PNAME(mout_core_p) = { "mout_apll", "mout_mpll_user_c", };
+PNAME(mout_hpm_p) = { "mout_apll", "mout_mpll_user_c", };
+
+PNAME(mout_ebi_p) = { "div_aclk_200", "div_aclk_160", };
+PNAME(mout_ebi_1_p) = { "mout_ebi", "mout_g3d_pll", };
+
+PNAME(mout_gdl_p) = { "mout_mpll_user_l", };
+PNAME(mout_gdr_p) = { "mout_mpll_user_r", };
+
+PNAME(mout_aclk_266_p) = { "mout_mpll_user_t", "mout_g3d_pll", };
+
+PNAME(group_epll_g3dpll_p) = { "mout_epll", "mout_g3d_pll" };
+PNAME(group_sclk_p) = { "xxti", "xusbxti",
+ "none", "mout_isp_pll",
+ "none", "none", "div_mpll_pre",
+ "mout_epll", "mout_g3d_pll", };
+PNAME(group_spdif_p) = { "mout_audio0", "mout_audio1",
+ "mout_audio2", "spdif_extclk", };
+PNAME(group_sclk_audio2_p) = { "audiocdclk2", "none",
+ "none", "mout_isp_pll",
+ "mout_disp_pll", "xusbxti",
+ "div_mpll_pre", "mout_epll",
+ "mout_g3d_pll", };
+PNAME(group_sclk_audio1_p) = { "audiocdclk1", "none",
+ "none", "mout_isp_pll",
+ "mout_disp_pll", "xusbxti",
+ "div_mpll_pre", "mout_epll",
+ "mout_g3d_pll", };
+PNAME(group_sclk_audio0_p) = { "audiocdclk0", "none",
+ "none", "mout_isp_pll",
+ "mout_disp_pll", "xusbxti",
+ "div_mpll_pre", "mout_epll",
+ "mout_g3d_pll", };
+PNAME(group_fimc_lclk_p) = { "xxti", "xusbxti",
+ "none", "mout_isp_pll",
+ "none", "mout_disp_pll",
+ "mout_mpll_user_t", "mout_epll",
+ "mout_g3d_pll", };
+PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti",
+ "m_bitclkhsdiv4_4l", "mout_isp_pll",
+ "mout_disp_pll", "sclk_hdmiphy",
+ "div_mpll_pre", "mout_epll",
+ "mout_g3d_pll", };
+PNAME(mout_hdmi_p) = { "sclk_pixel", "sclk_hdmiphy" };
+PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" };
+PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" };
+PNAME(mout_jpeg_p) = { "mout_jpeg_0", "mout_jpeg_1" };
+PNAME(mout_jpeg1_p) = { "mout_epll", "mout_g3d_pll" };
+PNAME(group_aclk_isp0_300_p) = { "mout_isp_pll", "div_mpll_pre" };
+PNAME(group_aclk_isp0_400_user_p) = { "fin_pll", "div_aclk_400_mcuisp" };
+PNAME(group_aclk_isp0_300_user_p) = { "fin_pll", "mout_aclk_isp0_300" };
+PNAME(group_aclk_isp1_300_user_p) = { "fin_pll", "mout_aclk_isp1_300" };
+PNAME(group_mout_mpll_user_t_p) = { "mout_mpll_user_t" };
+
+static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initdata = {
+ /* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */
+ FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0),
+};
+
+static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = {
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+};
+
+static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
+ /*
+	 * NOTE: The following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+
+ /* SRC_LEFTBUS */
+ MUX(CLK_MOUT_MPLL_USER_L, "mout_mpll_user_l", mout_mpll_user_p,
+ SRC_LEFTBUS, 4, 1),
+ MUX(CLK_MOUT_GDL, "mout_gdl", mout_gdl_p, SRC_LEFTBUS, 0, 1),
+
+ /* SRC_RIGHTBUS */
+ MUX(CLK_MOUT_MPLL_USER_R, "mout_mpll_user_r", mout_mpll_user_p,
+ SRC_RIGHTBUS, 4, 1),
+ MUX(CLK_MOUT_GDR, "mout_gdr", mout_gdr_p, SRC_RIGHTBUS, 0, 1),
+
+ /* SRC_TOP0 */
+ MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1),
+ MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_mout_mpll_user_t_p,
+ SRC_TOP0, 24, 1),
+ MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_mout_mpll_user_t_p,
+ SRC_TOP0, 20, 1),
+ MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_mout_mpll_user_t_p,
+ SRC_TOP0, 16, 1),
+ MUX(CLK_MOUT_ACLK_266, "mout_aclk_266", mout_aclk_266_p,
+ SRC_TOP0, 12, 1),
+ MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
+ SRC_TOP0, 8, 1),
+ MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_TOP0, 4, 1),
+ MUX(CLK_MOUT_EBI_1, "mout_ebi_1", mout_ebi_1_p, SRC_TOP0, 0, 1),
+
+ /* SRC_TOP1 */
+ MUX(CLK_MOUT_ISP_PLL, "mout_isp_pll", mout_isp_pll_p,
+ SRC_TOP1, 28, 1),
+ MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p,
+ SRC_TOP1, 16, 1),
+ MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p,
+ SRC_TOP1, 12, 1),
+ MUX(CLK_MOUT_ACLK_400_MCUISP, "mout_aclk_400_mcuisp",
+ group_mout_mpll_user_t_p, SRC_TOP1, 8, 1),
+ MUX(CLK_MOUT_G3D_PLLSRC, "mout_g3d_pllsrc", mout_g3d_pllsrc_p,
+ SRC_TOP1, 0, 1),
+
+ /* SRC_CAM */
+ MUX(CLK_MOUT_CSIS1, "mout_csis1", group_fimc_lclk_p, SRC_CAM, 28, 4),
+ MUX(CLK_MOUT_CSIS0, "mout_csis0", group_fimc_lclk_p, SRC_CAM, 24, 4),
+ MUX(CLK_MOUT_CAM1, "mout_cam1", group_fimc_lclk_p, SRC_CAM, 20, 4),
+ MUX(CLK_MOUT_FIMC3_LCLK, "mout_fimc3_lclk", group_fimc_lclk_p, SRC_CAM,
+ 12, 4),
+ MUX(CLK_MOUT_FIMC2_LCLK, "mout_fimc2_lclk", group_fimc_lclk_p, SRC_CAM,
+ 8, 4),
+ MUX(CLK_MOUT_FIMC1_LCLK, "mout_fimc1_lclk", group_fimc_lclk_p, SRC_CAM,
+ 4, 4),
+ MUX(CLK_MOUT_FIMC0_LCLK, "mout_fimc0_lclk", group_fimc_lclk_p, SRC_CAM,
+ 0, 4),
+
+ /* SRC_TV */
+ MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
+
+ /* SRC_MFC */
+ MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
+ MUX(CLK_MOUT_MFC_1, "mout_mfc_1", group_epll_g3dpll_p, SRC_MFC, 4, 1),
+ MUX(CLK_MOUT_MFC_0, "mout_mfc_0", group_mout_mpll_user_t_p, SRC_MFC, 0,
+ 1),
+
+ /* SRC_G3D */
+ MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1),
+ MUX(CLK_MOUT_G3D_1, "mout_g3d_1", group_epll_g3dpll_p, SRC_G3D, 4, 1),
+ MUX(CLK_MOUT_G3D_0, "mout_g3d_0", group_mout_mpll_user_t_p, SRC_G3D, 0,
+ 1),
+
+ /* SRC_LCD */
+ MUX(CLK_MOUT_MIPI0, "mout_mipi0", group_fimc_lclk_p, SRC_LCD, 12, 4),
+ MUX(CLK_MOUT_FIMD0, "mout_fimd0", group_sclk_fimd0_p, SRC_LCD, 0, 4),
+
+ /* SRC_ISP */
+ MUX(CLK_MOUT_TSADC_ISP, "mout_tsadc_isp", group_fimc_lclk_p, SRC_ISP,
+ 16, 4),
+ MUX(CLK_MOUT_UART_ISP, "mout_uart_isp", group_fimc_lclk_p, SRC_ISP,
+ 12, 4),
+ MUX(CLK_MOUT_SPI1_ISP, "mout_spi1_isp", group_fimc_lclk_p, SRC_ISP,
+ 8, 4),
+ MUX(CLK_MOUT_SPI0_ISP, "mout_spi0_isp", group_fimc_lclk_p, SRC_ISP,
+ 4, 4),
+ MUX(CLK_MOUT_PWM_ISP, "mout_pwm_isp", group_fimc_lclk_p, SRC_ISP,
+ 0, 4),
+
+ /* SRC_MAUDIO */
+ MUX(CLK_MOUT_AUDIO0, "mout_audio0", group_sclk_audio0_p, SRC_MAUDIO,
+ 0, 4),
+
+ /* SRC_FSYS */
+ MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
+ MUX(CLK_MOUT_MMC2, "mout_mmc2", group_sclk_p, SRC_FSYS, 8, 4),
+ MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4),
+ MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4),
+
+ /* SRC_PERIL0 */
+ MUX(CLK_MOUT_UART3, "mout_uart3", group_sclk_p, SRC_PERIL0, 12, 4),
+ MUX(CLK_MOUT_UART2, "mout_uart2", group_sclk_p, SRC_PERIL0, 8, 4),
+ MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
+ MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4),
+
+ /* SRC_PERIL1 */
+ MUX(CLK_MOUT_SPI2, "mout_spi2", group_sclk_p, SRC_PERIL1, 24, 4),
+ MUX(CLK_MOUT_SPI1, "mout_spi1", group_sclk_p, SRC_PERIL1, 20, 4),
+ MUX(CLK_MOUT_SPI0, "mout_spi0", group_sclk_p, SRC_PERIL1, 16, 4),
+ MUX(CLK_MOUT_SPDIF, "mout_spdif", group_spdif_p, SRC_PERIL1, 8, 4),
+ MUX(CLK_MOUT_AUDIO2, "mout_audio2", group_sclk_audio2_p, SRC_PERIL1,
+ 4, 4),
+ MUX(CLK_MOUT_AUDIO1, "mout_audio1", group_sclk_audio1_p, SRC_PERIL1,
+ 0, 4),
+
+ /* SRC_CPU */
+ MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p,
+ SRC_CPU, 24, 1),
+ MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1),
+ MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1, 0,
+ CLK_MUX_READ_ONLY),
+ MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
+
+ /* SRC_CAM1 */
+ MUX(CLK_MOUT_PXLASYNC_CSIS1_FIMC, "mout_pxlasync_csis1",
+ group_fimc_lclk_p, SRC_CAM1, 20, 1),
+ MUX(CLK_MOUT_PXLASYNC_CSIS0_FIMC, "mout_pxlasync_csis0",
+ group_fimc_lclk_p, SRC_CAM1, 16, 1),
+ MUX(CLK_MOUT_JPEG, "mout_jpeg", mout_jpeg_p, SRC_CAM1, 8, 1),
+ MUX(CLK_MOUT_JPEG1, "mout_jpeg_1", mout_jpeg1_p, SRC_CAM1, 4, 1),
+ MUX(CLK_MOUT_JPEG0, "mout_jpeg_0", group_mout_mpll_user_t_p, SRC_CAM1,
+ 0, 1),
+
+ /* SRC_TOP_ISP0 */
+ MUX(CLK_MOUT_ACLK_ISP0_300, "mout_aclk_isp0_300",
+ group_aclk_isp0_300_p, SRC_TOP_ISP0, 8, 1),
+ MUX(CLK_MOUT_ACLK_ISP0_400, "mout_aclk_isp0_400_user",
+ group_aclk_isp0_400_user_p, SRC_TOP_ISP0, 4, 1),
+ MUX(CLK_MOUT_ACLK_ISP0_300_USER, "mout_aclk_isp0_300_user",
+ group_aclk_isp0_300_user_p, SRC_TOP_ISP0, 0, 1),
+
+ /* SRC_TOP_ISP1 */
+ MUX(CLK_MOUT_ACLK_ISP1_300, "mout_aclk_isp1_300",
+ group_aclk_isp0_300_p, SRC_TOP_ISP1, 4, 1),
+ MUX(CLK_MOUT_ACLK_ISP1_300_USER, "mout_aclk_isp1_300_user",
+ group_aclk_isp1_300_user_p, SRC_TOP_ISP1, 0, 1),
+};
+
+static struct samsung_div_clock exynos4415_div_clks[] __initdata = {
+ /*
+	 * NOTE: The following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+
+ /* DIV_LEFTBUS */
+ DIV(CLK_DIV_GPL, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
+ DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 4),
+
+ /* DIV_RIGHTBUS */
+ DIV(CLK_DIV_GPR, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
+ DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 4),
+
+ /* DIV_TOP */
+ DIV(CLK_DIV_ACLK_400_MCUISP, "div_aclk_400_mcuisp",
+ "mout_aclk_400_mcuisp", DIV_TOP, 24, 3),
+ DIV(CLK_DIV_EBI, "div_ebi", "mout_ebi_1", DIV_TOP, 16, 3),
+ DIV(CLK_DIV_ACLK_200, "div_aclk_200", "mout_aclk_200", DIV_TOP, 12, 3),
+ DIV(CLK_DIV_ACLK_160, "div_aclk_160", "mout_aclk_160", DIV_TOP, 8, 3),
+ DIV(CLK_DIV_ACLK_100, "div_aclk_100", "mout_aclk_100", DIV_TOP, 4, 4),
+ DIV(CLK_DIV_ACLK_266, "div_aclk_266", "mout_aclk_266", DIV_TOP, 0, 3),
+
+ /* DIV_CAM */
+ DIV(CLK_DIV_CSIS1, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
+ DIV(CLK_DIV_CSIS0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
+ DIV(CLK_DIV_CAM1, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
+ DIV(CLK_DIV_FIMC3_LCLK, "div_fimc3_lclk", "mout_fimc3_lclk", DIV_CAM,
+ 12, 4),
+ DIV(CLK_DIV_FIMC2_LCLK, "div_fimc2_lclk", "mout_fimc2_lclk", DIV_CAM,
+ 8, 4),
+ DIV(CLK_DIV_FIMC1_LCLK, "div_fimc1_lclk", "mout_fimc1_lclk", DIV_CAM,
+ 4, 4),
+ DIV(CLK_DIV_FIMC0_LCLK, "div_fimc0_lclk", "mout_fimc0_lclk", DIV_CAM,
+ 0, 4),
+
+ /* DIV_TV */
+ DIV(CLK_DIV_TV_BLK, "div_tv_blk", "mout_g3d_pll", DIV_TV, 0, 4),
+
+ /* DIV_MFC */
+ DIV(CLK_DIV_MFC, "div_mfc", "mout_mfc", DIV_MFC, 0, 4),
+
+ /* DIV_G3D */
+ DIV(CLK_DIV_G3D, "div_g3d", "mout_g3d", DIV_G3D, 0, 4),
+
+ /* DIV_LCD */
+ DIV_F(CLK_DIV_MIPI0_PRE, "div_mipi0_pre", "div_mipi0", DIV_LCD, 20, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_MIPI0, "div_mipi0", "mout_mipi0", DIV_LCD, 16, 4),
+ DIV(CLK_DIV_FIMD0, "div_fimd0", "mout_fimd0", DIV_LCD, 0, 4),
+
+ /* DIV_ISP */
+ DIV(CLK_DIV_UART_ISP, "div_uart_isp", "mout_uart_isp", DIV_ISP, 28, 4),
+ DIV_F(CLK_DIV_SPI1_ISP_PRE, "div_spi1_isp_pre", "div_spi1_isp",
+ DIV_ISP, 20, 8, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4),
+ DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp",
+ DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 4, 4),
+ DIV(CLK_DIV_PWM_ISP, "div_pwm_isp", "mout_pwm_isp", DIV_ISP, 0, 4),
+
+ /* DIV_MAUDIO */
+ DIV(CLK_DIV_PCM0, "div_pcm0", "div_audio0", DIV_MAUDIO, 4, 8),
+ DIV(CLK_DIV_AUDIO0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
+
+ /* DIV_FSYS0 */
+ DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_TSADC, "div_tsadc", "mout_tsadc", DIV_FSYS0, 0, 4),
+
+ /* DIV_FSYS1 */
+ DIV_F(CLK_DIV_MMC1_PRE, "div_mmc1_pre", "div_mmc1", DIV_FSYS1, 24, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_MMC1, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+ DIV_F(CLK_DIV_MMC0_PRE, "div_mmc0_pre", "div_mmc0", DIV_FSYS1, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+
+ /* DIV_FSYS2 */
+ DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2_pre", "div_mmc2", DIV_FSYS2, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+	DIV_F(CLK_DIV_MMC2, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+
+ /* DIV_PERIL0 */
+ DIV(CLK_DIV_UART3, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
+ DIV(CLK_DIV_UART2, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
+ DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
+ DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
+
+ /* DIV_PERIL1 */
+ DIV_F(CLK_DIV_SPI1_PRE, "div_spi1_pre", "div_spi1", DIV_PERIL1, 24, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI1, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
+ DIV_F(CLK_DIV_SPI0_PRE, "div_spi0_pre", "div_spi0", DIV_PERIL1, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
+
+ /* DIV_PERIL2 */
+ DIV_F(CLK_DIV_SPI2_PRE, "div_spi2_pre", "div_spi2", DIV_PERIL2, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_SPI2, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
+
+ /* DIV_PERIL4 */
+ DIV(CLK_DIV_PCM2, "div_pcm2", "div_audio2", DIV_PERIL4, 20, 8),
+ DIV(CLK_DIV_AUDIO2, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
+	DIV(CLK_DIV_PCM1, "div_pcm1", "div_audio1", DIV_PERIL4, 4, 8),
+ DIV(CLK_DIV_AUDIO1, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
+
+ /* DIV_PERIL5 */
+ DIV(CLK_DIV_I2S1, "div_i2s1", "div_audio1", DIV_PERIL5, 0, 6),
+
+ /* DIV_CAM1 */
+ DIV(CLK_DIV_PXLASYNC_CSIS1_FIMC, "div_pxlasync_csis1_fimc",
+ "mout_pxlasync_csis1", DIV_CAM1, 24, 4),
+ DIV(CLK_DIV_PXLASYNC_CSIS0_FIMC, "div_pxlasync_csis0_fimc",
+ "mout_pxlasync_csis0", DIV_CAM1, 20, 4),
+ DIV(CLK_DIV_JPEG, "div_jpeg", "mout_jpeg", DIV_CAM1, 0, 4),
+
+ /* DIV_CPU0 */
+ DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV_F(CLK_DIV_APLL, "div_apll", "mout_apll", DIV_CPU0, 24, 3,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+ DIV(CLK_DIV_PCLK_DBG, "div_pclk_dbg", "div_core2", DIV_CPU0, 20, 3),
+ DIV(CLK_DIV_ATB, "div_atb", "div_core2", DIV_CPU0, 16, 3),
+ DIV(CLK_DIV_PERIPH, "div_periph", "div_core2", DIV_CPU0, 12, 3),
+ DIV(CLK_DIV_COREM1, "div_corem1", "div_core2", DIV_CPU0, 8, 3),
+ DIV(CLK_DIV_COREM0, "div_corem0", "div_core2", DIV_CPU0, 4, 3),
+ DIV_F(CLK_DIV_CORE, "div_core", "mout_core", DIV_CPU0, 0, 3,
+ CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+
+ /* DIV_CPU1 */
+ DIV(CLK_DIV_HPM, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
+ DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
+};
+
+static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = {
+ /*
+	 * NOTE: The following table is sorted by register address in ascending
+ * order and then bitfield shift in descending order, as it is done
+ * in the User's Manual. When adding new entries, please make sure
+ * that the order is preserved, to avoid merge conflicts and make
+ * further work with defined data easier.
+ */
+
+ /* GATE_IP_LEFTBUS */
+ GATE(CLK_ASYNC_G3D, "async_g3d", "div_aclk_100", GATE_IP_LEFTBUS, 6,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_MFCL, "async_mfcl", "div_aclk_100", GATE_IP_LEFTBUS, 4,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_TVX, "async_tvx", "div_aclk_100", GATE_IP_LEFTBUS, 3,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMULEFT, "ppmuleft", "div_aclk_100", GATE_IP_LEFTBUS, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GPIO_LEFT, "gpio_left", "div_aclk_100", GATE_IP_LEFTBUS, 0,
+ CLK_IGNORE_UNUSED, 0),
+
+ /* GATE_IP_IMAGE */
+ GATE(CLK_PPMUIMAGE, "ppmuimage", "div_aclk_100", GATE_IP_IMAGE,
+ 9, 0, 0),
+ GATE(CLK_QEMDMA2, "qe_mdma2", "div_aclk_100", GATE_IP_IMAGE,
+ 8, 0, 0),
+ GATE(CLK_QEROTATOR, "qe_rotator", "div_aclk_100", GATE_IP_IMAGE,
+ 7, 0, 0),
+	GATE(CLK_SMMUMDMA2, "smmu_mdma2", "div_aclk_100", GATE_IP_IMAGE,
+ 5, 0, 0),
+ GATE(CLK_SMMUROTATOR, "smmu_rotator", "div_aclk_100", GATE_IP_IMAGE,
+ 4, 0, 0),
+ GATE(CLK_MDMA2, "mdma2", "div_aclk_100", GATE_IP_IMAGE, 2, 0, 0),
+ GATE(CLK_ROTATOR, "rotator", "div_aclk_100", GATE_IP_IMAGE, 1, 0, 0),
+
+ /* GATE_IP_RIGHTBUS */
+ GATE(CLK_ASYNC_ISPMX, "async_ispmx", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 9, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_MAUDIOX, "async_maudiox", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 7, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_MFCR, "async_mfcr", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 6, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_FSYSD, "async_fsysd", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 5, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_LCD0X, "async_lcd0x", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 3, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_ASYNC_CAMX, "async_camx", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 2, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMURIGHT, "ppmuright", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 1, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GPIO_RIGHT, "gpio_right", "div_aclk_100",
+ GATE_IP_RIGHTBUS, 0, CLK_IGNORE_UNUSED, 0),
+
+ /* GATE_IP_PERIR */
+ GATE(CLK_ANTIRBK_APBIF, "antirbk_apbif", "div_aclk_100",
+ GATE_IP_PERIR, 24, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_EFUSE_WRITER_APBIF, "efuse_writer_apbif", "div_aclk_100",
+ GATE_IP_PERIR, 23, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_MONOCNT, "monocnt", "div_aclk_100", GATE_IP_PERIR, 22,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC6, "tzpc6", "div_aclk_100", GATE_IP_PERIR, 21,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PROVISIONKEY1, "provisionkey1", "div_aclk_100",
+ GATE_IP_PERIR, 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PROVISIONKEY0, "provisionkey0", "div_aclk_100",
+ GATE_IP_PERIR, 19, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_ISPPART, "cmu_isppart", "div_aclk_100", GATE_IP_PERIR, 18,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "div_aclk_100",
+ GATE_IP_PERIR, 17, 0, 0),
+ GATE(CLK_KEYIF, "keyif", "div_aclk_100", GATE_IP_PERIR, 16, 0, 0),
+ GATE(CLK_RTC, "rtc", "div_aclk_100", GATE_IP_PERIR, 15, 0, 0),
+ GATE(CLK_WDT, "wdt", "div_aclk_100", GATE_IP_PERIR, 14, 0, 0),
+ GATE(CLK_MCT, "mct", "div_aclk_100", GATE_IP_PERIR, 13, 0, 0),
+ GATE(CLK_SECKEY, "seckey", "div_aclk_100", GATE_IP_PERIR, 12,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_HDMI_CEC, "hdmi_cec", "div_aclk_100", GATE_IP_PERIR, 11,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC5, "tzpc5", "div_aclk_100", GATE_IP_PERIR, 10,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC4, "tzpc4", "div_aclk_100", GATE_IP_PERIR, 9,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC3, "tzpc3", "div_aclk_100", GATE_IP_PERIR, 8,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC2, "tzpc2", "div_aclk_100", GATE_IP_PERIR, 7,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC1, "tzpc1", "div_aclk_100", GATE_IP_PERIR, 6,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_TZPC0, "tzpc0", "div_aclk_100", GATE_IP_PERIR, 5,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_COREPART, "cmu_corepart", "div_aclk_100", GATE_IP_PERIR, 4,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CMU_TOPPART, "cmu_toppart", "div_aclk_100", GATE_IP_PERIR, 3,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PMU_APBIF, "pmu_apbif", "div_aclk_100", GATE_IP_PERIR, 2,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SYSREG, "sysreg", "div_aclk_100", GATE_IP_PERIR, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_CHIP_ID, "chip_id", "div_aclk_100", GATE_IP_PERIR, 0,
+ CLK_IGNORE_UNUSED, 0),
+
+	/* GATE_SCLK_CAM - incomplete */
+ GATE(CLK_SCLK_PXLAYSNC_CSIS1_FIMC, "sclk_pxlasync_csis1_fimc",
+ "div_pxlasync_csis1_fimc", GATE_SCLK_CAM, 11,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_PXLAYSNC_CSIS0_FIMC, "sclk_pxlasync_csis0_fimc",
+ "div_pxlasync_csis0_fimc", GATE_SCLK_CAM,
+ 10, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_jpeg",
+ GATE_SCLK_CAM, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_CSIS1, "sclk_csis1", "div_csis1",
+ GATE_SCLK_CAM, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_CSIS0, "sclk_csis0", "div_csis0",
+ GATE_SCLK_CAM, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1",
+ GATE_SCLK_CAM, 5, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_FIMC3_LCLK, "sclk_fimc3_lclk", "div_fimc3_lclk",
+ GATE_SCLK_CAM, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_FIMC2_LCLK, "sclk_fimc2_lclk", "div_fimc2_lclk",
+ GATE_SCLK_CAM, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_FIMC1_LCLK, "sclk_fimc1_lclk", "div_fimc1_lclk",
+ GATE_SCLK_CAM, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_FIMC0_LCLK, "sclk_fimc0_lclk", "div_fimc0_lclk",
+ GATE_SCLK_CAM, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_TV */
+ GATE(CLK_SCLK_PIXEL, "sclk_pixel", "div_tv_blk",
+ GATE_SCLK_TV, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
+ GATE_SCLK_TV, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MIXER, "sclk_mixer", "div_tv_blk",
+ GATE_SCLK_TV, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_MFC */
+ GATE(CLK_SCLK_MFC, "sclk_mfc", "div_mfc",
+ GATE_SCLK_MFC, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_G3D */
+ GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d",
+ GATE_SCLK_G3D, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_LCD */
+ GATE(CLK_SCLK_MIPIDPHY4L, "sclk_mipidphy4l", "div_mipi0",
+ GATE_SCLK_LCD, 4, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi0_pre",
+ GATE_SCLK_LCD, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MDNIE0, "sclk_mdnie0", "div_fimd0",
+ GATE_SCLK_LCD, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0",
+ GATE_SCLK_LCD, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_MAUDIO */
+ GATE(CLK_SCLK_PCM0, "sclk_pcm0", "div_pcm0",
+ GATE_SCLK_MAUDIO, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0",
+ GATE_SCLK_MAUDIO, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_FSYS */
+ GATE(CLK_SCLK_TSADC, "sclk_tsadc", "div_tsadc_pre",
+ GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi",
+ GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc2_pre",
+ GATE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre",
+ GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre",
+ GATE_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_SCLK_PERIL */
+ GATE(CLK_SCLK_I2S, "sclk_i2s1", "div_i2s1",
+ GATE_SCLK_PERIL, 18, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_PCM2, "sclk_pcm2", "div_pcm2",
+ GATE_SCLK_PERIL, 16, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_PCM1, "sclk_pcm1", "div_pcm1",
+ GATE_SCLK_PERIL, 15, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2",
+ GATE_SCLK_PERIL, 14, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1",
+ GATE_SCLK_PERIL, 13, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
+ GATE_SCLK_PERIL, 10, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi2_pre",
+ GATE_SCLK_PERIL, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi1_pre",
+ GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre",
+ GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3",
+ GATE_SCLK_PERIL, 3, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
+ GATE_SCLK_PERIL, 2, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
+ GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
+ GATE_SCLK_PERIL, 0, CLK_SET_RATE_PARENT, 0),
+
+ /* GATE_IP_CAM */
+ GATE(CLK_SMMUFIMC_LITE2, "smmufimc_lite2", "div_aclk_160", GATE_IP_CAM,
+ 22, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_FIMC_LITE2, "fimc_lite2", "div_aclk_160", GATE_IP_CAM,
+ 20, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PIXELASYNCM1, "pixelasyncm1", "div_aclk_160", GATE_IP_CAM,
+ 18, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PIXELASYNCM0, "pixelasyncm0", "div_aclk_160", GATE_IP_CAM,
+ 17, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMUCAMIF, "ppmucamif", "div_aclk_160", GATE_IP_CAM,
+ 16, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMUJPEG, "smmujpeg", "div_aclk_160", GATE_IP_CAM, 11, 0, 0),
+ GATE(CLK_SMMUFIMC3, "smmufimc3", "div_aclk_160", GATE_IP_CAM, 10, 0, 0),
+ GATE(CLK_SMMUFIMC2, "smmufimc2", "div_aclk_160", GATE_IP_CAM, 9, 0, 0),
+ GATE(CLK_SMMUFIMC1, "smmufimc1", "div_aclk_160", GATE_IP_CAM, 8, 0, 0),
+ GATE(CLK_SMMUFIMC0, "smmufimc0", "div_aclk_160", GATE_IP_CAM, 7, 0, 0),
+ GATE(CLK_JPEG, "jpeg", "div_aclk_160", GATE_IP_CAM, 6, 0, 0),
+ GATE(CLK_CSIS1, "csis1", "div_aclk_160", GATE_IP_CAM, 5, 0, 0),
+ GATE(CLK_CSIS0, "csis0", "div_aclk_160", GATE_IP_CAM, 4, 0, 0),
+ GATE(CLK_FIMC3, "fimc3", "div_aclk_160", GATE_IP_CAM, 3, 0, 0),
+ GATE(CLK_FIMC2, "fimc2", "div_aclk_160", GATE_IP_CAM, 2, 0, 0),
+ GATE(CLK_FIMC1, "fimc1", "div_aclk_160", GATE_IP_CAM, 1, 0, 0),
+ GATE(CLK_FIMC0, "fimc0", "div_aclk_160", GATE_IP_CAM, 0, 0, 0),
+
+ /* GATE_IP_TV */
+ GATE(CLK_PPMUTV, "ppmutv", "div_aclk_100", GATE_IP_TV, 5, 0, 0),
+ GATE(CLK_SMMUTV, "smmutv", "div_aclk_100", GATE_IP_TV, 4, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "div_aclk_100", GATE_IP_TV, 3, 0, 0),
+ GATE(CLK_MIXER, "mixer", "div_aclk_100", GATE_IP_TV, 1, 0, 0),
+ GATE(CLK_VP, "vp", "div_aclk_100", GATE_IP_TV, 0, 0, 0),
+
+ /* GATE_IP_MFC */
+ GATE(CLK_PPMUMFC_R, "ppmumfc_r", "div_aclk_200", GATE_IP_MFC, 4,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_PPMUMFC_L, "ppmumfc_l", "div_aclk_200", GATE_IP_MFC, 3,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMUMFC_R, "smmumfc_r", "div_aclk_200", GATE_IP_MFC, 2, 0, 0),
+ GATE(CLK_SMMUMFC_L, "smmumfc_l", "div_aclk_200", GATE_IP_MFC, 1, 0, 0),
+ GATE(CLK_MFC, "mfc", "div_aclk_200", GATE_IP_MFC, 0, 0, 0),
+
+ /* GATE_IP_G3D */
+ GATE(CLK_PPMUG3D, "ppmug3d", "div_aclk_200", GATE_IP_G3D, 1,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_G3D, "g3d", "div_aclk_200", GATE_IP_G3D, 0, 0, 0),
+
+ /* GATE_IP_LCD */
+ GATE(CLK_PPMULCD0, "ppmulcd0", "div_aclk_160", GATE_IP_LCD, 5,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_SMMUFIMD0, "smmufimd0", "div_aclk_160", GATE_IP_LCD, 4, 0, 0),
+ GATE(CLK_DSIM0, "dsim0", "div_aclk_160", GATE_IP_LCD, 3, 0, 0),
+ GATE(CLK_SMIES, "smies", "div_aclk_160", GATE_IP_LCD, 2, 0, 0),
+ GATE(CLK_MIE0, "mie0", "div_aclk_160", GATE_IP_LCD, 1, 0, 0),
+ GATE(CLK_FIMD0, "fimd0", "div_aclk_160", GATE_IP_LCD, 0, 0, 0),
+
+ /* GATE_IP_FSYS */
+ GATE(CLK_TSADC, "tsadc", "div_aclk_200", GATE_IP_FSYS, 20, 0, 0),
+ GATE(CLK_PPMUFILE, "ppmufile", "div_aclk_200", GATE_IP_FSYS, 17,
+ CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_NFCON, "nfcon", "div_aclk_200", GATE_IP_FSYS, 16, 0, 0),
+ GATE(CLK_USBDEVICE, "usbdevice", "div_aclk_200", GATE_IP_FSYS, 13,
+ 0, 0),
+ GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0),
+ GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0),
+ GATE(CLK_SDMMC2, "sdmmc2", "div_aclk_200", GATE_IP_FSYS, 7, 0, 0),
+ GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0),
+ GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "div_aclk_200", GATE_IP_FSYS, 0, 0, 0),
+
+ /* GATE_IP_PERIL */
+ GATE(CLK_SPDIF, "spdif", "div_aclk_100", GATE_IP_PERIL, 26, 0, 0),
+ GATE(CLK_PWM, "pwm", "div_aclk_100", GATE_IP_PERIL, 24, 0, 0),
+ GATE(CLK_PCM2, "pcm2", "div_aclk_100", GATE_IP_PERIL, 23, 0, 0),
+ GATE(CLK_PCM1, "pcm1", "div_aclk_100", GATE_IP_PERIL, 22, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "div_aclk_100", GATE_IP_PERIL, 20, 0, 0),
+ GATE(CLK_SPI2, "spi2", "div_aclk_100", GATE_IP_PERIL, 18, 0, 0),
+ GATE(CLK_SPI1, "spi1", "div_aclk_100", GATE_IP_PERIL, 17, 0, 0),
+ GATE(CLK_SPI0, "spi0", "div_aclk_100", GATE_IP_PERIL, 16, 0, 0),
+ GATE(CLK_I2CHDMI, "i2chdmi", "div_aclk_100", GATE_IP_PERIL, 14, 0, 0),
+ GATE(CLK_I2C7, "i2c7", "div_aclk_100", GATE_IP_PERIL, 13, 0, 0),
+ GATE(CLK_I2C6, "i2c6", "div_aclk_100", GATE_IP_PERIL, 12, 0, 0),
+ GATE(CLK_I2C5, "i2c5", "div_aclk_100", GATE_IP_PERIL, 11, 0, 0),
+ GATE(CLK_I2C4, "i2c4", "div_aclk_100", GATE_IP_PERIL, 10, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "div_aclk_100", GATE_IP_PERIL, 9, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0),
+ GATE(CLK_UART3, "uart3", "div_aclk_100", GATE_IP_PERIL, 3, 0, 0),
+ GATE(CLK_UART2, "uart2", "div_aclk_100", GATE_IP_PERIL, 2, 0, 0),
+ GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0),
+ GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0),
+};
+
+/*
+ * APLL & MPLL & BPLL & ISP_PLL & DISP_PLL & G3D_PLL
+ */
+static struct samsung_pll_rate_table exynos4415_pll_rates[] = {
+ PLL_35XX_RATE(1600000000, 400, 3, 1),
+ PLL_35XX_RATE(1500000000, 250, 2, 1),
+ PLL_35XX_RATE(1400000000, 175, 3, 0),
+ PLL_35XX_RATE(1300000000, 325, 3, 1),
+ PLL_35XX_RATE(1200000000, 400, 4, 1),
+ PLL_35XX_RATE(1100000000, 275, 3, 1),
+ PLL_35XX_RATE(1066000000, 533, 6, 1),
+ PLL_35XX_RATE(1000000000, 250, 3, 1),
+ PLL_35XX_RATE(960000000, 320, 4, 1),
+ PLL_35XX_RATE(900000000, 300, 4, 1),
+ PLL_35XX_RATE(850000000, 425, 6, 1),
+ PLL_35XX_RATE(800000000, 200, 3, 1),
+ PLL_35XX_RATE(700000000, 175, 3, 1),
+ PLL_35XX_RATE(667000000, 667, 12, 1),
+ PLL_35XX_RATE(600000000, 400, 4, 2),
+ PLL_35XX_RATE(550000000, 275, 3, 2),
+ PLL_35XX_RATE(533000000, 533, 6, 2),
+ PLL_35XX_RATE(520000000, 260, 3, 2),
+ PLL_35XX_RATE(500000000, 250, 3, 2),
+ PLL_35XX_RATE(440000000, 220, 3, 2),
+ PLL_35XX_RATE(400000000, 200, 3, 2),
+ PLL_35XX_RATE(350000000, 175, 3, 2),
+ PLL_35XX_RATE(300000000, 300, 3, 3),
+ PLL_35XX_RATE(266000000, 266, 3, 3),
+ PLL_35XX_RATE(200000000, 200, 3, 3),
+ PLL_35XX_RATE(160000000, 160, 3, 3),
+ PLL_35XX_RATE(100000000, 200, 3, 4),
+ { /* sentinel */ }
+};
+
+/* EPLL */
+static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
+ PLL_36XX_RATE(800000000, 200, 3, 1, 0),
+ PLL_36XX_RATE(288000000, 96, 2, 2, 0),
+ PLL_36XX_RATE(192000000, 128, 2, 3, 0),
+ PLL_36XX_RATE(144000000, 96, 2, 3, 0),
+ PLL_36XX_RATE(96000000, 128, 2, 4, 0),
+ PLL_36XX_RATE(84000000, 112, 2, 4, 0),
+ PLL_36XX_RATE(80750011, 107, 2, 4, 43691),
+ PLL_36XX_RATE(73728004, 98, 2, 4, 19923),
+ PLL_36XX_RATE(67987602, 271, 3, 5, 62285),
+ PLL_36XX_RATE(65911004, 175, 2, 5, 49982),
+ PLL_36XX_RATE(50000000, 200, 3, 5, 0),
+ PLL_36XX_RATE(49152003, 131, 2, 5, 4719),
+ PLL_36XX_RATE(48000000, 128, 2, 5, 0),
+ PLL_36XX_RATE(45250000, 181, 3, 5, 0),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
+ [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+ [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
+ "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
+ [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
+ ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
+ [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
+ "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
+};
+
+static void __init exynos4415_cmu_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (!exynos4415_ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
+ exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
+ exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
+ exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
+ exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
+
+ samsung_clk_register_fixed_factor(exynos4415_ctx,
+ exynos4415_fixed_factor_clks,
+ ARRAY_SIZE(exynos4415_fixed_factor_clks));
+ samsung_clk_register_fixed_rate(exynos4415_ctx,
+ exynos4415_fixed_rate_clks,
+ ARRAY_SIZE(exynos4415_fixed_rate_clks));
+
+ samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
+ ARRAY_SIZE(exynos4415_plls), reg_base);
+ samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
+ ARRAY_SIZE(exynos4415_mux_clks));
+ samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
+ ARRAY_SIZE(exynos4415_div_clks));
+ samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
+ ARRAY_SIZE(exynos4415_gate_clks));
+
+ exynos4415_clk_sleep_init();
+
+ samsung_clk_of_add_provider(np, exynos4415_ctx);
+}
+CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
+
+/*
+ * CMU DMC
+ */
+
+#define MPLL_LOCK 0x008
+#define MPLL_CON0 0x108
+#define MPLL_CON1 0x10c
+#define MPLL_CON2 0x110
+#define BPLL_LOCK 0x118
+#define BPLL_CON0 0x218
+#define BPLL_CON1 0x21c
+#define BPLL_CON2 0x220
+#define SRC_DMC 0x300
+#define DIV_DMC1 0x504
+
+enum exynos4415_dmc_plls {
+ mpll, bpll,
+ nr_dmc_plls,
+};
+
+static struct samsung_clk_provider *exynos4415_dmc_ctx;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
+
+static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
+ MPLL_LOCK,
+ MPLL_CON0,
+ MPLL_CON1,
+ MPLL_CON2,
+ BPLL_LOCK,
+ BPLL_CON0,
+ BPLL_CON1,
+ BPLL_CON2,
+ SRC_DMC,
+ DIV_DMC1,
+};
+
+static int exynos4415_dmc_clk_suspend(void)
+{
+ samsung_clk_save(exynos4415_dmc_ctx->reg_base,
+ exynos4415_dmc_clk_regs,
+ ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
+ return 0;
+}
+
+static void exynos4415_dmc_clk_resume(void)
+{
+ samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
+ exynos4415_dmc_clk_regs,
+ ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
+}
+
+static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
+ .suspend = exynos4415_dmc_clk_suspend,
+ .resume = exynos4415_dmc_clk_resume,
+};
+
+static void exynos4415_dmc_clk_sleep_init(void)
+{
+ exynos4415_dmc_clk_regs =
+ samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
+ ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
+ if (!exynos4415_dmc_clk_regs) {
+ pr_warn("%s: Failed to allocate sleep save data\n", __func__);
+ return;
+ }
+
+ register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
+}
+#else
+static inline void exynos4415_dmc_clk_sleep_init(void) { }
+#endif /* CONFIG_PM_SLEEP */
+
+PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
+PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", };
+PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", };
+
+static struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initdata = {
+ MUX(CLK_DMC_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_DMC, 12, 1),
+ MUX(CLK_DMC_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1),
+ MUX(CLK_DMC_MOUT_DPHY, "mout_dphy", mbpll_p, SRC_DMC, 8, 1),
+ MUX(CLK_DMC_MOUT_DMC_BUS, "mout_dmc_bus", mbpll_p, SRC_DMC, 4, 1),
+};
+
+static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
+ DIV(CLK_DMC_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3),
+ DIV(CLK_DMC_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3),
+ DIV(CLK_DMC_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus",
+ DIV_DMC1, 19, 2),
+ DIV(CLK_DMC_DIV_DMCP, "div_dmcp", "div_dmcd", DIV_DMC1, 15, 3),
+ DIV(CLK_DMC_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
+ DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
+};
+
+static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
+ [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON0, NULL),
+ [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
+ BPLL_LOCK, BPLL_CON0, NULL),
+};
+
+static void __init exynos4415_cmu_dmc_init(struct device_node *np)
+{
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
+ if (!exynos4415_dmc_ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
+ exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
+
+ samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
+ ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
+ samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
+ ARRAY_SIZE(exynos4415_dmc_mux_clks));
+ samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
+ ARRAY_SIZE(exynos4415_dmc_div_clks));
+
+ exynos4415_dmc_clk_sleep_init();
+
+ samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
+}
+CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
+ exynos4415_cmu_dmc_init);
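
For reference, the PLL_35XX_RATE()/PLL_36XX_RATE() entries above are (rate, MDIV, PDIV, SDIV[, KDIV]) tuples. Assuming the usual Samsung PLL equations and the 24 MHz xusbxti reference noted in the fin_pll HACK comment, the output rate is fin * MDIV / (PDIV * 2^SDIV), with the 36xx type adding a 16-bit fractional KDIV to MDIV. A small user-space check of two of the table entries (the helper names below are illustrative, not part of the driver):

#include <assert.h>
#include <stdint.h>

/* 35xx-type PLL: rate = fin * MDIV / (PDIV * 2^SDIV) */
static uint64_t pll35xx_rate(uint64_t fin, unsigned m, unsigned p, unsigned s)
{
	return fin * m / (p << s);
}

/* 36xx-type PLL: MDIV gains a 16-bit fractional part KDIV */
static uint64_t pll36xx_rate(uint64_t fin, unsigned m, unsigned p,
			     unsigned s, int k)
{
	return fin * (m * 65536ULL + k) / 65536 / (p << s);
}

int main(void)
{
	/* PLL_35XX_RATE(800000000, 200, 3, 1) from exynos4415_pll_rates */
	assert(pll35xx_rate(24000000, 200, 3, 1) == 800000000);
	/* PLL_36XX_RATE(288000000, 96, 2, 2, 0) from exynos4415_epll_rates */
	assert(pll36xx_rate(24000000, 96, 2, 2, 0) == 288000000);
	return 0;
}
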
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 2527e39aadcf..e2e5193d1049 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -11,10 +11,8 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include "clk-exynos5260.h"
#include "clk.h"
@@ -22,39 +20,6 @@
#include <dt-bindings/clock/exynos5260-clk.h>
-static LIST_HEAD(clock_reg_cache_list);
-
-struct exynos5260_clock_reg_cache {
- struct list_head node;
- void __iomem *reg_base;
- struct samsung_clk_reg_dump *rdump;
- unsigned int rd_num;
-};
-
-struct exynos5260_cmu_info {
- /* list of pll clocks and respective count */
- struct samsung_pll_clock *pll_clks;
- unsigned int nr_pll_clks;
- /* list of mux clocks and respective count */
- struct samsung_mux_clock *mux_clks;
- unsigned int nr_mux_clks;
- /* list of div clocks and respective count */
- struct samsung_div_clock *div_clks;
- unsigned int nr_div_clks;
- /* list of gate clocks and respective count */
- struct samsung_gate_clock *gate_clks;
- unsigned int nr_gate_clks;
- /* list of fixed clocks and respective count */
- struct samsung_fixed_rate_clock *fixed_clks;
- unsigned int nr_fixed_clks;
- /* total number of clocks with IDs assigned*/
- unsigned int nr_clk_ids;
-
- /* list and number of clocks registers */
- unsigned long *clk_regs;
- unsigned int nr_clk_regs;
-};
-
/*
* Applicable for all 2550 Type PLLS for Exynos5260, listed below
* DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
@@ -113,104 +78,6 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
PLL_36XX_RATE(66000000, 176, 2, 5, 0),
};
-#ifdef CONFIG_PM_SLEEP
-
-static int exynos5260_clk_suspend(void)
-{
- struct exynos5260_clock_reg_cache *cache;
-
- list_for_each_entry(cache, &clock_reg_cache_list, node)
- samsung_clk_save(cache->reg_base, cache->rdump,
- cache->rd_num);
-
- return 0;
-}
-
-static void exynos5260_clk_resume(void)
-{
- struct exynos5260_clock_reg_cache *cache;
-
- list_for_each_entry(cache, &clock_reg_cache_list, node)
- samsung_clk_restore(cache->reg_base, cache->rdump,
- cache->rd_num);
-}
-
-static struct syscore_ops exynos5260_clk_syscore_ops = {
- .suspend = exynos5260_clk_suspend,
- .resume = exynos5260_clk_resume,
-};
-
-static void exynos5260_clk_sleep_init(void __iomem *reg_base,
- unsigned long *rdump,
- unsigned long nr_rdump)
-{
- struct exynos5260_clock_reg_cache *reg_cache;
-
- reg_cache = kzalloc(sizeof(struct exynos5260_clock_reg_cache),
- GFP_KERNEL);
- if (!reg_cache)
- panic("could not allocate register cache.\n");
-
- reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
-
- if (!reg_cache->rdump)
- panic("could not allocate register dump storage.\n");
-
- if (list_empty(&clock_reg_cache_list))
- register_syscore_ops(&exynos5260_clk_syscore_ops);
-
- reg_cache->rd_num = nr_rdump;
- reg_cache->reg_base = reg_base;
- list_add_tail(&reg_cache->node, &clock_reg_cache_list);
-}
-
-#else
-static void exynos5260_clk_sleep_init(void __iomem *reg_base,
- unsigned long *rdump,
- unsigned long nr_rdump){}
-#endif
-
-/*
- * Common function which registers plls, muxes, dividers and gates
- * for each CMU. It also add CMU register list to register cache.
- */
-
-void __init exynos5260_cmu_register_one(struct device_node *np,
- struct exynos5260_cmu_info *cmu)
-{
- void __iomem *reg_base;
- struct samsung_clk_provider *ctx;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
-
- ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
- if (!ctx)
- panic("%s: unable to alllocate ctx\n", __func__);
-
- if (cmu->pll_clks)
- samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
- reg_base);
- if (cmu->mux_clks)
- samsung_clk_register_mux(ctx, cmu->mux_clks,
- cmu->nr_mux_clks);
- if (cmu->div_clks)
- samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
- if (cmu->gate_clks)
- samsung_clk_register_gate(ctx, cmu->gate_clks,
- cmu->nr_gate_clks);
- if (cmu->fixed_clks)
- samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
- cmu->nr_fixed_clks);
- if (cmu->clk_regs)
- exynos5260_clk_sleep_init(reg_base, cmu->clk_regs,
- cmu->nr_clk_regs);
-
- samsung_clk_of_add_provider(np, ctx);
-}
-
-
/* CMU_AUD */
static unsigned long aud_clk_regs[] __initdata = {
@@ -268,7 +135,7 @@ struct samsung_gate_clock aud_gate_clks[] __initdata = {
static void __init exynos5260_clk_aud_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = aud_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks);
@@ -280,7 +147,7 @@ static void __init exynos5260_clk_aud_init(struct device_node *np)
cmu.clk_regs = aud_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud",
@@ -458,7 +325,7 @@ struct samsung_gate_clock disp_gate_clks[] __initdata = {
static void __init exynos5260_clk_disp_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = disp_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks);
@@ -470,7 +337,7 @@ static void __init exynos5260_clk_disp_init(struct device_node *np)
cmu.clk_regs = disp_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp",
@@ -522,7 +389,7 @@ static struct samsung_pll_clock egl_pll_clks[] __initdata = {
static void __init exynos5260_clk_egl_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.pll_clks = egl_pll_clks;
cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks);
@@ -534,7 +401,7 @@ static void __init exynos5260_clk_egl_init(struct device_node *np)
cmu.clk_regs = egl_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl",
@@ -624,7 +491,7 @@ struct samsung_gate_clock fsys_gate_clks[] __initdata = {
static void __init exynos5260_clk_fsys_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = fsys_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks);
@@ -634,7 +501,7 @@ static void __init exynos5260_clk_fsys_init(struct device_node *np)
cmu.clk_regs = fsys_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys",
@@ -713,7 +580,7 @@ struct samsung_gate_clock g2d_gate_clks[] __initdata = {
static void __init exynos5260_clk_g2d_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = g2d_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks);
@@ -725,7 +592,7 @@ static void __init exynos5260_clk_g2d_init(struct device_node *np)
cmu.clk_regs = g2d_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d",
@@ -774,7 +641,7 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
static void __init exynos5260_clk_g3d_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.pll_clks = g3d_pll_clks;
cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks);
@@ -788,7 +655,7 @@ static void __init exynos5260_clk_g3d_init(struct device_node *np)
cmu.clk_regs = g3d_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d",
@@ -909,7 +776,7 @@ struct samsung_gate_clock gscl_gate_clks[] __initdata = {
static void __init exynos5260_clk_gscl_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = gscl_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks);
@@ -921,7 +788,7 @@ static void __init exynos5260_clk_gscl_init(struct device_node *np)
cmu.clk_regs = gscl_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl",
@@ -1028,7 +895,7 @@ struct samsung_gate_clock isp_gate_clks[] __initdata = {
static void __init exynos5260_clk_isp_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = isp_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks);
@@ -1040,7 +907,7 @@ static void __init exynos5260_clk_isp_init(struct device_node *np)
cmu.clk_regs = isp_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp",
@@ -1092,7 +959,7 @@ static struct samsung_pll_clock kfc_pll_clks[] __initdata = {
static void __init exynos5260_clk_kfc_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.pll_clks = kfc_pll_clks;
cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks);
@@ -1104,7 +971,7 @@ static void __init exynos5260_clk_kfc_init(struct device_node *np)
cmu.clk_regs = kfc_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc",
@@ -1148,7 +1015,7 @@ struct samsung_gate_clock mfc_gate_clks[] __initdata = {
static void __init exynos5260_clk_mfc_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = mfc_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks);
@@ -1160,7 +1027,7 @@ static void __init exynos5260_clk_mfc_init(struct device_node *np)
cmu.clk_regs = mfc_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc",
@@ -1295,7 +1162,7 @@ static struct samsung_pll_clock mif_pll_clks[] __initdata = {
static void __init exynos5260_clk_mif_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.pll_clks = mif_pll_clks;
cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks);
@@ -1309,7 +1176,7 @@ static void __init exynos5260_clk_mif_init(struct device_node *np)
cmu.clk_regs = mif_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif",
@@ -1503,7 +1370,7 @@ struct samsung_gate_clock peri_gate_clks[] __initdata = {
static void __init exynos5260_clk_peri_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.mux_clks = peri_mux_clks;
cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks);
@@ -1515,7 +1382,7 @@ static void __init exynos5260_clk_peri_init(struct device_node *np)
cmu.clk_regs = peri_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri",
@@ -1959,7 +1826,7 @@ static struct samsung_pll_clock top_pll_clks[] __initdata = {
static void __init exynos5260_clk_top_init(struct device_node *np)
{
- struct exynos5260_cmu_info cmu = {0};
+ struct samsung_cmu_info cmu = {0};
cmu.pll_clks = top_pll_clks;
cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks);
@@ -1975,7 +1842,7 @@ static void __init exynos5260_clk_top_init(struct device_node *np)
cmu.clk_regs = top_clk_regs;
cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs);
- exynos5260_cmu_register_one(np, &cmu);
+ samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top",
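
The exynos5260 conversion above replaces the driver-private exynos5260_cmu_info and exynos5260_cmu_register_one() with the shared samsung_cmu_info helper, which the new exynos7 driver below uses as well. A minimal sketch of registering one CMU through that helper; the foo_* tables, FOO_NR_CLK and the compatible string are placeholders, not real symbols:

#include <linux/clk-provider.h>	/* CLK_OF_DECLARE() */
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/of.h>

#include "clk.h"		/* samsung_cmu_info, samsung_cmu_register_one() */

static void __init foo_clk_cmu_init(struct device_node *np)
{
	/* foo_* arrays: this CMU's mux/div/gate tables and register list */
	struct samsung_cmu_info cmu = {0};

	cmu.mux_clks = foo_mux_clks;
	cmu.nr_mux_clks = ARRAY_SIZE(foo_mux_clks);
	cmu.div_clks = foo_div_clks;
	cmu.nr_div_clks = ARRAY_SIZE(foo_div_clks);
	cmu.gate_clks = foo_gate_clks;
	cmu.nr_gate_clks = ARRAY_SIZE(foo_gate_clks);
	cmu.nr_clk_ids = FOO_NR_CLK;
	/* clk_regs lists the registers saved/restored across suspend */
	cmu.clk_regs = foo_clk_regs;
	cmu.nr_clk_regs = ARRAY_SIZE(foo_clk_regs);

	samsung_cmu_register_one(np, &cmu);
}
CLK_OF_DECLARE(foo_clk_cmu, "vendor,foo-clock-cmu", foo_clk_cmu_init);
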
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
new file mode 100644
index 000000000000..ea4483b8d62e
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include "clk.h"
+#include <dt-bindings/clock/exynos7-clk.h>
+
+/* Register Offset definitions for CMU_TOPC (0x10570000) */
+#define CC_PLL_LOCK 0x0000
+#define BUS0_PLL_LOCK 0x0004
+#define BUS1_DPLL_LOCK 0x0008
+#define MFC_PLL_LOCK 0x000C
+#define AUD_PLL_LOCK 0x0010
+#define CC_PLL_CON0 0x0100
+#define BUS0_PLL_CON0 0x0110
+#define BUS1_DPLL_CON0 0x0120
+#define MFC_PLL_CON0 0x0130
+#define AUD_PLL_CON0 0x0140
+#define MUX_SEL_TOPC0 0x0200
+#define MUX_SEL_TOPC1 0x0204
+#define MUX_SEL_TOPC2 0x0208
+#define MUX_SEL_TOPC3 0x020C
+#define DIV_TOPC0 0x0600
+#define DIV_TOPC1 0x0604
+#define DIV_TOPC3 0x060C
+
+static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
+ FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_bus0_pll_div4",
+ "ffac_topc_bus0_pll_div2", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_bus1_pll_div2", "mout_bus1_pll_ctrl", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_cc_pll_div2", "mout_cc_pll_ctrl", 1, 2, 0),
+ FFACTOR(0, "ffac_topc_mfc_pll_div2", "mout_mfc_pll_ctrl", 1, 2, 0),
+};
+
+/* List of parent clocks for Muxes in CMU_TOPC */
+PNAME(mout_bus0_pll_ctrl_p) = { "fin_pll", "fout_bus0_pll" };
+PNAME(mout_bus1_pll_ctrl_p) = { "fin_pll", "fout_bus1_pll" };
+PNAME(mout_cc_pll_ctrl_p) = { "fin_pll", "fout_cc_pll" };
+PNAME(mout_mfc_pll_ctrl_p) = { "fin_pll", "fout_mfc_pll" };
+
+PNAME(mout_topc_group2) = { "mout_sclk_bus0_pll_cmuc",
+ "mout_sclk_bus1_pll_cmuc", "mout_sclk_cc_pll_cmuc",
+ "mout_sclk_mfc_pll_cmuc" };
+
+PNAME(mout_sclk_bus0_pll_cmuc_p) = { "mout_bus0_pll_ctrl",
+ "ffac_topc_bus0_pll_div2", "ffac_topc_bus0_pll_div4"};
+PNAME(mout_sclk_bus1_pll_cmuc_p) = { "mout_bus1_pll_ctrl",
+ "ffac_topc_bus1_pll_div2"};
+PNAME(mout_sclk_cc_pll_cmuc_p) = { "mout_cc_pll_ctrl",
+ "ffac_topc_cc_pll_div2"};
+PNAME(mout_sclk_mfc_pll_cmuc_p) = { "mout_mfc_pll_ctrl",
+ "ffac_topc_mfc_pll_div2"};
+
+
+PNAME(mout_sclk_bus0_pll_out_p) = {"mout_bus0_pll_ctrl",
+ "ffac_topc_bus0_pll_div2"};
+
+static unsigned long topc_clk_regs[] __initdata = {
+ CC_PLL_LOCK,
+ BUS0_PLL_LOCK,
+ BUS1_DPLL_LOCK,
+ MFC_PLL_LOCK,
+ AUD_PLL_LOCK,
+ CC_PLL_CON0,
+ BUS0_PLL_CON0,
+ BUS1_DPLL_CON0,
+ MFC_PLL_CON0,
+ AUD_PLL_CON0,
+ MUX_SEL_TOPC0,
+ MUX_SEL_TOPC1,
+ MUX_SEL_TOPC2,
+ MUX_SEL_TOPC3,
+ DIV_TOPC0,
+ DIV_TOPC1,
+ DIV_TOPC3,
+};
+
+static struct samsung_mux_clock topc_mux_clks[] __initdata = {
+ MUX(0, "mout_bus0_pll_ctrl", mout_bus0_pll_ctrl_p, MUX_SEL_TOPC0, 0, 1),
+ MUX(0, "mout_bus1_pll_ctrl", mout_bus1_pll_ctrl_p, MUX_SEL_TOPC0, 4, 1),
+ MUX(0, "mout_cc_pll_ctrl", mout_cc_pll_ctrl_p, MUX_SEL_TOPC0, 8, 1),
+ MUX(0, "mout_mfc_pll_ctrl", mout_mfc_pll_ctrl_p, MUX_SEL_TOPC0, 12, 1),
+
+ MUX(0, "mout_sclk_bus0_pll_cmuc", mout_sclk_bus0_pll_cmuc_p,
+ MUX_SEL_TOPC0, 16, 2),
+ MUX(0, "mout_sclk_bus1_pll_cmuc", mout_sclk_bus1_pll_cmuc_p,
+ MUX_SEL_TOPC0, 20, 1),
+ MUX(0, "mout_sclk_cc_pll_cmuc", mout_sclk_cc_pll_cmuc_p,
+ MUX_SEL_TOPC0, 24, 1),
+ MUX(0, "mout_sclk_mfc_pll_cmuc", mout_sclk_mfc_pll_cmuc_p,
+ MUX_SEL_TOPC0, 28, 1),
+
+ MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
+ MUX_SEL_TOPC1, 16, 1),
+
+ MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
+
+ MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
+};
+
+static struct samsung_div_clock topc_div_clks[] __initdata = {
+ DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
+ DIV_TOPC0, 4, 4),
+
+ DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
+ DIV_TOPC1, 24, 4),
+
+ DIV(DOUT_SCLK_BUS0_PLL, "dout_sclk_bus0_pll", "mout_sclk_bus0_pll_out",
+ DIV_TOPC3, 0, 3),
+ DIV(DOUT_SCLK_BUS1_PLL, "dout_sclk_bus1_pll", "mout_bus1_pll_ctrl",
+ DIV_TOPC3, 8, 3),
+ DIV(DOUT_SCLK_CC_PLL, "dout_sclk_cc_pll", "mout_cc_pll_ctrl",
+ DIV_TOPC3, 12, 3),
+ DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
+ DIV_TOPC3, 16, 3),
+};
+
+static struct samsung_pll_clock topc_pll_clks[] __initdata = {
+ PLL(pll_1451x, 0, "fout_bus0_pll", "fin_pll", BUS0_PLL_LOCK,
+ BUS0_PLL_CON0, NULL),
+ PLL(pll_1452x, 0, "fout_cc_pll", "fin_pll", CC_PLL_LOCK,
+ CC_PLL_CON0, NULL),
+ PLL(pll_1452x, 0, "fout_bus1_pll", "fin_pll", BUS1_DPLL_LOCK,
+ BUS1_DPLL_CON0, NULL),
+ PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
+ MFC_PLL_CON0, NULL),
+ PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
+ AUD_PLL_CON0, NULL),
+};
+
+static struct samsung_cmu_info topc_cmu_info __initdata = {
+ .pll_clks = topc_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(topc_pll_clks),
+ .mux_clks = topc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(topc_mux_clks),
+ .div_clks = topc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(topc_div_clks),
+ .fixed_factor_clks = topc_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(topc_fixed_factor_clks),
+ .nr_clk_ids = TOPC_NR_CLK,
+ .clk_regs = topc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(topc_clk_regs),
+};
+
+static void __init exynos7_clk_topc_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &topc_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
+ exynos7_clk_topc_init);
+
+/* Register Offset definitions for CMU_TOP0 (0x105D0000) */
+#define MUX_SEL_TOP00 0x0200
+#define MUX_SEL_TOP01 0x0204
+#define MUX_SEL_TOP03 0x020C
+#define MUX_SEL_TOP0_PERIC3 0x023C
+#define DIV_TOP03 0x060C
+#define DIV_TOP0_PERIC3 0x063C
+#define ENABLE_SCLK_TOP0_PERIC3 0x0A3C
+
+/* List of parent clocks for Muxes in CMU_TOP0 */
+PNAME(mout_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
+PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
+PNAME(mout_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll" };
+PNAME(mout_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll" };
+
+PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
+ "ffac_top0_bus0_pll_div2"};
+PNAME(mout_top0_half_bus1_pll_p) = {"mout_top0_bus1_pll",
+ "ffac_top0_bus1_pll_div2"};
+PNAME(mout_top0_half_cc_pll_p) = {"mout_top0_cc_pll",
+ "ffac_top0_cc_pll_div2"};
+PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
+ "ffac_top0_mfc_pll_div2"};
+
+PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
+ "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
+ "mout_top0_half_mfc_pll"};
+
+static unsigned long top0_clk_regs[] __initdata = {
+ MUX_SEL_TOP00,
+ MUX_SEL_TOP01,
+ MUX_SEL_TOP03,
+ MUX_SEL_TOP0_PERIC3,
+ DIV_TOP03,
+ DIV_TOP0_PERIC3,
+ ENABLE_SCLK_TOP0_PERIC3,
+};
+
+static struct samsung_mux_clock top0_mux_clks[] __initdata = {
+ MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
+ MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
+ MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
+ MUX(0, "mout_top0_bus0_pll", mout_bus0_pll_p, MUX_SEL_TOP00, 16, 1),
+
+ MUX(0, "mout_top0_half_mfc_pll", mout_top0_half_mfc_pll_p,
+ MUX_SEL_TOP01, 4, 1),
+ MUX(0, "mout_top0_half_cc_pll", mout_top0_half_cc_pll_p,
+ MUX_SEL_TOP01, 8, 1),
+ MUX(0, "mout_top0_half_bus1_pll", mout_top0_half_bus1_pll_p,
+ MUX_SEL_TOP01, 12, 1),
+ MUX(0, "mout_top0_half_bus0_pll", mout_top0_half_bus0_pll_p,
+ MUX_SEL_TOP01, 16, 1),
+
+ MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
+ MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
+
+ MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
+ MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
+ MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
+ MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
+};
+
+static struct samsung_div_clock top0_div_clks[] __initdata = {
+ DIV(DOUT_ACLK_PERIC1, "dout_aclk_peric1_66", "mout_aclk_peric1_66",
+ DIV_TOP03, 12, 6),
+ DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
+ DIV_TOP03, 20, 6),
+
+ DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
+ DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
+ DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
+ DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
+};
+
+static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+ GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
+ ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
+ ENABLE_SCLK_TOP0_PERIC3, 8, 0, 0),
+ GATE(CLK_SCLK_UART1, "sclk_uart1", "dout_sclk_uart1",
+ ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
+ GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
+ ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
+};
+
+static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
+ FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top0_cc_pll_div2", "mout_top0_cc_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll", 1, 2, 0),
+};
+
+static struct samsung_cmu_info top0_cmu_info __initdata = {
+ .mux_clks = top0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top0_mux_clks),
+ .div_clks = top0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top0_div_clks),
+ .gate_clks = top0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top0_gate_clks),
+ .fixed_factor_clks = top0_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top0_fixed_factor_clks),
+ .nr_clk_ids = TOP0_NR_CLK,
+ .clk_regs = top0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top0_clk_regs),
+};
+
+static void __init exynos7_clk_top0_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &top0_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_top0, "samsung,exynos7-clock-top0",
+ exynos7_clk_top0_init);
+
+/* Register Offset definitions for CMU_TOP1 (0x105E0000) */
+#define MUX_SEL_TOP10 0x0200
+#define MUX_SEL_TOP11 0x0204
+#define MUX_SEL_TOP13 0x020C
+#define MUX_SEL_TOP1_FSYS0 0x0224
+#define MUX_SEL_TOP1_FSYS1 0x0228
+#define DIV_TOP13 0x060C
+#define DIV_TOP1_FSYS0 0x0624
+#define DIV_TOP1_FSYS1 0x0628
+#define ENABLE_ACLK_TOP13 0x080C
+#define ENABLE_SCLK_TOP1_FSYS0 0x0A24
+#define ENABLE_SCLK_TOP1_FSYS1 0x0A28
+
+/* List of parent clocks for Muxes in CMU_TOP1 */
+PNAME(mout_top1_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
+PNAME(mout_top1_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll_b" };
+PNAME(mout_top1_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll_b" };
+PNAME(mout_top1_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll_b" };
+
+PNAME(mout_top1_half_bus0_pll_p) = {"mout_top1_bus0_pll",
+ "ffac_top1_bus0_pll_div2"};
+PNAME(mout_top1_half_bus1_pll_p) = {"mout_top1_bus1_pll",
+ "ffac_top1_bus1_pll_div2"};
+PNAME(mout_top1_half_cc_pll_p) = {"mout_top1_cc_pll",
+ "ffac_top1_cc_pll_div2"};
+PNAME(mout_top1_half_mfc_pll_p) = {"mout_top1_mfc_pll",
+ "ffac_top1_mfc_pll_div2"};
+
+PNAME(mout_top1_group1) = {"mout_top1_half_bus0_pll",
+ "mout_top1_half_bus1_pll", "mout_top1_half_cc_pll",
+ "mout_top1_half_mfc_pll"};
+
+static unsigned long top1_clk_regs[] __initdata = {
+ MUX_SEL_TOP10,
+ MUX_SEL_TOP11,
+ MUX_SEL_TOP13,
+ MUX_SEL_TOP1_FSYS0,
+ MUX_SEL_TOP1_FSYS1,
+ DIV_TOP13,
+ DIV_TOP1_FSYS0,
+ DIV_TOP1_FSYS1,
+ ENABLE_ACLK_TOP13,
+ ENABLE_SCLK_TOP1_FSYS0,
+ ENABLE_SCLK_TOP1_FSYS1,
+};
+
+static struct samsung_mux_clock top1_mux_clks[] __initdata = {
+ MUX(0, "mout_top1_mfc_pll", mout_top1_mfc_pll_p, MUX_SEL_TOP10, 4, 1),
+ MUX(0, "mout_top1_cc_pll", mout_top1_cc_pll_p, MUX_SEL_TOP10, 8, 1),
+ MUX(0, "mout_top1_bus1_pll", mout_top1_bus1_pll_p,
+ MUX_SEL_TOP10, 12, 1),
+ MUX(0, "mout_top1_bus0_pll", mout_top1_bus0_pll_p,
+ MUX_SEL_TOP10, 16, 1),
+
+ MUX(0, "mout_top1_half_mfc_pll", mout_top1_half_mfc_pll_p,
+ MUX_SEL_TOP11, 4, 1),
+ MUX(0, "mout_top1_half_cc_pll", mout_top1_half_cc_pll_p,
+ MUX_SEL_TOP11, 8, 1),
+ MUX(0, "mout_top1_half_bus1_pll", mout_top1_half_bus1_pll_p,
+ MUX_SEL_TOP11, 12, 1),
+ MUX(0, "mout_top1_half_bus0_pll", mout_top1_half_bus0_pll_p,
+ MUX_SEL_TOP11, 16, 1),
+
+ MUX(0, "mout_aclk_fsys1_200", mout_top1_group1, MUX_SEL_TOP13, 24, 2),
+ MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
+
+ MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
+
+ MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
+ MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
+};
+
+static struct samsung_div_clock top1_div_clks[] __initdata = {
+ DIV(DOUT_ACLK_FSYS1_200, "dout_aclk_fsys1_200", "mout_aclk_fsys1_200",
+ DIV_TOP13, 24, 4),
+ DIV(DOUT_ACLK_FSYS0_200, "dout_aclk_fsys0_200", "mout_aclk_fsys0_200",
+ DIV_TOP13, 28, 4),
+
+ DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
+ DIV_TOP1_FSYS0, 24, 4),
+
+ DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
+ DIV_TOP1_FSYS1, 24, 4),
+ DIV(DOUT_SCLK_MMC0, "dout_sclk_mmc0", "mout_sclk_mmc0",
+ DIV_TOP1_FSYS1, 28, 4),
+};
+
+static struct samsung_gate_clock top1_gate_clks[] __initdata = {
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
+ ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
+ ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC0, "sclk_mmc0", "dout_sclk_mmc0",
+ ENABLE_SCLK_TOP1_FSYS1, 28, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = {
+ FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top1_cc_pll_div2", "mout_top1_cc_pll", 1, 2, 0),
+ FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll", 1, 2, 0),
+};
+
+static struct samsung_cmu_info top1_cmu_info __initdata = {
+ .mux_clks = top1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top1_mux_clks),
+ .div_clks = top1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top1_div_clks),
+ .gate_clks = top1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top1_gate_clks),
+ .fixed_factor_clks = top1_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top1_fixed_factor_clks),
+ .nr_clk_ids = TOP1_NR_CLK,
+ .clk_regs = top1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top1_clk_regs),
+};
+
+static void __init exynos7_clk_top1_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &top1_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_top1, "samsung,exynos7-clock-top1",
+ exynos7_clk_top1_init);
+
+/* Register Offset definitions for CMU_CCORE (0x105B0000) */
+#define MUX_SEL_CCORE 0x0200
+#define DIV_CCORE 0x0600
+#define ENABLE_ACLK_CCORE0 0x0800
+#define ENABLE_ACLK_CCORE1 0x0804
+#define ENABLE_PCLK_CCORE 0x0900
+
+/*
+ * List of parent clocks for Muxes in CMU_CCORE
+ */
+PNAME(mout_aclk_ccore_133_p) = { "fin_pll", "dout_aclk_ccore_133" };
+
+static unsigned long ccore_clk_regs[] __initdata = {
+ MUX_SEL_CCORE,
+ ENABLE_PCLK_CCORE,
+};
+
+static struct samsung_mux_clock ccore_mux_clks[] __initdata = {
+ MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_p,
+ MUX_SEL_CCORE, 1, 1),
+};
+
+static struct samsung_gate_clock ccore_gate_clks[] __initdata = {
+ GATE(PCLK_RTC, "pclk_rtc", "mout_aclk_ccore_133_user",
+ ENABLE_PCLK_CCORE, 8, 0, 0),
+};
+
+static struct samsung_cmu_info ccore_cmu_info __initdata = {
+ .mux_clks = ccore_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(ccore_mux_clks),
+ .gate_clks = ccore_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(ccore_gate_clks),
+ .nr_clk_ids = CCORE_NR_CLK,
+ .clk_regs = ccore_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(ccore_clk_regs),
+};
+
+static void __init exynos7_clk_ccore_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &ccore_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_ccore, "samsung,exynos7-clock-ccore",
+ exynos7_clk_ccore_init);
+
+/* Register Offset definitions for CMU_PERIC0 (0x13610000) */
+#define MUX_SEL_PERIC0 0x0200
+#define ENABLE_PCLK_PERIC0 0x0900
+#define ENABLE_SCLK_PERIC0 0x0A00
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_aclk_peric0_66_p) = { "fin_pll", "dout_aclk_peric0_66" };
+PNAME(mout_sclk_uart0_p) = { "fin_pll", "sclk_uart0" };
+
+static unsigned long peric0_clk_regs[] __initdata = {
+ MUX_SEL_PERIC0,
+ ENABLE_PCLK_PERIC0,
+ ENABLE_SCLK_PERIC0,
+};
+
+static struct samsung_mux_clock peric0_mux_clks[] __initdata = {
+ MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_p,
+ MUX_SEL_PERIC0, 0, 1),
+ MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_p,
+ MUX_SEL_PERIC0, 16, 1),
+};
+
+static struct samsung_gate_clock peric0_gate_clks[] __initdata = {
+ GATE(PCLK_HSI2C0, "pclk_hsi2c0", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 8, 0, 0),
+ GATE(PCLK_HSI2C1, "pclk_hsi2c1", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 9, 0, 0),
+ GATE(PCLK_HSI2C4, "pclk_hsi2c4", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 10, 0, 0),
+ GATE(PCLK_HSI2C5, "pclk_hsi2c5", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 11, 0, 0),
+ GATE(PCLK_HSI2C9, "pclk_hsi2c9", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 12, 0, 0),
+ GATE(PCLK_HSI2C10, "pclk_hsi2c10", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 13, 0, 0),
+ GATE(PCLK_HSI2C11, "pclk_hsi2c11", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 14, 0, 0),
+ GATE(PCLK_UART0, "pclk_uart0", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 16, 0, 0),
+ GATE(PCLK_ADCIF, "pclk_adcif", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 20, 0, 0),
+ GATE(PCLK_PWM, "pclk_pwm", "mout_aclk_peric0_66_user",
+ ENABLE_PCLK_PERIC0, 21, 0, 0),
+
+ GATE(SCLK_UART0, "sclk_uart0_user", "mout_sclk_uart0_user",
+ ENABLE_SCLK_PERIC0, 16, 0, 0),
+ GATE(SCLK_PWM, "sclk_pwm", "fin_pll", ENABLE_SCLK_PERIC0, 21, 0, 0),
+};
+
+static struct samsung_cmu_info peric0_cmu_info __initdata = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = PERIC0_NR_CLK,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+};
+
+static void __init exynos7_clk_peric0_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &peric0_cmu_info);
+}
+
+/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
+#define MUX_SEL_PERIC10 0x0200
+#define MUX_SEL_PERIC11 0x0204
+#define ENABLE_PCLK_PERIC1 0x0900
+#define ENABLE_SCLK_PERIC10 0x0A00
+
+CLK_OF_DECLARE(exynos7_clk_peric0, "samsung,exynos7-clock-peric0",
+ exynos7_clk_peric0_init);
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_aclk_peric1_66_p) = { "fin_pll", "dout_aclk_peric1_66" };
+PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" };
+PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" };
+PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" };
+
+static unsigned long peric1_clk_regs[] __initdata = {
+ MUX_SEL_PERIC10,
+ MUX_SEL_PERIC11,
+ ENABLE_PCLK_PERIC1,
+ ENABLE_SCLK_PERIC10,
+};
+
+static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
+ MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
+ MUX_SEL_PERIC10, 0, 1),
+
+ MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
+ MUX_SEL_PERIC11, 20, 1),
+ MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
+ MUX_SEL_PERIC11, 24, 1),
+ MUX(0, "mout_sclk_uart3_user", mout_sclk_uart3_p,
+ MUX_SEL_PERIC11, 28, 1),
+};
+
+static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
+ GATE(PCLK_HSI2C2, "pclk_hsi2c2", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 4, 0, 0),
+ GATE(PCLK_HSI2C3, "pclk_hsi2c3", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 5, 0, 0),
+ GATE(PCLK_HSI2C6, "pclk_hsi2c6", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 6, 0, 0),
+ GATE(PCLK_HSI2C7, "pclk_hsi2c7", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 7, 0, 0),
+ GATE(PCLK_HSI2C8, "pclk_hsi2c8", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 8, 0, 0),
+ GATE(PCLK_UART1, "pclk_uart1", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 9, 0, 0),
+ GATE(PCLK_UART2, "pclk_uart2", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 10, 0, 0),
+ GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
+ ENABLE_PCLK_PERIC1, 11, 0, 0),
+
+ GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
+ ENABLE_SCLK_PERIC10, 9, 0, 0),
+ GATE(SCLK_UART2, "sclk_uart2_user", "mout_sclk_uart2_user",
+ ENABLE_SCLK_PERIC10, 10, 0, 0),
+ GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
+ ENABLE_SCLK_PERIC10, 11, 0, 0),
+};
+
+static struct samsung_cmu_info peric1_cmu_info __initdata = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = PERIC1_NR_CLK,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+};
+
+static void __init exynos7_clk_peric1_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &peric1_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_peric1, "samsung,exynos7-clock-peric1",
+ exynos7_clk_peric1_init);
+
+/* Register Offset definitions for CMU_PERIS (0x10040000) */
+#define MUX_SEL_PERIS 0x0200
+#define ENABLE_PCLK_PERIS 0x0900
+#define ENABLE_PCLK_PERIS_SECURE_CHIPID 0x0910
+#define ENABLE_SCLK_PERIS 0x0A00
+#define ENABLE_SCLK_PERIS_SECURE_CHIPID 0x0A10
+
+/* List of parent clocks for Muxes in CMU_PERIS */
+PNAME(mout_aclk_peris_66_p) = { "fin_pll", "dout_aclk_peris_66" };
+
+static unsigned long peris_clk_regs[] __initdata = {
+ MUX_SEL_PERIS,
+ ENABLE_PCLK_PERIS,
+ ENABLE_PCLK_PERIS_SECURE_CHIPID,
+ ENABLE_SCLK_PERIS,
+ ENABLE_SCLK_PERIS_SECURE_CHIPID,
+};
+
+static struct samsung_mux_clock peris_mux_clks[] __initdata = {
+ MUX(0, "mout_aclk_peris_66_user",
+ mout_aclk_peris_66_p, MUX_SEL_PERIS, 0, 1),
+};
+
+static struct samsung_gate_clock peris_gate_clks[] __initdata = {
+ GATE(PCLK_WDT, "pclk_wdt", "mout_aclk_peris_66_user",
+ ENABLE_PCLK_PERIS, 6, 0, 0),
+ GATE(PCLK_TMU, "pclk_tmu_apbif", "mout_aclk_peris_66_user",
+ ENABLE_PCLK_PERIS, 10, 0, 0),
+
+ GATE(PCLK_CHIPID, "pclk_chipid", "mout_aclk_peris_66_user",
+ ENABLE_PCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
+ GATE(SCLK_CHIPID, "sclk_chipid", "fin_pll",
+ ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
+
+ GATE(SCLK_TMU, "sclk_tmu", "fin_pll", ENABLE_SCLK_PERIS, 10, 0, 0),
+};
+
+static struct samsung_cmu_info peris_cmu_info __initdata = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .gate_clks = peris_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
+ .nr_clk_ids = PERIS_NR_CLK,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+};
+
+static void __init exynos7_clk_peris_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &peris_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
+ exynos7_clk_peris_init);
+
+/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
+#define MUX_SEL_FSYS00 0x0200
+#define MUX_SEL_FSYS01 0x0204
+#define ENABLE_ACLK_FSYS01 0x0804
+
+/*
+ * List of parent clocks for Muxes in CMU_FSYS0
+ */
+PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" };
+PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" };
+
+static unsigned long fsys0_clk_regs[] __initdata = {
+ MUX_SEL_FSYS00,
+ MUX_SEL_FSYS01,
+ ENABLE_ACLK_FSYS01,
+};
+
+static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
+ MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_p,
+ MUX_SEL_FSYS00, 24, 1),
+
+ MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
+};
+
+static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
+ GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
+ ENABLE_ACLK_FSYS01, 31, 0, 0),
+};
+
+static struct samsung_cmu_info fsys0_cmu_info __initdata = {
+ .mux_clks = fsys0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
+ .gate_clks = fsys0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
+ .nr_clk_ids = TOP1_NR_CLK,
+ .clk_regs = fsys0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
+};
+
+static void __init exynos7_clk_fsys0_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &fsys0_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_fsys0, "samsung,exynos7-clock-fsys0",
+ exynos7_clk_fsys0_init);
+
+/* Register Offset definitions for CMU_FSYS1 (0x156E0000) */
+#define MUX_SEL_FSYS10 0x0200
+#define MUX_SEL_FSYS11 0x0204
+#define ENABLE_ACLK_FSYS1 0x0800
+
+/*
+ * List of parent clocks for Muxes in CMU_FSYS1
+ */
+PNAME(mout_aclk_fsys1_200_p) = { "fin_pll", "dout_aclk_fsys1_200" };
+PNAME(mout_sclk_mmc0_p) = { "fin_pll", "sclk_mmc0" };
+PNAME(mout_sclk_mmc1_p) = { "fin_pll", "sclk_mmc1" };
+
+static unsigned long fsys1_clk_regs[] __initdata = {
+ MUX_SEL_FSYS10,
+ MUX_SEL_FSYS11,
+ ENABLE_ACLK_FSYS1,
+};
+
+static struct samsung_mux_clock fsys1_mux_clks[] __initdata = {
+ MUX(0, "mout_aclk_fsys1_200_user", mout_aclk_fsys1_200_p,
+ MUX_SEL_FSYS10, 28, 1),
+
+ MUX(0, "mout_sclk_mmc1_user", mout_sclk_mmc1_p, MUX_SEL_FSYS11, 24, 1),
+ MUX(0, "mout_sclk_mmc0_user", mout_sclk_mmc0_p, MUX_SEL_FSYS11, 28, 1),
+};
+
+static struct samsung_gate_clock fsys1_gate_clks[] __initdata = {
+ GATE(ACLK_MMC1, "aclk_mmc1", "mout_aclk_fsys1_200_user",
+ ENABLE_ACLK_FSYS1, 29, 0, 0),
+ GATE(ACLK_MMC0, "aclk_mmc0", "mout_aclk_fsys1_200_user",
+ ENABLE_ACLK_FSYS1, 30, 0, 0),
+};
+
+static struct samsung_cmu_info fsys1_cmu_info __initdata = {
+ .mux_clks = fsys1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
+ .gate_clks = fsys1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
+ .nr_clk_ids = TOP1_NR_CLK,
+ .clk_regs = fsys1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
+};
+
+static void __init exynos7_clk_fsys1_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &fsys1_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
+ exynos7_clk_fsys1_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index b07fad2a9167..9d70e5c03804 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -482,6 +482,8 @@ static const struct clk_ops samsung_pll45xx_clk_min_ops = {
#define PLL46XX_VSEL_MASK (1)
#define PLL46XX_MDIV_MASK (0x1FF)
+#define PLL1460X_MDIV_MASK (0x3FF)
+
#define PLL46XX_PDIV_MASK (0x3F)
#define PLL46XX_SDIV_MASK (0x7)
#define PLL46XX_VSEL_SHIFT (27)
@@ -511,13 +513,15 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
pll_con0 = __raw_readl(pll->con_reg);
pll_con1 = __raw_readl(pll->con_reg + 4);
- mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK;
+ mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & ((pll->type == pll_1460x) ?
+ PLL1460X_MDIV_MASK : PLL46XX_MDIV_MASK);
pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK;
kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK :
pll_con1 & PLL46XX_KDIV_MASK;
- shift = pll->type == pll_4600 ? 16 : 10;
+ shift = ((pll->type == pll_4600) || (pll->type == pll_1460x)) ? 16 : 10;
+
fvco *= (mdiv << shift) + kdiv;
do_div(fvco, (pdiv << sdiv));
fvco >>= shift;
@@ -573,14 +577,21 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
lock = 0xffff;
/* Set PLL PMS and VSEL values. */
- con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
+ if (pll->type == pll_1460x) {
+ con0 &= ~((PLL1460X_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
+ (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
+ (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT));
+ } else {
+ con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
(PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
(PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) |
(PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT));
+ con0 |= rate->vsel << PLL46XX_VSEL_SHIFT;
+ }
+
con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) |
(rate->pdiv << PLL46XX_PDIV_SHIFT) |
- (rate->sdiv << PLL46XX_SDIV_SHIFT) |
- (rate->vsel << PLL46XX_VSEL_SHIFT);
+ (rate->sdiv << PLL46XX_SDIV_SHIFT);
/* Set PLL K, MFR and MRR values. */
con1 = __raw_readl(pll->con_reg + 0x4);
@@ -1190,6 +1201,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
/* clk_ops for 35xx and 2550 are similar */
case pll_35xx:
case pll_2550:
+ case pll_1450x:
+ case pll_1451x:
+ case pll_1452x:
if (!pll->rate_table)
init.ops = &samsung_pll35xx_clk_min_ops;
else
@@ -1223,6 +1237,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_4600:
case pll_4650:
case pll_4650c:
+ case pll_1460x:
if (!pll->rate_table)
init.ops = &samsung_pll46xx_clk_min_ops;
else
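
The pll46xx recalc path patched above (and now shared with pll_1460x, which only widens the M-divider mask to 0x3FF) evaluates the rate in fixed point: fvco = fin * ((mdiv << shift) + kdiv), divided by (pdiv << sdiv) and shifted back down by "shift", which is 16 for pll_4600/pll_1460x and 10 otherwise — i.e. rate = fin * (mdiv + kdiv / 2^shift) / (pdiv * 2^sdiv). Below is a small user-space sketch of that arithmetic; the divider values in main() are made up for illustration and do not come from a real PLL setting.

/* Standalone sketch of the pll46xx/pll_1460x recalc arithmetic; the sample
 * divider values below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pll46xx_rate(uint64_t fin, unsigned int mdiv, unsigned int pdiv,
			     unsigned int sdiv, unsigned int kdiv,
			     unsigned int shift)
{
	uint64_t fvco = fin;

	fvco *= ((uint64_t)mdiv << shift) + kdiv;	/* integer M plus fractional K */
	fvco /= (uint64_t)pdiv << sdiv;			/* divide by P * 2^S */
	return fvco >> shift;				/* drop the fixed-point scale */
}

int main(void)
{
	/* fin = 24 MHz, M = 100, P = 2, S = 0, K = 0, shift = 16 -> 1.2 GHz */
	printf("%llu\n",
	       (unsigned long long)pll46xx_rate(24000000, 100, 2, 0, 0, 16));
	return 0;
}
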
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index c0ed4d41fd90..213de9af8b4f 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -33,6 +33,10 @@ enum samsung_pll_type {
pll_s3c2440_mpll,
pll_2550xx,
pll_2650xx,
+ pll_1450x,
+ pll_1451x,
+ pll_1452x,
+ pll_1460x,
};
#define PLL_35XX_RATE(_rate, _m, _p, _s) \
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index deab84d9f37d..4bda54095a16 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -11,9 +11,13 @@
* clock framework for Samsung platforms.
*/
+#include <linux/of_address.h>
#include <linux/syscore_ops.h>
+
#include "clk.h"
+static LIST_HEAD(clock_reg_cache_list);
+
void samsung_clk_save(void __iomem *base,
struct samsung_clk_reg_dump *rd,
unsigned int num_regs)
@@ -281,7 +285,6 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
* obtain the clock speed of all external fixed clock sources from device
* tree and register it
*/
-#ifdef CONFIG_OF
void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
struct samsung_fixed_rate_clock *fixed_rate_clk,
unsigned int nr_fixed_rate_clk,
@@ -298,7 +301,6 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
}
samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
}
-#endif
/* utility function to get the rate of a specified clock */
unsigned long _get_rate(const char *clk_name)
@@ -313,3 +315,99 @@ unsigned long _get_rate(const char *clk_name)
return clk_get_rate(clk);
}
+
+#ifdef CONFIG_PM_SLEEP
+static int samsung_clk_suspend(void)
+{
+ struct samsung_clock_reg_cache *reg_cache;
+
+ list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
+ samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
+ reg_cache->rd_num);
+ return 0;
+}
+
+static void samsung_clk_resume(void)
+{
+ struct samsung_clock_reg_cache *reg_cache;
+
+ list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
+ samsung_clk_restore(reg_cache->reg_base, reg_cache->rdump,
+ reg_cache->rd_num);
+}
+
+static struct syscore_ops samsung_clk_syscore_ops = {
+ .suspend = samsung_clk_suspend,
+ .resume = samsung_clk_resume,
+};
+
+static void samsung_clk_sleep_init(void __iomem *reg_base,
+ const unsigned long *rdump,
+ unsigned long nr_rdump)
+{
+ struct samsung_clock_reg_cache *reg_cache;
+
+ reg_cache = kzalloc(sizeof(struct samsung_clock_reg_cache),
+ GFP_KERNEL);
+ if (!reg_cache)
+ panic("could not allocate register reg_cache.\n");
+ reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
+
+ if (!reg_cache->rdump)
+ panic("could not allocate register dump storage.\n");
+
+ if (list_empty(&clock_reg_cache_list))
+ register_syscore_ops(&samsung_clk_syscore_ops);
+
+ reg_cache->reg_base = reg_base;
+ reg_cache->rd_num = nr_rdump;
+ list_add_tail(&reg_cache->node, &clock_reg_cache_list);
+}
+
+#else
+static void samsung_clk_sleep_init(void __iomem *reg_base,
+ const unsigned long *rdump,
+ unsigned long nr_rdump) {}
+#endif
+
+/*
+ * Common function which registers plls, muxes, dividers and gates
+ * for each CMU. It also adds the CMU register list to the register cache.
+ */
+void __init samsung_cmu_register_one(struct device_node *np,
+ struct samsung_cmu_info *cmu)
+{
+ void __iomem *reg_base;
+ struct samsung_clk_provider *ctx;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
+ if (!ctx)
+ panic("%s: unable to alllocate ctx\n", __func__);
+
+ if (cmu->pll_clks)
+ samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
+ reg_base);
+ if (cmu->mux_clks)
+ samsung_clk_register_mux(ctx, cmu->mux_clks,
+ cmu->nr_mux_clks);
+ if (cmu->div_clks)
+ samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
+ if (cmu->gate_clks)
+ samsung_clk_register_gate(ctx, cmu->gate_clks,
+ cmu->nr_gate_clks);
+ if (cmu->fixed_clks)
+ samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
+ cmu->nr_fixed_clks);
+ if (cmu->fixed_factor_clks)
+ samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
+ cmu->nr_fixed_factor_clks);
+ if (cmu->clk_regs)
+ samsung_clk_sleep_init(reg_base, cmu->clk_regs,
+ cmu->nr_clk_regs);
+
+ samsung_clk_of_add_provider(np, ctx);
+}
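
samsung_cmu_register_one() is what lets each Exynos7 CMU in clk-exynos7.c above collapse into static tables plus a one-line init. Condensed, the pattern looks like the sketch below for a hypothetical CMU; every "foo" identifier, FOO_NR_CLK and the compatible string are invented for illustration — a real driver takes the register offsets from the SoC manual and the clock IDs from a dt-bindings header.

/* Minimal sketch of a CMU driver built on samsung_cmu_register_one().
 * All "foo" names, FOO_NR_CLK and the compatible string are hypothetical.
 */
#include <linux/of.h>
#include "clk.h"

#define MUX_SEL_FOO	0x0200
#define DIV_FOO		0x0600
#define FOO_NR_CLK	2		/* normally from a dt-bindings header */

PNAME(mout_foo_p) = { "fin_pll", "dout_foo_src" };

static unsigned long foo_clk_regs[] __initdata = {
	MUX_SEL_FOO,
	DIV_FOO,
};

static struct samsung_mux_clock foo_mux_clks[] __initdata = {
	MUX(0, "mout_foo", mout_foo_p, MUX_SEL_FOO, 0, 1),
};

static struct samsung_div_clock foo_div_clks[] __initdata = {
	DIV(1, "dout_foo", "mout_foo", DIV_FOO, 0, 4),
};

static struct samsung_cmu_info foo_cmu_info __initdata = {
	.mux_clks	= foo_mux_clks,
	.nr_mux_clks	= ARRAY_SIZE(foo_mux_clks),
	.div_clks	= foo_div_clks,
	.nr_div_clks	= ARRAY_SIZE(foo_div_clks),
	.nr_clk_ids	= FOO_NR_CLK,
	.clk_regs	= foo_clk_regs,	/* cached across suspend/resume */
	.nr_clk_regs	= ARRAY_SIZE(foo_clk_regs),
};

static void __init foo_clk_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &foo_cmu_info);
}
CLK_OF_DECLARE(foo_clk, "samsung,foo-clock", foo_clk_init);
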
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 66ab36b5cef1..8acabe1f32c4 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -13,19 +13,15 @@
#ifndef __SAMSUNG_CLK_H
#define __SAMSUNG_CLK_H
-#include <linux/clk.h>
#include <linux/clkdev.h>
-#include <linux/io.h>
#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
#include "clk-pll.h"
/**
* struct samsung_clk_provider: information about clock provider
* @reg_base: virtual address for the register base.
* @clk_data: holds clock related data like clk* and number of clocks.
- * @lock: maintains exclusion bwtween callbacks for a given clock-provider.
+ * @lock: maintains exclusion between callbacks for a given clock-provider.
*/
struct samsung_clk_provider {
void __iomem *reg_base;
@@ -324,6 +320,40 @@ struct samsung_pll_clock {
__PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
_lock, _con, _rtable, _alias)
+struct samsung_clock_reg_cache {
+ struct list_head node;
+ void __iomem *reg_base;
+ struct samsung_clk_reg_dump *rdump;
+ unsigned int rd_num;
+};
+
+struct samsung_cmu_info {
+ /* list of pll clocks and respective count */
+ struct samsung_pll_clock *pll_clks;
+ unsigned int nr_pll_clks;
+ /* list of mux clocks and respective count */
+ struct samsung_mux_clock *mux_clks;
+ unsigned int nr_mux_clks;
+ /* list of div clocks and respective count */
+ struct samsung_div_clock *div_clks;
+ unsigned int nr_div_clks;
+ /* list of gate clocks and respective count */
+ struct samsung_gate_clock *gate_clks;
+ unsigned int nr_gate_clks;
+ /* list of fixed clocks and respective count */
+ struct samsung_fixed_rate_clock *fixed_clks;
+ unsigned int nr_fixed_clks;
+ /* list of fixed factor clocks and respective count */
+ struct samsung_fixed_factor_clock *fixed_factor_clks;
+ unsigned int nr_fixed_factor_clks;
+ /* total number of clocks with IDs assigned */
+ unsigned int nr_clk_ids;
+
+ /* list and number of clock registers */
+ unsigned long *clk_regs;
+ unsigned int nr_clk_regs;
+};
+
extern struct samsung_clk_provider *__init samsung_clk_init(
struct device_node *np, void __iomem *base,
unsigned long nr_clks);
@@ -362,6 +392,9 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
struct samsung_pll_clock *pll_list,
unsigned int nr_clk, void __iomem *base);
+extern void __init samsung_cmu_register_one(struct device_node *,
+ struct samsung_cmu_info *);
+
extern unsigned long _get_rate(const char *clk_name);
extern void samsung_clk_save(void __iomem *base,
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index f065f694cb65..639241e31e03 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -32,6 +32,9 @@ struct div6_clock {
struct clk_hw hw;
void __iomem *reg;
unsigned int div;
+ u32 src_shift;
+ u32 src_width;
+ u8 *parents;
};
#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
@@ -39,8 +42,11 @@ struct div6_clock {
static int cpg_div6_clock_enable(struct clk_hw *hw)
{
struct div6_clock *clock = to_div6_clock(hw);
+ u32 val;
- clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+ val = (clk_readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
+ | CPG_DIV6_DIV(clock->div - 1);
+ clk_writel(val, clock->reg);
return 0;
}
@@ -52,7 +58,7 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
/* DIV6 clocks require the divisor field to be non-zero when stopping
* the clock.
*/
- clk_writel(CPG_DIV6_CKSTP | CPG_DIV6_DIV(CPG_DIV6_DIV_MASK),
+ clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
clock->reg);
}
@@ -94,12 +100,53 @@ static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct div6_clock *clock = to_div6_clock(hw);
unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate);
+ u32 val;
clock->div = div;
+ val = clk_readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
/* Only program the new divisor if the clock isn't stopped. */
- if (!(clk_readl(clock->reg) & CPG_DIV6_CKSTP))
- clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+ if (!(val & CPG_DIV6_CKSTP))
+ clk_writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
+
+ return 0;
+}
+
+static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+ unsigned int i;
+ u8 hw_index;
+
+ if (clock->src_width == 0)
+ return 0;
+
+ hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
+ (BIT(clock->src_width) - 1);
+ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+ if (clock->parents[i] == hw_index)
+ return i;
+ }
+
+ pr_err("%s: %s DIV6 clock set to invalid parent %u\n",
+ __func__, __clk_get_name(hw->clk), hw_index);
+ return 0;
+}
+
+static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct div6_clock *clock = to_div6_clock(hw);
+ u8 hw_index;
+ u32 mask;
+
+ if (index >= __clk_get_num_parents(hw->clk))
+ return -EINVAL;
+
+ mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
+ hw_index = clock->parents[index];
+
+ clk_writel((clk_readl(clock->reg) & mask) |
+ (hw_index << clock->src_shift), clock->reg);
return 0;
}
@@ -108,6 +155,8 @@ static const struct clk_ops cpg_div6_clock_ops = {
.enable = cpg_div6_clock_enable,
.disable = cpg_div6_clock_disable,
.is_enabled = cpg_div6_clock_is_enabled,
+ .get_parent = cpg_div6_clock_get_parent,
+ .set_parent = cpg_div6_clock_set_parent,
.recalc_rate = cpg_div6_clock_recalc_rate,
.round_rate = cpg_div6_clock_round_rate,
.set_rate = cpg_div6_clock_set_rate,
@@ -115,20 +164,33 @@ static const struct clk_ops cpg_div6_clock_ops = {
static void __init cpg_div6_clock_init(struct device_node *np)
{
+ unsigned int num_parents, valid_parents;
+ const char **parent_names;
struct clk_init_data init;
struct div6_clock *clock;
- const char *parent_name;
const char *name;
struct clk *clk;
+ unsigned int i;
int ret;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock) {
- pr_err("%s: failed to allocate %s DIV6 clock\n",
+ if (!clock)
+ return;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents < 1) {
+ pr_err("%s: no parent found for %s DIV6 clock\n",
__func__, np->name);
return;
}
+ clock->parents = kmalloc_array(num_parents, sizeof(*clock->parents),
+ GFP_KERNEL);
+ parent_names = kmalloc_array(num_parents, sizeof(*parent_names),
+ GFP_KERNEL);
+ if (!parent_names)
+ return;
+
/* Remap the clock register and read the divisor. Disabling the
* clock overwrites the divisor, so we need to cache its value for the
* enable operation.
@@ -150,9 +212,34 @@ static void __init cpg_div6_clock_init(struct device_node *np)
goto error;
}
- parent_name = of_clk_get_parent_name(np, 0);
- if (parent_name == NULL) {
- pr_err("%s: failed to get %s DIV6 clock parent name\n",
+
+ for (i = 0, valid_parents = 0; i < num_parents; i++) {
+ const char *name = of_clk_get_parent_name(np, i);
+
+ if (name) {
+ parent_names[valid_parents] = name;
+ clock->parents[valid_parents] = i;
+ valid_parents++;
+ }
+ }
+
+ switch (num_parents) {
+ case 1:
+ /* fixed parent clock */
+ clock->src_shift = clock->src_width = 0;
+ break;
+ case 4:
+ /* clock with EXSRC bits 6-7 */
+ clock->src_shift = 6;
+ clock->src_width = 2;
+ break;
+ case 8:
+ /* VCLK with EXSRC bits 12-14 */
+ clock->src_shift = 12;
+ clock->src_width = 3;
+ break;
+ default:
+ pr_err("%s: invalid number of parents for DIV6 clock %s\n",
__func__, np->name);
goto error;
}
@@ -161,8 +248,8 @@ static void __init cpg_div6_clock_init(struct device_node *np)
init.name = name;
init.ops = &cpg_div6_clock_ops;
init.flags = CLK_IS_BASIC;
- init.parent_names = &parent_name;
- init.num_parents = 1;
+ init.parent_names = parent_names;
+ init.num_parents = valid_parents;
clock->hw.init = &init;
@@ -175,11 +262,13 @@ static void __init cpg_div6_clock_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ kfree(parent_names);
return;
error:
if (clock->reg)
iounmap(clock->reg);
+ kfree(parent_names);
kfree(clock);
}
CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init);
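
The new get_parent/set_parent callbacks above reduce to a read-modify-write of the EXSRC field whose position depends on how many parents the DT lists (bits 7:6 for four parents, bits 14:12 for eight), with clock->parents[] translating between DT parent index and hardware source index. A self-contained sketch of just the bit-field handling, with illustrative shift/width values and register contents:

/* Standalone sketch of the DIV6 EXSRC field handling; the field positions and
 * register values below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

static unsigned int div6_get_src(uint32_t reg, unsigned int shift,
				 unsigned int width)
{
	return (reg >> shift) & (BIT(width) - 1);
}

static uint32_t div6_set_src(uint32_t reg, unsigned int shift,
			     unsigned int width, unsigned int hw_index)
{
	uint32_t mask = (BIT(width) - 1) << shift;

	return (reg & ~mask) | ((uint32_t)hw_index << shift);
}

int main(void)
{
	uint32_t reg = 0;

	/* four-parent clock: EXSRC in bits 7:6, pick hardware source 2 */
	reg = div6_set_src(reg, 6, 2, 2);
	printf("src=%u reg=0x%08x\n", div6_get_src(reg, 6, 2), reg);
	return 0;
}
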
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 7ddc2b553846..a66953c0f430 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -7,6 +7,7 @@ obj-y += clk-a10-hosc.o
obj-y += clk-a20-gmac.o
obj-y += clk-mod0.o
obj-y += clk-sun8i-mbus.o
+obj-y += clk-sun9i-core.o
obj-$(CONFIG_MFD_SUN6I_PRCM) += \
clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 5296fd6dd7b3..0dcf4f205fb8 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -53,6 +53,11 @@ static DEFINE_SPINLOCK(gmac_lock);
#define SUN7I_A20_GMAC_MASK 0x3
#define SUN7I_A20_GMAC_PARENTS 2
+static u32 sun7i_a20_gmac_mux_table[SUN7I_A20_GMAC_PARENTS] = {
+ 0x00, /* Select mii_phy_tx_clk */
+ 0x02, /* Select gmac_int_tx_clk */
+};
+
static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
{
struct clk *clk;
@@ -90,7 +95,7 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
gate->lock = &gmac_lock;
mux->reg = reg;
mux->mask = SUN7I_A20_GMAC_MASK;
- mux->flags = CLK_MUX_INDEX_BIT;
+ mux->table = sun7i_a20_gmac_mux_table;
mux->lock = &gmac_lock;
clk = clk_register_composite(NULL, clk_name,
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index f83ba097126c..62e08fb58554 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -81,7 +81,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_p)
+ struct clk_hw **best_parent_p)
{
struct clk *clk = hw->clk, *parent, *best_parent = NULL;
int i, num_parents;
@@ -108,7 +108,7 @@ static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
}
if (best_parent)
- *best_parent_p = best_parent;
+ *best_parent_p = __clk_get_hw(best_parent);
*best_parent_rate = best;
return best_child_rate;
@@ -224,7 +224,7 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
/* set up gate properties */
mux->reg = reg;
mux->shift = data->mux;
- mux->mask = SUNXI_FACTORS_MUX_MASK;
+ mux->mask = data->muxmask;
mux->lock = factors->lock;
mux_hw = &mux->hw;
}
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 9913840018d3..912238fde132 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -7,8 +7,6 @@
#define SUNXI_FACTORS_NOT_APPLICABLE (0)
-#define SUNXI_FACTORS_MUX_MASK 0x3
-
struct clk_factors_config {
u8 nshift;
u8 nwidth;
@@ -24,6 +22,7 @@ struct clk_factors_config {
struct factors_data {
int enable;
int mux;
+ int muxmask;
struct clk_factors_config *table;
void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
const char *name;
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index 4a563850ee6e..da0524eaee94 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -70,6 +70,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
static const struct factors_data sun4i_a10_mod0_data __initconst = {
.enable = 31,
.mux = 24,
+ .muxmask = BIT(1) | BIT(0),
.table = &sun4i_a10_mod0_config,
.getter = sun4i_a10_get_mod0_factors,
};
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index acca53290be2..3d282fb8f85c 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -46,7 +46,7 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
- struct clk **best_parent_clk)
+ struct clk_hw **best_parent_clk)
{
int nparents = __clk_get_num_parents(hw->clk);
long best_rate = -EINVAL;
@@ -100,7 +100,7 @@ static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
tmp_rate = (parent_rate >> shift) / div;
if (!*best_parent_clk || tmp_rate > best_rate) {
- *best_parent_clk = parent;
+ *best_parent_clk = __clk_get_hw(parent);
*best_parent_rate = parent_rate;
best_rate = tmp_rate;
}
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 8e49b44cee41..ef49786eefd3 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -60,6 +60,7 @@ static struct clk_factors_config sun8i_a23_mbus_config = {
static const struct factors_data sun8i_a23_mbus_data __initconst = {
.enable = 31,
.mux = 24,
+ .muxmask = BIT(1) | BIT(0),
.table = &sun8i_a23_mbus_config,
.getter = sun8i_a23_get_mbus_factors,
};
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
new file mode 100644
index 000000000000..3cb9036d91bb
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2014 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/log2.h>
+
+#include "clk-factors.h"
+
+
+/**
+ * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
+ * PLL4 rate is calculated as follows
+ * rate = (parent_rate * n >> p) / (m + 1);
+ * parent_rate is always 24Mhz
+ *
+ * p and m are named div1 and div2 in Allwinner's SDK
+ */
+
+static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ int div;
+
+ /* Normalize value to a 6M multiple */
+ div = DIV_ROUND_UP(*freq, 6000000);
+
+ /* divs above 256 cannot be odd */
+ if (div > 256)
+ div = round_up(div, 2);
+
+ /* divs above 512 must be a multiple of 4 */
+ if (div > 512)
+ div = round_up(div, 4);
+
+ *freq = 6000000 * div;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ /* p will be 1 for divs under 512 */
+ if (div < 512)
+ *p = 1;
+ else
+ *p = 0;
+
+ /* m will be 1 if div is odd */
+ if (div & 1)
+ *m = 1;
+ else
+ *m = 0;
+
+ /* calculate a suitable n based on m and p */
+ *n = div / (*p + 1) / (*m + 1);
+}
+
+static struct clk_factors_config sun9i_a80_pll4_config = {
+ .mshift = 18,
+ .mwidth = 1,
+ .nshift = 8,
+ .nwidth = 8,
+ .pshift = 16,
+ .pwidth = 1,
+};
+
+static const struct factors_data sun9i_a80_pll4_data __initconst = {
+ .enable = 31,
+ .table = &sun9i_a80_pll4_config,
+ .getter = sun9i_a80_get_pll4_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
+
+static void __init sun9i_a80_pll4_setup(struct device_node *node)
+{
+ sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
+
+
+/**
+ * sun9i_a80_get_gt_factors() - calculates m factor for GT
+ * GT rate is calculated as follows
+ * rate = parent_rate / (m + 1);
+ */
+
+static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u32 div;
+
+ if (parent_rate < *freq)
+ *freq = parent_rate;
+
+ div = DIV_ROUND_UP(parent_rate, *freq);
+
+ /* maximum divider is 4 */
+ if (div > 4)
+ div = 4;
+
+ *freq = parent_rate / div;
+
+ /* we were called to round the frequency, we can now return */
+ if (!m)
+ return;
+
+ *m = div;
+}
+
+static struct clk_factors_config sun9i_a80_gt_config = {
+ .mshift = 0,
+ .mwidth = 2,
+};
+
+static const struct factors_data sun9i_a80_gt_data __initconst = {
+ .mux = 24,
+ .muxmask = BIT(1) | BIT(0),
+ .table = &sun9i_a80_gt_config,
+ .getter = sun9i_a80_get_gt_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
+
+static void __init sun9i_a80_gt_setup(struct device_node *node)
+{
+ struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
+ &sun9i_a80_gt_lock);
+
+ /* The GT bus clock needs to be always enabled */
+ __clk_get(gt);
+ clk_prepare_enable(gt);
+}
+CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
+
+
+/**
+ * sun9i_a80_get_ahb_factors() - calculates p factor for AHB0/1/2
+ * AHB rate is calculated as follows
+ * rate = parent_rate >> p;
+ */
+
+static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u32 _p;
+
+ if (parent_rate < *freq)
+ *freq = parent_rate;
+
+ _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+
+ /* maximum p is 3 */
+ if (_p > 3)
+ _p = 3;
+
+ *freq = parent_rate >> _p;
+
+ /* we were called to round the frequency, we can now return */
+ if (!p)
+ return;
+
+ *p = _p;
+}
+
+static struct clk_factors_config sun9i_a80_ahb_config = {
+ .pshift = 0,
+ .pwidth = 2,
+};
+
+static const struct factors_data sun9i_a80_ahb_data __initconst = {
+ .mux = 24,
+ .muxmask = BIT(1) | BIT(0),
+ .table = &sun9i_a80_ahb_config,
+ .getter = sun9i_a80_get_ahb_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
+
+static void __init sun9i_a80_ahb_setup(struct device_node *node)
+{
+ sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
+
+
+static const struct factors_data sun9i_a80_apb0_data __initconst = {
+ .mux = 24,
+ .muxmask = BIT(0),
+ .table = &sun9i_a80_ahb_config,
+ .getter = sun9i_a80_get_ahb_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
+
+static void __init sun9i_a80_apb0_setup(struct device_node *node)
+{
+ sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
+
+
+/**
+ * sun9i_a80_get_apb1_factors() - calculates m, p factors for APB1
+ * APB1 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u32 div;
+ u8 calcm, calcp;
+
+ if (parent_rate < *freq)
+ *freq = parent_rate;
+
+ div = DIV_ROUND_UP(parent_rate, *freq);
+
+ /* Highest possible divider is 256 (p = 3, m = 31) */
+ if (div > 256)
+ div = 256;
+
+ calcp = order_base_2(div);
+ calcm = (parent_rate >> calcp) - 1;
+ *freq = (parent_rate >> calcp) / (calcm + 1);
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ *m = calcm;
+ *p = calcp;
+}
+
+static struct clk_factors_config sun9i_a80_apb1_config = {
+ .mshift = 0,
+ .mwidth = 5,
+ .pshift = 16,
+ .pwidth = 2,
+};
+
+static const struct factors_data sun9i_a80_apb1_data __initconst = {
+ .mux = 24,
+ .muxmask = BIT(0),
+ .table = &sun9i_a80_apb1_config,
+ .getter = sun9i_a80_get_apb1_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
+
+static void __init sun9i_a80_apb1_setup(struct device_node *node)
+{
+ sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
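
As a quick check of the PLL4 getter above: a 1.2 GHz request gives div = DIV_ROUND_UP(1200000000, 6000000) = 200, which needs no even/multiple-of-4 rounding, so p = 1 (div < 512), m = 0 (div is even), n = 200 / 2 / 1 = 100, and (24 MHz * 100 >> 1) / (0 + 1) lands exactly on 1.2 GHz. The user-space sketch below just replays that arithmetic; the numbers are an example, not a required operating point.

/* Standalone recheck of the PLL4 factor selection for a 1.2 GHz request. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent = 24000000;
	uint32_t want = 1200000000;
	uint32_t div = (want + 6000000 - 1) / 6000000;	/* round up to 6 MHz steps */
	uint8_t p, m, n;

	/* divs above 256 cannot be odd, above 512 must be a multiple of 4;
	 * neither applies to div = 200, so both steps are skipped here. */
	p = (div < 512) ? 1 : 0;
	m = (div & 1) ? 1 : 0;
	n = div / (p + 1) / (m + 1);

	printf("n=%u p=%u m=%u rate=%llu\n", n, p, m,
	       (unsigned long long)((parent * n >> p) / (m + 1)));
	return 0;
}
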
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index d5dc951264ca..570202582dcf 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -245,9 +245,9 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
}
/**
- * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6
- * PLL6 rate is calculated as follows
- * rate = parent_rate * n * (k + 1) / 2
+ * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
+ * PLL6x2 rate is calculated as follows
+ * rate = parent_rate * (n + 1) * (k + 1)
* parent_rate is always 24Mhz
*/
@@ -256,13 +256,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
{
u8 div;
- /*
- * We always have 24MHz / 2, so we can just say that our
- * parent clock is 12MHz.
- */
- parent_rate = parent_rate / 2;
-
- /* Normalize value to a parent_rate multiple (24M / 2) */
+ /* Normalize value to a parent_rate multiple (24M) */
div = *freq / parent_rate;
*freq = parent_rate * div;
@@ -274,7 +268,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
if (*k > 3)
*k = 3;
- *n = DIV_ROUND_UP(div, (*k+1));
+ *n = DIV_ROUND_UP(div, (*k+1)) - 1;
}
/**
@@ -445,6 +439,7 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
.nwidth = 5,
.kshift = 4,
.kwidth = 2,
+ .n_start = 1,
};
static struct clk_factors_config sun4i_apb1_config = {
@@ -504,9 +499,12 @@ static const struct factors_data sun6i_a31_pll6_data __initconst = {
.enable = 31,
.table = &sun6i_a31_pll6_config,
.getter = sun6i_a31_get_pll6_factors,
+ .name = "pll6x2",
};
static const struct factors_data sun4i_apb1_data __initconst = {
+ .mux = 24,
+ .muxmask = BIT(1) | BIT(0),
.table = &sun4i_apb1_config,
.getter = sun4i_get_apb1_factors,
};
@@ -514,6 +512,7 @@ static const struct factors_data sun4i_apb1_data __initconst = {
static const struct factors_data sun7i_a20_out_data __initconst = {
.enable = 31,
.mux = 24,
+ .muxmask = BIT(1) | BIT(0),
.table = &sun7i_a20_out_config,
.getter = sun7i_a20_get_out_factors,
};
@@ -544,10 +543,6 @@ static const struct mux_data sun6i_a31_ahb1_mux_data __initconst = {
.shift = 12,
};
-static const struct mux_data sun4i_apb1_mux_data __initconst = {
- .shift = 24,
-};
-
static void __init sunxi_mux_clk_setup(struct device_node *node,
struct mux_data *data)
{
@@ -633,12 +628,6 @@ static const struct div_data sun4i_apb0_data __initconst = {
.table = sun4i_apb0_table,
};
-static const struct div_data sun6i_a31_apb2_div_data __initconst = {
- .shift = 0,
- .pow = 0,
- .width = 4,
-};
-
static void __init sunxi_divider_clk_setup(struct device_node *node,
struct div_data *data)
{
@@ -757,6 +746,18 @@ static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
.mask = {0x25386742, 0x2505111},
};
+static const struct gates_data sun9i_a80_ahb0_gates_data __initconst = {
+ .mask = {0xF5F12B},
+};
+
+static const struct gates_data sun9i_a80_ahb1_gates_data __initconst = {
+ .mask = {0x1E20003},
+};
+
+static const struct gates_data sun9i_a80_ahb2_gates_data __initconst = {
+ .mask = {0x9B7},
+};
+
static const struct gates_data sun4i_apb0_gates_data __initconst = {
.mask = {0x4EF},
};
@@ -773,6 +774,10 @@ static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
.mask = { 0x4ff },
};
+static const struct gates_data sun9i_a80_apb0_gates_data __initconst = {
+ .mask = {0xEB822},
+};
+
static const struct gates_data sun4i_apb1_gates_data __initconst = {
.mask = {0xFF00F7},
};
@@ -801,6 +806,10 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
.mask = { 0xff80ff },
};
+static const struct gates_data sun9i_a80_apb1_gates_data __initconst = {
+ .mask = {0x3F001F},
+};
+
static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
.mask = {0x1F0007},
};
@@ -893,6 +902,7 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
struct divs_data {
const struct factors_data *factors; /* data for the factor clock */
+ int ndivs; /* number of children */
struct {
u8 fixed; /* is it a fixed divisor? if not... */
struct clk_div_table *table; /* is it a table based divisor? */
@@ -912,6 +922,7 @@ static struct clk_div_table pll6_sata_tbl[] = {
static const struct divs_data pll5_divs_data __initconst = {
.factors = &sun4i_pll5_data,
+ .ndivs = 2,
.div = {
{ .shift = 0, .pow = 0, }, /* M, DDR */
{ .shift = 16, .pow = 1, }, /* P, other */
@@ -920,12 +931,21 @@ static const struct divs_data pll5_divs_data __initconst = {
static const struct divs_data pll6_divs_data __initconst = {
.factors = &sun4i_pll6_data,
+ .ndivs = 2,
.div = {
{ .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
{ .fixed = 2 }, /* P, other */
}
};
+static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
+ .factors = &sun6i_a31_pll6_data,
+ .ndivs = 1,
+ .div = {
+ { .fixed = 2 }, /* normal output */
+ }
+};
+
/**
* sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
*
@@ -950,7 +970,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
struct clk_fixed_factor *fix_factor;
struct clk_divider *divider;
void __iomem *reg;
- int i = 0;
+ int ndivs = SUNXI_DIVS_MAX_QTY, i = 0;
int flags, clkflags;
/* Set up factor clock that we will be dividing */
@@ -973,7 +993,11 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
* our RAM clock! */
clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT;
- for (i = 0; i < SUNXI_DIVS_MAX_QTY; i++) {
+ /* if number of children known, use it */
+ if (data->ndivs)
+ ndivs = data->ndivs;
+
+ for (i = 0; i < ndivs; i++) {
if (of_property_read_string_index(node, "clock-output-names",
i, &clk_name) != 0)
break;
@@ -1062,7 +1086,6 @@ static const struct of_device_id clk_factors_match[] __initconst = {
{.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
{.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
{.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
- {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
{.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
{.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
{}
@@ -1074,7 +1097,6 @@ static const struct of_device_id clk_div_match[] __initconst = {
{.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
{.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
{.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
- {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
{}
};
@@ -1082,13 +1104,13 @@ static const struct of_device_id clk_div_match[] __initconst = {
static const struct of_device_id clk_divs_match[] __initconst = {
{.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
{.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
+ {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
{}
};
/* Matches for mux clocks */
static const struct of_device_id clk_mux_match[] __initconst = {
{.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
- {.compatible = "allwinner,sun4i-a10-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
{.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
{}
};
@@ -1102,16 +1124,21 @@ static const struct of_device_id clk_gates_match[] __initconst = {
{.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
{.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
{.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
+ {.compatible = "allwinner,sun9i-a80-ahb0-gates-clk", .data = &sun9i_a80_ahb0_gates_data,},
+ {.compatible = "allwinner,sun9i-a80-ahb1-gates-clk", .data = &sun9i_a80_ahb1_gates_data,},
+ {.compatible = "allwinner,sun9i-a80-ahb2-gates-clk", .data = &sun9i_a80_ahb2_gates_data,},
{.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
{.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
{.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
+ {.compatible = "allwinner,sun9i-a80-apb0-gates-clk", .data = &sun9i_a80_apb0_gates_data,},
{.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
{.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
{.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
{.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
{.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
+ {.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,},
{.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
{.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
{.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
@@ -1200,3 +1227,9 @@ static void __init sun6i_init_clocks(struct device_node *node)
}
CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
+
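+/* No clocks are marked critical for sun9i here, hence the NULL list and count of 0 */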
+static void __init sun9i_init_clocks(struct device_node *node)
+{
+ sunxi_init_clocks(NULL, 0);
+}
+CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 91a488c7cc44..31e8308ba899 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -753,6 +753,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
+ depends on HAS_DMA
depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
help
If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 373f6d4e4080..30059c1df2a3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -30,12 +30,12 @@
#define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7)
#define MV64XXX_I2C_BAUD_DIV_M(val) ((val & 0xf) << 3)
-#define MV64XXX_I2C_REG_CONTROL_ACK 0x00000004
-#define MV64XXX_I2C_REG_CONTROL_IFLG 0x00000008
-#define MV64XXX_I2C_REG_CONTROL_STOP 0x00000010
-#define MV64XXX_I2C_REG_CONTROL_START 0x00000020
-#define MV64XXX_I2C_REG_CONTROL_TWSIEN 0x00000040
-#define MV64XXX_I2C_REG_CONTROL_INTEN 0x00000080
+#define MV64XXX_I2C_REG_CONTROL_ACK BIT(2)
+#define MV64XXX_I2C_REG_CONTROL_IFLG BIT(3)
+#define MV64XXX_I2C_REG_CONTROL_STOP BIT(4)
+#define MV64XXX_I2C_REG_CONTROL_START BIT(5)
+#define MV64XXX_I2C_REG_CONTROL_TWSIEN BIT(6)
+#define MV64XXX_I2C_REG_CONTROL_INTEN BIT(7)
/* Ctlr status values */
#define MV64XXX_I2C_STATUS_BUS_ERR 0x00
@@ -68,19 +68,17 @@
#define MV64XXX_I2C_REG_BRIDGE_TIMING 0xe0
/* Bridge Control values */
-#define MV64XXX_I2C_BRIDGE_CONTROL_WR 0x00000001
-#define MV64XXX_I2C_BRIDGE_CONTROL_RD 0x00000002
+#define MV64XXX_I2C_BRIDGE_CONTROL_WR BIT(0)
+#define MV64XXX_I2C_BRIDGE_CONTROL_RD BIT(1)
#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT 2
-#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT 0x00001000
+#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT BIT(12)
#define MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT 13
#define MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT 16
-#define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE 0x00080000
+#define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE BIT(19)
+#define MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START BIT(20)
/* Bridge Status values */
-#define MV64XXX_I2C_BRIDGE_STATUS_ERROR 0x00000001
-#define MV64XXX_I2C_STATUS_OFFLOAD_ERROR 0xf0000001
-#define MV64XXX_I2C_STATUS_OFFLOAD_OK 0xf0000000
-
+#define MV64XXX_I2C_BRIDGE_STATUS_ERROR BIT(0)
/* Driver states */
enum {
@@ -99,14 +97,12 @@ enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
MV64XXX_I2C_ACTION_SEND_RESTART,
- MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
MV64XXX_I2C_ACTION_SEND_ADDR_1,
MV64XXX_I2C_ACTION_SEND_ADDR_2,
MV64XXX_I2C_ACTION_SEND_DATA,
MV64XXX_I2C_ACTION_RCV_DATA,
MV64XXX_I2C_ACTION_RCV_DATA_STOP,
MV64XXX_I2C_ACTION_SEND_STOP,
- MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP,
};
struct mv64xxx_i2c_regs {
@@ -193,75 +189,6 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
}
}
-static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
-{
- unsigned long data_reg_hi = 0;
- unsigned long data_reg_lo = 0;
- unsigned long ctrl_reg;
- struct i2c_msg *msg = drv_data->msgs;
-
- if (!drv_data->offload_enabled)
- return -EOPNOTSUPP;
-
- /* Only regular transactions can be offloaded */
- if ((msg->flags & ~(I2C_M_TEN | I2C_M_RD)) != 0)
- return -EINVAL;
-
- /* Only 1-8 byte transfers can be offloaded */
- if (msg->len < 1 || msg->len > 8)
- return -EINVAL;
-
- /* Build transaction */
- ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
- (msg->addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
-
- if ((msg->flags & I2C_M_TEN) != 0)
- ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
-
- if ((msg->flags & I2C_M_RD) == 0) {
- u8 local_buf[8] = { 0 };
-
- memcpy(local_buf, msg->buf, msg->len);
- data_reg_lo = cpu_to_le32(*((u32 *)local_buf));
- data_reg_hi = cpu_to_le32(*((u32 *)(local_buf+4)));
-
- ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
- (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
-
- writel(data_reg_lo,
- drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
- writel(data_reg_hi,
- drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
-
- } else {
- ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
- (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT;
- }
-
- /* Execute transaction */
- writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
-
- return 0;
-}
-
-static void
-mv64xxx_i2c_update_offload_data(struct mv64xxx_i2c_data *drv_data)
-{
- struct i2c_msg *msg = drv_data->msg;
-
- if (msg->flags & I2C_M_RD) {
- u32 data_reg_lo = readl(drv_data->reg_base +
- MV64XXX_I2C_REG_RX_DATA_LO);
- u32 data_reg_hi = readl(drv_data->reg_base +
- MV64XXX_I2C_REG_RX_DATA_HI);
- u8 local_buf[8] = { 0 };
-
- *((u32 *)local_buf) = le32_to_cpu(data_reg_lo);
- *((u32 *)(local_buf+4)) = le32_to_cpu(data_reg_hi);
- memcpy(msg->buf, local_buf, msg->len);
- }
-
-}
/*
*****************************************************************************
*
@@ -389,16 +316,6 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
drv_data->rc = -ENXIO;
break;
- case MV64XXX_I2C_STATUS_OFFLOAD_OK:
- if (drv_data->send_stop || drv_data->aborting) {
- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP;
- drv_data->state = MV64XXX_I2C_STATE_IDLE;
- } else {
- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_RESTART;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
- }
- break;
-
default:
dev_err(&drv_data->adapter.dev,
"mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, "
@@ -419,25 +336,15 @@ static void mv64xxx_i2c_send_start(struct mv64xxx_i2c_data *drv_data)
drv_data->aborting = 0;
drv_data->rc = 0;
- /* Can we offload this msg ? */
- if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
- /* No, switch to standard path */
- mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
- drv_data->reg_base + drv_data->reg_offsets.control);
- }
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+ drv_data->reg_base + drv_data->reg_offsets.control);
}
static void
mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
{
switch(drv_data->action) {
- case MV64XXX_I2C_ACTION_OFFLOAD_RESTART:
- mv64xxx_i2c_update_offload_data(drv_data);
- writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
- writel(0, drv_data->reg_base +
- MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
- /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_RESTART:
/* We should only get here if we have further messages */
BUG_ON(drv_data->num_msgs == 0);
@@ -518,16 +425,71 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->block = 0;
wake_up(&drv_data->waitq);
break;
+ }
+}
- case MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP:
- mv64xxx_i2c_update_offload_data(drv_data);
- writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
- writel(0, drv_data->reg_base +
- MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
- drv_data->block = 0;
- wake_up(&drv_data->waitq);
- break;
+static void
+mv64xxx_i2c_read_offload_rx_data(struct mv64xxx_i2c_data *drv_data,
+ struct i2c_msg *msg)
+{
+ u32 buf[2];
+
+ buf[0] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_LO);
+ buf[1] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_HI);
+
+ memcpy(msg->buf, buf, msg->len);
+}
+
+static int
+mv64xxx_i2c_intr_offload(struct mv64xxx_i2c_data *drv_data)
+{
+ u32 cause, status;
+
+ cause = readl(drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+ if (!cause)
+ return IRQ_NONE;
+
+ status = readl(drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_STATUS);
+
+ if (status & MV64XXX_I2C_BRIDGE_STATUS_ERROR) {
+ drv_data->rc = -EIO;
+ goto out;
+ }
+
+ drv_data->rc = 0;
+
+ /*
+ * The transaction is a single read message; read the data
+ * for that message.
+ */
+ if (drv_data->num_msgs == 1 && drv_data->msgs[0].flags & I2C_M_RD) {
+ mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs);
+ drv_data->msgs++;
+ drv_data->num_msgs--;
+ }
+ /*
+ * The transaction is a write message followed by a read
+ * message; read the data for the second (read) message.
+ */
+ else if (drv_data->num_msgs == 2 &&
+ !(drv_data->msgs[0].flags & I2C_M_RD) &&
+ drv_data->msgs[1].flags & I2C_M_RD) {
+ mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs + 1);
+ drv_data->msgs += 2;
+ drv_data->num_msgs -= 2;
}
+
+out:
+ writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+ writel(0, drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+ drv_data->block = 0;
+
+ wake_up(&drv_data->waitq);
+
+ return IRQ_HANDLED;
}
static irqreturn_t
@@ -540,20 +502,9 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
spin_lock_irqsave(&drv_data->lock, flags);
- if (drv_data->offload_enabled) {
- while (readl(drv_data->reg_base +
- MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE)) {
- int reg_status = readl(drv_data->reg_base +
- MV64XXX_I2C_REG_BRIDGE_STATUS);
- if (reg_status & MV64XXX_I2C_BRIDGE_STATUS_ERROR)
- status = MV64XXX_I2C_STATUS_OFFLOAD_ERROR;
- else
- status = MV64XXX_I2C_STATUS_OFFLOAD_OK;
- mv64xxx_i2c_fsm(drv_data, status);
- mv64xxx_i2c_do_action(drv_data);
- rc = IRQ_HANDLED;
- }
- }
+ if (drv_data->offload_enabled)
+ rc = mv64xxx_i2c_intr_offload(drv_data);
+
while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
MV64XXX_I2C_REG_CONTROL_IFLG) {
status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
@@ -635,6 +586,117 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
return drv_data->rc;
}
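+/*
+ * Copy up to 8 bytes of TX payload into the two 32-bit bridge TX data
+ * registers; mv64xxx_i2c_can_offload() guarantees msg->len <= 8.
+ */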
+static void
+mv64xxx_i2c_prepare_tx(struct mv64xxx_i2c_data *drv_data)
+{
+ struct i2c_msg *msg = drv_data->msgs;
+ u32 buf[2];
+
+ memcpy(buf, msg->buf, msg->len);
+
+ writel(buf[0], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
+ writel(buf[1], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
+}
+
+static int
+mv64xxx_i2c_offload_xfer(struct mv64xxx_i2c_data *drv_data)
+{
+ struct i2c_msg *msgs = drv_data->msgs;
+ int num = drv_data->num_msgs;
+ unsigned long ctrl_reg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ /* Build transaction */
+ ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
+ (msgs[0].addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
+
+ if (msgs[0].flags & I2C_M_TEN)
+ ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
+
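+ /*
+ * The bridge TX/RX size fields encode the transfer length minus
+ * one; lengths are limited to 1-8 bytes by
+ * mv64xxx_i2c_valid_offload_sz().
+ */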
+ /* Single write message transaction */
+ if (num == 1 && !(msgs[0].flags & I2C_M_RD)) {
+ size_t len = msgs[0].len - 1;
+
+ ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
+ (len << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT);
+ mv64xxx_i2c_prepare_tx(drv_data);
+ }
+ /* Single read message transaction */
+ else if (num == 1 && msgs[0].flags & I2C_M_RD) {
+ size_t len = msgs[0].len - 1;
+
+ ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
+ (len << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT);
+ }
+ /*
+ * Transaction with one write and one read message. This is
+ * guaranteed by the mv64xxx_i2c_can_offload() checks.
+ */
+ else if (num == 2) {
+ size_t lentx = msgs[0].len - 1;
+ size_t lenrx = msgs[1].len - 1;
+
+ ctrl_reg |=
+ MV64XXX_I2C_BRIDGE_CONTROL_RD |
+ MV64XXX_I2C_BRIDGE_CONTROL_WR |
+ (lentx << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT) |
+ (lenrx << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT) |
+ MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START;
+ mv64xxx_i2c_prepare_tx(drv_data);
+ }
+
+ /* Execute transaction */
+ drv_data->block = 1;
+ writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ mv64xxx_i2c_wait_for_completion(drv_data);
+
+ return drv_data->rc;
+}
+
+static bool
+mv64xxx_i2c_valid_offload_sz(struct i2c_msg *msg)
+{
+ return msg->len <= 8 && msg->len >= 1;
+}
+
+static bool
+mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data)
+{
+ struct i2c_msg *msgs = drv_data->msgs;
+ int num = drv_data->num_msgs;
+
+ if (!drv_data->offload_enabled)
+ return false;
+
+ /*
+ * We can offload a transaction consisting of a single
+ * message, as long as the message has a length between 1 and
+ * 8 bytes.
+ */
+ if (num == 1 && mv64xxx_i2c_valid_offload_sz(msgs))
+ return true;
+
+ /*
+ * We can offload a transaction consisting of two messages, if
+ * the first is a write and the second is a read, and both have
+ * a length between 1 and 8 bytes.
+ */
+ if (num == 2 &&
+ mv64xxx_i2c_valid_offload_sz(msgs) &&
+ mv64xxx_i2c_valid_offload_sz(msgs + 1) &&
+ !(msgs[0].flags & I2C_M_RD) &&
+ msgs[1].flags & I2C_M_RD)
+ return true;
+
+ return false;
+}
+
/*
*****************************************************************************
*
@@ -658,7 +720,11 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
drv_data->msgs = msgs;
drv_data->num_msgs = num;
- rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
+ if (mv64xxx_i2c_can_offload(drv_data))
+ rc = mv64xxx_i2c_offload_xfer(drv_data);
+ else
+ rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
+
if (rc < 0)
ret = rc;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index d7efaf44868b..440d5dbc8b5f 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -140,6 +140,7 @@ struct sh_mobile_i2c_data {
int sr;
bool send_stop;
+ struct resource *res;
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
struct scatterlist sg;
@@ -539,6 +540,42 @@ static void sh_mobile_i2c_dma_callback(void *data)
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
+static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir, dma_addr_t port_addr)
+{
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
+ int ret;
+
+ chan = dma_request_slave_channel_reason(dev, chan_name);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
+ dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
+ return chan;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = port_addr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else {
+ cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
+ dma_release_channel(chan);
+ return ERR_PTR(ret);
+ }
+
+ dev_dbg(dev, "got DMA channel for %s\n", chan_name);
+ return chan;
+}
+
static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
{
bool read = pd->msg->flags & I2C_M_RD;
@@ -548,7 +585,16 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
dma_addr_t dma_addr;
dma_cookie_t cookie;
- if (!chan)
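+ /*
+ * DMA channels are requested lazily: they stay at -EPROBE_DEFER
+ * until the first DMA transfer, at which point we try to grab
+ * them here.
+ */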
+ if (PTR_ERR(chan) == -EPROBE_DEFER) {
+ if (read)
+ chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
+ pd->res->start + ICDR);
+ else
+ chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
+ pd->res->start + ICDR);
+ }
+
+ if (IS_ERR(chan))
return;
dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
@@ -747,56 +793,16 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
-static int sh_mobile_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir,
- dma_addr_t port_addr, struct dma_chan **chan_ptr)
-{
- struct dma_chan *chan;
- struct dma_slave_config cfg;
- char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
- int ret;
-
- *chan_ptr = NULL;
-
- chan = dma_request_slave_channel_reason(dev, chan_name);
- if (IS_ERR(chan)) {
- ret = PTR_ERR(chan);
- dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
- return ret;
- }
-
- memset(&cfg, 0, sizeof(cfg));
- cfg.direction = dir;
- if (dir == DMA_MEM_TO_DEV) {
- cfg.dst_addr = port_addr;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else {
- cfg.src_addr = port_addr;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- }
-
- ret = dmaengine_slave_config(chan, &cfg);
- if (ret) {
- dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
- dma_release_channel(chan);
- return ret;
- }
-
- *chan_ptr = chan;
-
- dev_dbg(dev, "got DMA channel for %s\n", chan_name);
- return 0;
-}
-
static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd)
{
- if (pd->dma_tx) {
+ if (!IS_ERR(pd->dma_tx)) {
dma_release_channel(pd->dma_tx);
- pd->dma_tx = NULL;
+ pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
}
- if (pd->dma_rx) {
+ if (!IS_ERR(pd->dma_rx)) {
dma_release_channel(pd->dma_rx);
- pd->dma_rx = NULL;
+ pd->dma_rx = ERR_PTR(-EPROBE_DEFER);
}
}
@@ -849,6 +855,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ pd->res = res;
pd->reg = devm_ioremap_resource(&dev->dev, res);
if (IS_ERR(pd->reg))
return PTR_ERR(pd->reg);
@@ -889,17 +896,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
/* Init DMA */
sg_init_table(&pd->sg, 1);
pd->dma_direction = DMA_NONE;
- ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
- res->start + ICDR, &pd->dma_rx);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
- res->start + ICDR, &pd->dma_tx);
- if (ret == -EPROBE_DEFER) {
- sh_mobile_i2c_release_dma(pd);
- return ret;
- }
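+ /* Request the DMA channels on first use instead of deferring the whole probe */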
+ pd->dma_rx = pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
/* Enable Runtime PM for this device.
*
@@ -937,8 +934,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
return ret;
}
- dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz, DMA=%c\n",
- adap->nr, pd->bus_speed, (pd->dma_rx || pd->dma_tx) ? 'y' : 'n');
+ dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz\n", adap->nr, pd->bus_speed);
return 0;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 10641b7816f4..dafb3c531f96 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
-#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
@@ -36,11 +35,17 @@
#define ISERT_MAX_CONN 8
#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
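+/*
+ * A single CQ per completion vector now carries both RX and TX
+ * completions, plus one extra entry per connection.
+ */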
+#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
+ ISERT_MAX_CONN)
+
+int isert_debug_level = 0;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
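+/*
+ * True when the command needs T10-PI (protection information)
+ * handling on this connection.
+ */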
+static inline bool
+isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+{
+ return (conn->pi_support &&
+ cmd->prot_op != TARGET_PROT_NORMAL);
+}
+
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
struct isert_conn *isert_conn = (struct isert_conn *)context;
- pr_err("isert_qp_event_callback event: %d\n", e->event);
+ isert_err("conn %p event: %d\n", isert_conn, e->event);
switch (e->event) {
case IB_EVENT_COMM_EST:
rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
- pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+ isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
break;
default:
break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
ret = ib_query_device(ib_dev, devattr);
if (ret) {
- pr_err("ib_query_device() failed: %d\n", ret);
+ isert_err("ib_query_device() failed: %d\n", ret);
return ret;
}
- pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
- pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+ isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
+ isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
return 0;
}
static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
- u8 protection)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
struct isert_device *device = isert_conn->conn_device;
struct ib_qp_init_attr attr;
- int ret, index, min_index = 0;
+ struct isert_comp *comp;
+ int ret, i, min = 0;
mutex_lock(&device_list_mutex);
- for (index = 0; index < device->cqs_used; index++)
- if (device->cq_active_qps[index] <
- device->cq_active_qps[min_index])
- min_index = index;
- device->cq_active_qps[min_index]++;
- pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+ for (i = 0; i < device->comps_used; i++)
+ if (device->comps[i].active_qps <
+ device->comps[min].active_qps)
+ min = i;
+ comp = &device->comps[min];
+ comp->active_qps++;
+ isert_info("conn %p, using comp %p min_index: %d\n",
+ isert_conn, comp, min);
mutex_unlock(&device_list_mutex);
memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
attr.qp_context = isert_conn;
- attr.send_cq = device->dev_tx_cq[min_index];
- attr.recv_cq = device->dev_rx_cq[min_index];
+ attr.send_cq = comp->cq;
+ attr.recv_cq = comp->cq;
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
- attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+ attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
/*
* FIXME: Use devattr.max_sge - 2 for max_send_sge as
* work-around for RDMA_READs with ConnectX-2.
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
- if (protection)
+ if (device->pi_capable)
attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
- pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
- cma_id->device);
- pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
- isert_conn->conn_pd->device);
-
ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
if (ret) {
- pr_err("rdma_create_qp failed for cma_id %d\n", ret);
- return ret;
+ isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ goto err;
}
isert_conn->conn_qp = cma_id->qp;
- pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
return 0;
+err:
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
+ mutex_unlock(&device_list_mutex);
+
+ return ret;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
- pr_debug("isert_cq_event_callback event: %d\n", e->event);
+ isert_dbg("event: %d\n", e->event);
}
static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
}
isert_conn->conn_rx_desc_head = 0;
+
return 0;
dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
kfree(isert_conn->conn_rx_descs);
isert_conn->conn_rx_descs = NULL;
fail:
+ isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
+
return -ENOMEM;
}
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->conn_rx_descs = NULL;
}
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
static int
isert_create_device_ib_res(struct isert_device *device)
{
struct ib_device *ib_dev = device->ib_device;
- struct isert_cq_desc *cq_desc;
struct ib_device_attr *dev_attr;
- int ret = 0, i, j;
- int max_rx_cqe, max_tx_cqe;
+ int ret = 0, i;
+ int max_cqe;
dev_attr = &device->dev_attr;
ret = isert_query_device(ib_dev, dev_attr);
if (ret)
return ret;
- max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
- max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+ max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
/* assign function handlers */
if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
device->pi_capable = dev_attr->device_cap_flags &
IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
- device->cqs_used = min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors);
- device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
- pr_debug("Using %d CQs, device %s supports %d vectors support "
- "Fast registration %d pi_capable %d\n",
- device->cqs_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_fastreg,
- device->pi_capable);
- device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
- device->cqs_used, GFP_KERNEL);
- if (!device->cq_desc) {
- pr_err("Unable to allocate device->cq_desc\n");
+ device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
+ device->ib_device->num_comp_vectors));
+ isert_info("Using %d CQs, %s supports %d vectors support "
+ "Fast registration %d pi_capable %d\n",
+ device->comps_used, device->ib_device->name,
+ device->ib_device->num_comp_vectors, device->use_fastreg,
+ device->pi_capable);
+
+ device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
+ GFP_KERNEL);
+ if (!device->comps) {
+ isert_err("Unable to allocate completion contexts\n");
return -ENOMEM;
}
- cq_desc = device->cq_desc;
-
- for (i = 0; i < device->cqs_used; i++) {
- cq_desc[i].device = device;
- cq_desc[i].cq_index = i;
-
- INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
- device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
- isert_cq_rx_callback,
- isert_cq_event_callback,
- (void *)&cq_desc[i],
- max_rx_cqe, i);
- if (IS_ERR(device->dev_rx_cq[i])) {
- ret = PTR_ERR(device->dev_rx_cq[i]);
- device->dev_rx_cq[i] = NULL;
- goto out_cq;
- }
- INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
- device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
- isert_cq_tx_callback,
- isert_cq_event_callback,
- (void *)&cq_desc[i],
- max_tx_cqe, i);
- if (IS_ERR(device->dev_tx_cq[i])) {
- ret = PTR_ERR(device->dev_tx_cq[i]);
- device->dev_tx_cq[i] = NULL;
- goto out_cq;
- }
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
- if (ret)
+ comp->device = device;
+ INIT_WORK(&comp->work, isert_cq_work);
+ comp->cq = ib_create_cq(device->ib_device,
+ isert_cq_callback,
+ isert_cq_event_callback,
+ (void *)comp,
+ max_cqe, i);
+ if (IS_ERR(comp->cq)) {
+ ret = PTR_ERR(comp->cq);
+ comp->cq = NULL;
goto out_cq;
+ }
- ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+ ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
if (ret)
goto out_cq;
}
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
return 0;
out_cq:
- for (j = 0; j < i; j++) {
- cq_desc = &device->cq_desc[j];
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- if (device->dev_rx_cq[j]) {
- cancel_work_sync(&cq_desc->cq_rx_work);
- ib_destroy_cq(device->dev_rx_cq[j]);
- }
- if (device->dev_tx_cq[j]) {
- cancel_work_sync(&cq_desc->cq_tx_work);
- ib_destroy_cq(device->dev_tx_cq[j]);
+ if (comp->cq) {
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
}
}
- kfree(device->cq_desc);
+ kfree(device->comps);
return ret;
}
@@ -330,21 +328,18 @@ out_cq:
static void
isert_free_device_ib_res(struct isert_device *device)
{
- struct isert_cq_desc *cq_desc;
int i;
- for (i = 0; i < device->cqs_used; i++) {
- cq_desc = &device->cq_desc[i];
+ isert_info("device %p\n", device);
- cancel_work_sync(&cq_desc->cq_rx_work);
- cancel_work_sync(&cq_desc->cq_tx_work);
- ib_destroy_cq(device->dev_rx_cq[i]);
- ib_destroy_cq(device->dev_tx_cq[i]);
- device->dev_rx_cq[i] = NULL;
- device->dev_tx_cq[i] = NULL;
- }
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- kfree(device->cq_desc);
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
+ comp->cq = NULL;
+ }
+ kfree(device->comps);
}
static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
{
mutex_lock(&device_list_mutex);
device->refcount--;
+ isert_info("device %p refcount %d\n", device, device->refcount);
if (!device->refcount) {
isert_free_device_ib_res(device);
list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
list_for_each_entry(device, &device_list, dev_node) {
if (device->ib_device->node_guid == cma_id->device->node_guid) {
device->refcount++;
+ isert_info("Found iser device %p refcount %d\n",
+ device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
}
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
device->refcount++;
list_add_tail(&device->dev_node, &device_list);
+ isert_info("Created a new iser device %p refcount %d\n",
+ device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
if (list_empty(&isert_conn->conn_fr_pool))
return;
- pr_debug("Freeing conn %p fastreg pool", isert_conn);
+ isert_info("Freeing conn %p fastreg pool", isert_conn);
list_for_each_entry_safe(fr_desc, tmp,
&isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
}
if (i < isert_conn->conn_fr_pool_size)
- pr_warn("Pool still has %d regions registered\n",
+ isert_warn("Pool still has %d regions registered\n",
isert_conn->conn_fr_pool_size - i);
}
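+/*
+ * Allocate the T10-PI resources for a fastreg descriptor: a protection
+ * fast_reg page list and MR, plus a signature-enabled MR.
+ */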
static int
+isert_create_pi_ctx(struct fast_reg_descriptor *desc,
+ struct ib_device *device,
+ struct ib_pd *pd)
+{
+ struct ib_mr_init_attr mr_init_attr;
+ struct pi_context *pi_ctx;
+ int ret;
+
+ pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+ if (!pi_ctx) {
+ isert_err("Failed to allocate pi context\n");
+ return -ENOMEM;
+ }
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ isert_err("Failed to allocate prot frpl err=%ld\n",
+ PTR_ERR(pi_ctx->prot_frpl));
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ goto err_pi_ctx;
+ }
+
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ isert_err("Failed to allocate prot frmr err=%ld\n",
+ PTR_ERR(pi_ctx->prot_mr));
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ goto err_prot_frpl;
+ }
+ desc->ind |= ISERT_PROT_KEY_VALID;
+
+ memset(&mr_init_attr, 0, sizeof(mr_init_attr));
+ mr_init_attr.max_reg_descriptors = 2;
+ mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ isert_err("Failed to allocate signature enabled mr err=%ld\n",
+ PTR_ERR(pi_ctx->sig_mr));
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ goto err_prot_mr;
+ }
+
+ desc->pi_ctx = pi_ctx;
+ desc->ind |= ISERT_SIG_KEY_VALID;
+ desc->ind &= ~ISERT_PROTECTED;
+
+ return 0;
+
+err_prot_mr:
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+err_prot_frpl:
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+ kfree(desc->pi_ctx);
+
+ return ret;
+}
+
+static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *fr_desc, u8 protection)
+ struct fast_reg_descriptor *fr_desc)
{
int ret;
fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_frpl)) {
- pr_err("Failed to allocate data frpl err=%ld\n",
- PTR_ERR(fr_desc->data_frpl));
+ isert_err("Failed to allocate data frpl err=%ld\n",
+ PTR_ERR(fr_desc->data_frpl));
return PTR_ERR(fr_desc->data_frpl);
}
fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
if (IS_ERR(fr_desc->data_mr)) {
- pr_err("Failed to allocate data frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
+ isert_err("Failed to allocate data frmr err=%ld\n",
+ PTR_ERR(fr_desc->data_mr));
ret = PTR_ERR(fr_desc->data_mr);
goto err_data_frpl;
}
- pr_debug("Create fr_desc %p page_list %p\n",
- fr_desc, fr_desc->data_frpl->page_list);
fr_desc->ind |= ISERT_DATA_KEY_VALID;
- if (protection) {
- struct ib_mr_init_attr mr_init_attr = {0};
- struct pi_context *pi_ctx;
-
- fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
- if (!fr_desc->pi_ctx) {
- pr_err("Failed to allocate pi context\n");
- ret = -ENOMEM;
- goto err_data_mr;
- }
- pi_ctx = fr_desc->pi_ctx;
-
- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_frpl)) {
- pr_err("Failed to allocate prot frpl err=%ld\n",
- PTR_ERR(pi_ctx->prot_frpl));
- ret = PTR_ERR(pi_ctx->prot_frpl);
- goto err_pi_ctx;
- }
-
- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_mr)) {
- pr_err("Failed to allocate prot frmr err=%ld\n",
- PTR_ERR(pi_ctx->prot_mr));
- ret = PTR_ERR(pi_ctx->prot_mr);
- goto err_prot_frpl;
- }
- fr_desc->ind |= ISERT_PROT_KEY_VALID;
-
- mr_init_attr.max_reg_descriptors = 2;
- mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
- if (IS_ERR(pi_ctx->sig_mr)) {
- pr_err("Failed to allocate signature enabled mr err=%ld\n",
- PTR_ERR(pi_ctx->sig_mr));
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto err_prot_mr;
- }
- fr_desc->ind |= ISERT_SIG_KEY_VALID;
- }
- fr_desc->ind &= ~ISERT_PROTECTED;
+ isert_dbg("Created fr_desc %p\n", fr_desc);
return 0;
-err_prot_mr:
- ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-err_prot_frpl:
- ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
-err_pi_ctx:
- kfree(fr_desc->pi_ctx);
-err_data_mr:
- ib_dereg_mr(fr_desc->data_mr);
+
err_data_frpl:
ib_free_fast_reg_page_list(fr_desc->data_frpl);
@@ -513,7 +523,7 @@ err_data_frpl:
}
static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
- pr_err("Failed to allocate fast_reg descriptor\n");
+ isert_err("Failed to allocate fast_reg descriptor\n");
ret = -ENOMEM;
goto err;
}
ret = isert_create_fr_desc(device->ib_device,
- isert_conn->conn_pd, fr_desc,
- pi_support);
+ isert_conn->conn_pd, fr_desc);
if (ret) {
- pr_err("Failed to create fastreg descriptor err=%d\n",
+ isert_err("Failed to create fastreg descriptor err=%d\n",
ret);
kfree(fr_desc);
goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
isert_conn->conn_fr_pool_size++;
}
- pr_debug("Creating conn %p fastreg pool size=%d",
+ isert_dbg("Creating conn %p fastreg pool size=%d",
isert_conn, isert_conn->conn_fr_pool_size);
return 0;
@@ -563,47 +572,45 @@ err:
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
- struct iscsi_np *np = cma_id->context;
- struct isert_np *isert_np = np->np_context;
+ struct isert_np *isert_np = cma_id->context;
+ struct iscsi_np *np = isert_np->np;
struct isert_conn *isert_conn;
struct isert_device *device;
struct ib_device *ib_dev = cma_id->device;
int ret = 0;
- u8 pi_support;
spin_lock_bh(&np->np_thread_lock);
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("iscsi_np is not enabled, reject connect request\n");
+ isert_dbg("iscsi_np is not enabled, reject connect request\n");
return rdma_reject(cma_id, NULL, 0);
}
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+ isert_dbg("cma_id: %p, portal: %p\n",
cma_id, cma_id->context);
isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
if (!isert_conn) {
- pr_err("Unable to allocate isert_conn\n");
+ isert_err("Unable to allocate isert_conn\n");
return -ENOMEM;
}
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
+ init_completion(&isert_conn->login_req_comp);
init_completion(&isert_conn->conn_wait);
- init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
spin_lock_init(&isert_conn->conn_lock);
INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
- cma_id->context = isert_conn;
isert_conn->conn_cm_id = cma_id;
isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!isert_conn->login_buf) {
- pr_err("Unable to allocate isert_conn->login_buf\n");
+ isert_err("Unable to allocate isert_conn->login_buf\n");
ret = -ENOMEM;
goto out;
}
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->login_req_buf = isert_conn->login_buf;
isert_conn->login_rsp_buf = isert_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN;
- pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+ isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
isert_conn->login_buf, isert_conn->login_req_buf,
isert_conn->login_rsp_buf);
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
- pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+ isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
ret);
isert_conn->login_req_dma = 0;
goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
- pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+ isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
ret);
isert_conn->login_rsp_dma = 0;
goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->initiator_depth = min_t(u8,
event->param.conn.initiator_depth,
device->dev_attr.max_qp_init_rd_atom);
- pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+ isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
isert_conn->conn_device = device;
isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
if (IS_ERR(isert_conn->conn_pd)) {
ret = PTR_ERR(isert_conn->conn_pd);
- pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+ isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
isert_conn, ret);
goto out_pd;
}
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(isert_conn->conn_mr)) {
ret = PTR_ERR(isert_conn->conn_mr);
- pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+ isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
isert_conn, ret);
goto out_mr;
}
- pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
- if (pi_support && !device->pi_capable) {
- pr_err("Protection information requested but not supported, "
- "rejecting connect request\n");
- ret = rdma_reject(cma_id, NULL, 0);
- goto out_mr;
- }
+ ret = isert_conn_setup_qp(isert_conn, cma_id);
+ if (ret)
+ goto out_conn_dev;
- ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
+ ret = isert_rdma_post_recvl(isert_conn);
+ if (ret)
+ goto out_conn_dev;
+
+ ret = isert_rdma_accept(isert_conn);
if (ret)
goto out_conn_dev;
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
- pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+ isert_info("np %p: Allow accept_np to continue\n", np);
up(&isert_np->np_sem);
return 0;
@@ -705,6 +712,7 @@ out_login_buf:
kfree(isert_conn->login_buf);
out:
kfree(isert_conn);
+ rdma_reject(cma_id, NULL, 0);
return ret;
}
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_device *device = isert_conn->conn_device;
- int cq_index;
- pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p\n", isert_conn);
if (device && device->use_fastreg)
isert_conn_free_fastreg_pool(isert_conn);
+ isert_free_rx_descriptors(isert_conn);
+ rdma_destroy_id(isert_conn->conn_cm_id);
+
if (isert_conn->conn_qp) {
- cq_index = ((struct isert_cq_desc *)
- isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
- pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
- isert_conn->conn_device->cq_active_qps[cq_index]--;
+ struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
- rdma_destroy_qp(isert_conn->conn_cm_id);
- }
+ isert_dbg("dec completion context %p active_qps\n", comp);
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
+ mutex_unlock(&device_list_mutex);
- isert_free_rx_descriptors(isert_conn);
- rdma_destroy_id(isert_conn->conn_cm_id);
+ ib_destroy_qp(isert_conn->conn_qp);
+ }
ib_dereg_mr(isert_conn->conn_mr);
ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
if (device)
isert_device_try_release(device);
-
- pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
- struct isert_conn *isert_conn = cma_id->context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
- kref_get(&isert_conn->conn_kref);
+ isert_info("conn %p\n", isert_conn);
+
+ if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+ isert_warn("conn %p connect_release is running\n", isert_conn);
+ return;
+ }
+
+ mutex_lock(&isert_conn->conn_mutex);
+ if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+ isert_conn->state = ISER_CONN_UP;
+ mutex_unlock(&isert_conn->conn_mutex);
}
static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
struct isert_conn *isert_conn = container_of(kref,
struct isert_conn, conn_kref);
- pr_debug("Calling isert_connect_release for final kref %s/%d\n",
- current->comm, current->pid);
+ isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+ current->pid);
isert_connect_release(isert_conn);
}
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TERMINATING and start the teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
{
- struct isert_conn *isert_conn = container_of(work,
- struct isert_conn, conn_logout_work);
+ int err;
- pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->state == ISER_CONN_UP)
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
isert_conn->state = ISER_CONN_TERMINATING;
-
- if (isert_conn->post_recv_buf_count == 0 &&
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
- mutex_unlock(&isert_conn->conn_mutex);
- goto wake_up;
- }
- if (!isert_conn->conn_cm_id) {
- mutex_unlock(&isert_conn->conn_mutex);
- isert_put_conn(isert_conn);
- return;
+ err = rdma_disconnect(isert_conn->conn_cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
+}
- if (isert_conn->disconnect) {
- /* Send DREQ/DREP towards our initiator */
- rdma_disconnect(isert_conn->conn_cm_id);
- }
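+/*
+ * Handle CM events that arrive on the listener (network portal) cm_id
+ * rather than on a per-connection cm_id.
+ */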
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+ enum rdma_cm_event_type event)
+{
+ isert_dbg("isert np %p, handling event %d\n", isert_np, event);
- mutex_unlock(&isert_conn->conn_mutex);
+ switch (event) {
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_np->np_cm_id = NULL;
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ isert_np->np_cm_id = isert_setup_id(isert_np);
+ if (IS_ERR(isert_np->np_cm_id)) {
+ isert_err("isert np %p setup id failed: %ld\n",
+ isert_np, PTR_ERR(isert_np->np_cm_id));
+ isert_np->np_cm_id = NULL;
+ }
+ break;
+ default:
+ isert_err("isert np %p Unexpected event %d\n",
+ isert_np, event);
+ }
-wake_up:
- complete(&isert_conn->conn_wait);
+ return -1;
}
static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ enum rdma_cm_event_type event)
{
+ struct isert_np *isert_np = cma_id->context;
struct isert_conn *isert_conn;
- if (!cma_id->qp) {
- struct isert_np *isert_np = cma_id->context;
+ if (isert_np->np_cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event);
- isert_np->np_cm_id = NULL;
- return -1;
- }
+ isert_conn = cma_id->qp->qp_context;
- isert_conn = (struct isert_conn *)cma_id->context;
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn_terminate(isert_conn);
+ mutex_unlock(&isert_conn->conn_mutex);
- isert_conn->disconnect = disconnect;
- INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
- schedule_work(&isert_conn->conn_logout_work);
+ isert_info("conn %p completing conn_wait\n", isert_conn);
+ complete(&isert_conn->conn_wait);
return 0;
}
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+ isert_put_conn(isert_conn);
+}
+
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
- bool disconnect = false;
- pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
- event->event, event->status, cma_id->context, cma_id);
+ isert_info("event %d status %d id %p np %p\n", event->event,
+ event->status, cma_id, cma_id->context);
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
if (ret)
- pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
- event->event, ret);
+ isert_err("failed handle connect request %d\n", ret);
break;
case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id);
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
- disconnect = true;
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
- ret = isert_disconnected_handler(cma_id, disconnect);
+ ret = isert_disconnected_handler(cma_id, event->event);
break;
+ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
+ isert_connect_error(cma_id);
+ break;
default:
- pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ isert_err("Unhandled RDMA CMA event: %d\n", event->event);
break;
}
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &isert_conn->conn_rx_descs[rx_head];
- rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
&rx_wr_failed);
if (ret) {
- pr_err("ib_post_recv() failed with ret: %d\n", ret);
+ isert_err("ib_post_recv() failed with ret: %d\n", ret);
isert_conn->post_recv_buf_count -= count;
} else {
- pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+ isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
isert_conn->conn_rx_desc_head = rx_head;
}
return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
ISER_HEADERS_LEN, DMA_TO_DEVICE);
send_wr.next = NULL;
- send_wr.wr_id = (unsigned long)tx_desc;
+ send_wr.wr_id = (uintptr_t)tx_desc;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- atomic_inc(&isert_conn->post_send_buf_count);
-
ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
- if (ret) {
- pr_err("ib_post_send() failed, ret: %d\n", ret);
- atomic_dec(&isert_conn->post_send_buf_count);
- }
+ if (ret)
+ isert_err("ib_post_send() failed, ret: %d\n", ret);
return ret;
}
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
- pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+ isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr)) {
- pr_err("ib_dma_mapping_error() failed\n");
+ isert_err("ib_dma_mapping_error() failed\n");
return -ENOMEM;
}
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
- pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
- " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
- tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
+ isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
+ tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
+ tx_desc->tx_sg[0].lkey);
return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct ib_send_wr *send_wr, bool coalesce)
+ struct ib_send_wr *send_wr)
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
send_wr->opcode = IB_WR_SEND;
send_wr->sg_list = &tx_desc->tx_sg[0];
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
- /*
- * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
- * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
- */
- mutex_lock(&isert_conn->conn_mutex);
- if (coalesce && isert_conn->state == ISER_CONN_UP &&
- ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
- tx_desc->llnode_active = true;
- llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_mutex);
- return;
- }
- isert_conn->conn_comp_batch = 0;
- tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_mutex);
-
send_wr->send_flags = IB_SEND_SIGNALED;
}
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = isert_conn->conn_mr->lkey;
- pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
+ isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
+ rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
if (ret) {
- pr_err("ib_post_recv() failed: %d\n", ret);
+ isert_err("ib_post_recv() failed: %d\n", ret);
isert_conn->post_recv_buf_count--;
}
- pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
return ret;
}
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
if (login->login_complete) {
if (!conn->sess->sess_ops->SessionType &&
isert_conn->conn_device->use_fastreg) {
- /* Normal Session and fastreg is used */
- u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
-
- ret = isert_conn_create_fastreg_pool(isert_conn,
- pi_support);
+ ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
- pr_err("Conn: %p failed to create"
+ isert_err("Conn: %p failed to create"
" fastreg pool\n", isert_conn);
return ret;
}
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
if (ret)
return ret;
- isert_conn->state = ISER_CONN_UP;
+ /* Now we are in FULL_FEATURE phase */
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_FULL_FEATURE;
+ mutex_unlock(&isert_conn->conn_mutex);
goto post_send;
}
@@ -1109,18 +1143,17 @@ post_send:
}
static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
- struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
{
+ struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+ int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
int size;
- if (!login) {
- pr_err("conn->conn_login is NULL\n");
- dump_stack();
- return;
- }
+ isert_info("conn %p\n", isert_conn);
+
+ WARN_ON_ONCE(!login);
if (login->first_request) {
struct iscsi_login_req *login_req =
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
- pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
- size, rx_buflen, MAX_KEY_VALUE_PAIRS);
+ isert_dbg("Using login payload size: %d, rx_buflen: %d "
+ "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
+ MAX_KEY_VALUE_PAIRS);
memcpy(login->req_buf, &rx_desc->data[0], size);
if (login->first_request) {
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
- pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+ isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
return NULL;
}
isert_cmd = iscsit_priv_cmd(cmd);
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
sg = &cmd->se_cmd.t_data_sg[0];
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
- pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
- sg, sg_nents, &rx_desc->data[0], imm_data_len);
+ isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
+ sg, sg_nents, &rx_desc->data[0], imm_data_len);
sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* FIXME: Unexpected unsolicited_data out
*/
if (!cmd->unsolicited_data) {
- pr_err("Received unexpected solicited data payload\n");
+ isert_err("Received unexpected solicited data payload\n");
dump_stack();
return -1;
}
- pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
- unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
+ isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
+ "write_data_done: %u, data_length: %u\n",
+ unsol_data_len, cmd->write_data_done,
+ cmd->se_cmd.data_length);
sg_off = cmd->write_data_done / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* FIXME: Non page-aligned unsolicited_data out
*/
if (page_off) {
- pr_err("Received unexpected non-page aligned data payload\n");
+ isert_err("unexpected non-page aligned data payload\n");
dump_stack();
return -1;
}
- pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
- sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
+ isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
+ "sg_nents: %u from %p %u\n", sg_start, sg_off,
+ sg_nents, &rx_desc->data[0], unsol_data_len);
sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
unsol_data_len);
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
text_in = kzalloc(payload_length, GFP_KERNEL);
if (!text_in) {
- pr_err("Unable to allocate text_in of payload_length: %u\n",
- payload_length);
+ isert_err("Unable to allocate text_in of payload_length: %u\n",
+ payload_length);
return -ENOMEM;
}
cmd->text_in_ptr = text_in;
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
if (sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
- pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
- " ignoring\n", opcode);
+ isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
+ " ignoring\n", opcode);
return 0;
}
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
break;
ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
- if (ret > 0)
- wait_for_completion_timeout(&conn->conn_logout_comp,
- SECONDS_FOR_LOGOUT_COMP *
- HZ);
break;
case ISCSI_OP_TEXT:
cmd = isert_allocate_cmd(conn);
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
rx_desc, (struct iscsi_text *)hdr);
break;
default:
- pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
dump_stack();
break;
}
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
if (iser_hdr->flags & ISER_RSV) {
read_stag = be32_to_cpu(iser_hdr->read_stag);
read_va = be64_to_cpu(iser_hdr->read_va);
- pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
- read_stag, (unsigned long long)read_va);
+ isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
+ read_stag, (unsigned long long)read_va);
}
if (iser_hdr->flags & ISER_WSV) {
write_stag = be32_to_cpu(iser_hdr->write_stag);
write_va = be64_to_cpu(iser_hdr->write_va);
- pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
- write_stag, (unsigned long long)write_va);
+ isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
+ write_stag, (unsigned long long)write_va);
}
- pr_debug("ISER ISCSI_CTRL PDU\n");
+ isert_dbg("ISER ISCSI_CTRL PDU\n");
break;
case ISER_HELLO:
- pr_err("iSER Hello message\n");
+ isert_err("iSER Hello message\n");
break;
default:
- pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+ isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
break;
}
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
- unsigned long xfer_len)
+ u32 xfer_len)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_hdr *hdr;
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
if ((char *)desc == isert_conn->login_req_buf) {
rx_dma = isert_conn->login_req_dma;
rx_buflen = ISER_RX_LOGIN_SIZE;
- pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+ isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
rx_dma, rx_buflen);
} else {
rx_dma = desc->dma_addr;
rx_buflen = ISER_RX_PAYLOAD_SIZE;
- pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+ isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
rx_dma, rx_buflen);
}
ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
hdr = &desc->iscsi_header;
- pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+ isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
hdr->opcode, hdr->itt, hdr->flags,
(int)(xfer_len - ISER_HEADERS_LEN));
- if ((char *)desc == isert_conn->login_req_buf)
- isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
- isert_conn);
- else
+ if ((char *)desc == isert_conn->login_req_buf) {
+ isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+ if (isert_conn->conn) {
+ struct iscsi_login *login = isert_conn->conn->conn_login;
+
+ if (login && !login->first_request)
+ isert_rx_login_req(isert_conn);
+ }
+ mutex_lock(&isert_conn->conn_mutex);
+ complete(&isert_conn->login_req_comp);
+ mutex_unlock(&isert_conn->conn_mutex);
+ } else {
isert_rx_do_work(desc, isert_conn);
+ }
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
DMA_FROM_DEVICE);
isert_conn->post_recv_buf_count--;
- pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
- isert_conn->post_recv_buf_count);
+ isert_dbg("Decremented post_recv_buf_count: %d\n",
+ isert_conn->post_recv_buf_count);
if ((char *)desc == isert_conn->login_req_buf)
return;
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
ISERT_MIN_POSTED_RX);
err = isert_post_recv(isert_conn, count);
if (err) {
- pr_err("isert_post_recv() count: %d failed, %d\n",
+ isert_err("isert_post_recv() count: %d failed, %d\n",
count, err);
}
}
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
data->dma_dir);
if (unlikely(!data->dma_nents)) {
- pr_err("Cmd: unable to dma map SGs %p\n", sg);
+ isert_err("Cmd: unable to dma map SGs %p\n", sg);
return -EINVAL;
}
- pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+ isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+ isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
return 0;
}
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
if (wr->data.sg) {
- pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+ isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
isert_unmap_data_buf(isert_conn, &wr->data);
}
if (wr->send_wr) {
- pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+ isert_dbg("Cmd %p free send_wr\n", isert_cmd);
kfree(wr->send_wr);
wr->send_wr = NULL;
}
if (wr->ib_sge) {
- pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+ isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
kfree(wr->ib_sge);
wr->ib_sge = NULL;
}
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
LIST_HEAD(unmap_list);
- pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
if (wr->fr_desc) {
- pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
- isert_cmd, wr->fr_desc);
+ isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
if (wr->fr_desc->ind & ISERT_PROTECTED) {
isert_unmap_data_buf(isert_conn, &wr->prot);
wr->fr_desc->ind &= ~ISERT_PROTECTED;
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
}
if (wr->data.sg) {
- pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
+ isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
isert_unmap_data_buf(isert_conn, &wr->data);
}
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_conn *conn = isert_conn->conn;
struct isert_device *device = isert_conn->conn_device;
- pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
+ isert_dbg("Cmd %p\n", isert_cmd);
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
- pr_debug("Calling transport_generic_free_cmd from"
+ isert_dbg("Calling transport_generic_free_cmd from"
" isert_put_cmd for 0x%02x\n",
cmd->iscsi_opcode);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1687,7 +1728,7 @@ static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
if (tx_desc->dma_addr != 0) {
- pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
+ isert_dbg("unmap single for tx_desc->dma_addr\n");
ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
tx_desc->dma_addr = 0;
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
struct ib_device *ib_dev, bool comp_err)
{
if (isert_cmd->pdu_buf_dma != 0) {
- pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
+ isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
isert_cmd->pdu_buf_dma = 0;
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ isert_err("ib_check_mr_status failed, ret %d\n", ret);
goto fail_mr_status;
}
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
do_div(sec_offset_err, block_size);
se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
- pr_err("isert: PI error found type %d at sector 0x%llx "
- "expected 0x%x vs actual 0x%x\n",
- mr_status.sig_err.err_type,
- (unsigned long long)se_cmd->bad_sector,
- mr_status.sig_err.expected,
- mr_status.sig_err.actual);
+ isert_err("PI error found type %d at sector 0x%llx "
+ "expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)se_cmd->bad_sector,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
ret = 1;
}
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->write_data_done = wr->data.len;
wr->send_wr_num = 0;
- pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+ isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work)
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
+
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
- pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
-
- atomic_dec(&isert_conn->post_send_buf_count);
iscsit_tmr_post_handler(cmd, cmd->conn);
-
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
- break;
- case ISTATE_SEND_REJECT:
- pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
- atomic_dec(&isert_conn->post_send_buf_count);
-
+ case ISTATE_SEND_REJECT: /* FALLTHRU */
+ case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
+ ib_dev, false);
break;
case ISTATE_SEND_LOGOUTRSP:
- pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-
- atomic_dec(&isert_conn->post_send_buf_count);
iscsit_logout_post_handler(cmd, cmd->conn);
break;
- case ISTATE_SEND_TEXTRSP:
- atomic_dec(&isert_conn->post_send_buf_count);
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
- break;
default:
- pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+ isert_err("Unknown i_state %d\n", cmd->i_state);
dump_stack();
break;
}
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
return;
}
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- else
- atomic_dec(&isert_conn->post_send_buf_count);
-
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
-__isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+isert_send_completion(struct iser_tx_desc *tx_desc,
+ struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
struct isert_rdma_wr *wr;
if (!isert_cmd) {
- atomic_dec(&isert_conn->post_send_buf_count);
isert_unmap_tx_desc(tx_desc, ib_dev);
return;
}
wr = &isert_cmd->rdma_wr;
+ isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+
switch (wr->iser_ib_op) {
case ISER_IB_RECV:
- pr_err("isert_send_completion: Got ISER_IB_RECV\n");
+ isert_err("Got ISER_IB_RECV\n");
dump_stack();
break;
case ISER_IB_SEND:
- pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
isert_response_completion(tx_desc, isert_cmd,
isert_conn, ib_dev);
break;
case ISER_IB_RDMA_WRITE:
- pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_write(tx_desc, isert_cmd);
break;
case ISER_IB_RDMA_READ:
- pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
-
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
- pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
+ isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
dump_stack();
break;
}
}
-static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
-{
- struct llist_node *llnode = tx_desc->comp_llnode_batch;
- struct iser_tx_desc *t;
- /*
- * Drain coalesced completion llist starting from comp_llnode_batch
- * setup in isert_init_send_wr(), and then complete trailing tx_desc.
- */
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- __isert_send_completion(t, isert_conn);
- }
- __isert_send_completion(tx_desc, isert_conn);
-}
-
-static void
-isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+/**
+ * is_isert_tx_desc() - Indicate if the completion wr_id
+ * is a TX descriptor or not.
+ * @isert_conn: iser connection
+ * @wr_id: completion WR identifier
+ *
+ * Since we cannot rely on wc opcode in FLUSH errors
+ * we must work around it by checking if the wr_id address
+ * falls in the iser connection rx_descs buffer. If so
+ * it is an RX descriptor, otherwise it is a TX.
+ */
+static inline bool
+is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
- struct llist_node *llnode;
- struct isert_rdma_wr *wr;
- struct iser_tx_desc *t;
+ void *start = isert_conn->conn_rx_descs;
+ int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
- mutex_lock(&isert_conn->conn_mutex);
- llnode = llist_del_all(&isert_conn->conn_comp_llist);
- isert_conn->conn_comp_batch = 0;
- mutex_unlock(&isert_conn->conn_mutex);
-
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- wr = &t->isert_cmd->rdma_wr;
-
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num,
- &isert_conn->post_send_buf_count);
- else
- atomic_dec(&isert_conn->post_send_buf_count);
+ if (wr_id >= start && wr_id < start + len)
+ return false;
- isert_completion_put(t, t->isert_cmd, ib_dev, true);
- }
+ return true;
}
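
A minimal userspace sketch, outside the patch, of the same classification test: if a completion cookie points into the connection's contiguous RX descriptor pool it is an RX descriptor, otherwise a TX descriptor. The struct and names below are hypothetical stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_desc { char payload[128]; };

struct conn {
        struct rx_desc rx_descs[32];    /* contiguous RX descriptor pool */
};

/* A cookie that does not point into the RX pool must be a TX descriptor. */
static bool is_tx_cookie(const struct conn *c, const void *wr_id)
{
        uintptr_t p = (uintptr_t)wr_id;
        uintptr_t start = (uintptr_t)c->rx_descs;
        uintptr_t end = start + sizeof(c->rx_descs);

        return !(p >= start && p < end);
}

int main(void)
{
        static struct conn c;
        static struct rx_desc stray;    /* allocated outside the pool */

        printf("rx cookie -> tx? %d\n", is_tx_cookie(&c, &c.rx_descs[3]));
        printf("stray cookie -> tx? %d\n", is_tx_cookie(&c, &stray));
        return 0;
}
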
static void
-isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
- struct llist_node *llnode = tx_desc->comp_llnode_batch;
- struct isert_rdma_wr *wr;
- struct iser_tx_desc *t;
-
- while (llnode) {
- t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
- llnode = llist_next(llnode);
- wr = &t->isert_cmd->rdma_wr;
+ if (wc->wr_id == ISER_BEACON_WRID) {
+ isert_info("conn %p completing conn_wait_comp_err\n",
+ isert_conn);
+ complete(&isert_conn->conn_wait_comp_err);
+ } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_cmd *isert_cmd;
+ struct iser_tx_desc *desc;
- /**
- * If send_wr_num is 0 this means that we got
- * RDMA completion and we cleared it and we should
- * simply decrement the response post. else the
- * response is incorporated in send_wr_num, just
- * sub it.
- **/
- if (wr->send_wr_num)
- atomic_sub(wr->send_wr_num,
- &isert_conn->post_send_buf_count);
+ desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+ isert_cmd = desc->isert_cmd;
+ if (!isert_cmd)
+ isert_unmap_tx_desc(desc, ib_dev);
else
- atomic_dec(&isert_conn->post_send_buf_count);
-
- isert_completion_put(t, t->isert_cmd, ib_dev, true);
- }
- tx_desc->comp_llnode_batch = NULL;
-
- if (!isert_cmd)
- isert_unmap_tx_desc(tx_desc, ib_dev);
- else
- isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
-}
-
-static void
-isert_cq_rx_comp_err(struct isert_conn *isert_conn)
-{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct iscsi_conn *conn = isert_conn->conn;
-
- if (isert_conn->post_recv_buf_count)
- return;
-
- isert_cq_drain_comp_llist(isert_conn, ib_dev);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
+ isert_completion_put(desc, isert_cmd, ib_dev, true);
+ } else {
+ isert_conn->post_recv_buf_count--;
+ if (!isert_conn->post_recv_buf_count)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
-
- while (atomic_read(&isert_conn->post_send_buf_count))
- msleep(3000);
-
- mutex_lock(&isert_conn->conn_mutex);
- isert_conn->state = ISER_CONN_DOWN;
- mutex_unlock(&isert_conn->conn_mutex);
-
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-
- complete(&isert_conn->conn_wait_comp_err);
}
static void
-isert_cq_tx_work(struct work_struct *work)
+isert_handle_wc(struct ib_wc *wc)
{
- struct isert_cq_desc *cq_desc = container_of(work,
- struct isert_cq_desc, cq_tx_work);
- struct isert_device *device = cq_desc->device;
- int cq_index = cq_desc->cq_index;
- struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
struct isert_conn *isert_conn;
struct iser_tx_desc *tx_desc;
- struct ib_wc wc;
-
- while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
- tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
- isert_conn = wc.qp->qp_context;
+ struct iser_rx_desc *rx_desc;
- if (wc.status == IB_WC_SUCCESS) {
- isert_send_completion(tx_desc, isert_conn);
+ isert_conn = wc->qp->qp_context;
+ if (likely(wc->status == IB_WC_SUCCESS)) {
+ if (wc->opcode == IB_WC_RECV) {
+ rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
+ isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
} else {
- pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
- pr_debug("TX wc.status: 0x%08x\n", wc.status);
- pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-
- if (wc.wr_id != ISER_FASTREG_LI_WRID) {
- if (tx_desc->llnode_active)
- continue;
-
- atomic_dec(&isert_conn->post_send_buf_count);
- isert_cq_tx_comp_err(tx_desc, isert_conn);
- }
+ tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+ isert_send_completion(tx_desc, isert_conn);
}
- }
-
- ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_tx_callback(struct ib_cq *cq, void *context)
-{
- struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+ } else {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ isert_err("wr id %llx status %d vend_err %x\n",
+ wc->wr_id, wc->status, wc->vendor_err);
+ else
+ isert_dbg("flush error: wr id %llx\n", wc->wr_id);
- queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
+ if (wc->wr_id != ISER_FASTREG_LI_WRID)
+ isert_cq_comp_err(isert_conn, wc);
+ }
}
static void
-isert_cq_rx_work(struct work_struct *work)
+isert_cq_work(struct work_struct *work)
{
- struct isert_cq_desc *cq_desc = container_of(work,
- struct isert_cq_desc, cq_rx_work);
- struct isert_device *device = cq_desc->device;
- int cq_index = cq_desc->cq_index;
- struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
- struct isert_conn *isert_conn;
- struct iser_rx_desc *rx_desc;
- struct ib_wc wc;
- unsigned long xfer_len;
+ enum { isert_poll_budget = 65536 };
+ struct isert_comp *comp = container_of(work, struct isert_comp,
+ work);
+ struct ib_wc *const wcs = comp->wcs;
+ int i, n, completed = 0;
- while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
- rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
- isert_conn = wc.qp->qp_context;
+ while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
+ for (i = 0; i < n; i++)
+ isert_handle_wc(&wcs[i]);
- if (wc.status == IB_WC_SUCCESS) {
- xfer_len = (unsigned long)wc.byte_len;
- isert_rx_completion(rx_desc, isert_conn, xfer_len);
- } else {
- pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
- if (wc.status != IB_WC_WR_FLUSH_ERR) {
- pr_debug("RX wc.status: 0x%08x\n", wc.status);
- pr_debug("RX wc.vendor_err: 0x%08x\n",
- wc.vendor_err);
- }
- isert_conn->post_recv_buf_count--;
- isert_cq_rx_comp_err(isert_conn);
- }
+ completed += n;
+ if (completed >= isert_poll_budget)
+ break;
}
- ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
+ ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}
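
A self-contained sketch of the polling budget isert_cq_work() applies, with poll_queue() as a hypothetical stand-in for ib_poll_cq(): completions are drained in batches until the queue is empty or the budget is spent, so one busy CQ cannot monopolize the worker; the real work item would simply be requeued by the next CQ notification.

#include <stdio.h>

/* Return up to 'max' pending completions, 0 when the queue is empty. */
static int poll_queue(int *pending, int max)
{
        int n = *pending < max ? *pending : max;

        *pending -= n;
        return n;
}

static int drain_with_budget(int *pending)
{
        enum { batch = 16, budget = 65536 };
        int n, completed = 0;

        while ((n = poll_queue(pending, batch)) > 0) {
                completed += n;
                if (completed >= budget)
                        break;          /* yield; the rest is handled on requeue */
        }
        return completed;
}

int main(void)
{
        int backlog = 100000;

        printf("drained %d, %d left for the next run\n",
               drain_with_budget(&backlog), backlog);
        return 0;
}
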
static void
-isert_cq_rx_callback(struct ib_cq *cq, void *context)
+isert_cq_callback(struct ib_cq *cq, void *context)
{
- struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+ struct isert_comp *comp = context;
- queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+ queue_work(isert_comp_wq, &comp->work);
}
static int
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
struct ib_send_wr *wr_failed;
int ret;
- atomic_inc(&isert_conn->post_send_buf_count);
-
ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
&wr_failed);
if (ret) {
- pr_err("ib_post_send failed with %d\n", ret);
- atomic_dec(&isert_conn->post_send_buf_count);
+ isert_err("ib_post_send failed with %d\n", ret);
return ret;
}
return ret;
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("Posting SCSI Response\n");
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
struct isert_device *device = isert_conn->conn_device;
- if (device->pi_capable)
- return TARGET_PROT_ALL;
+ if (conn->tpg->tpg_attrib.t10_pi) {
+ if (device->pi_capable) {
+ isert_info("conn %p PI offload enabled\n", isert_conn);
+ isert_conn->pi_support = true;
+ return TARGET_PROT_ALL;
+ }
+ }
+
+ isert_info("conn %p PI offload disabled\n", isert_conn);
+ isert_conn->pi_support = false;
return TARGET_PROT_NORMAL;
}
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
&isert_cmd->tx_desc.iscsi_header,
nopout_response);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Logout Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Posting Reject\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
tx_dsg->lkey = isert_conn->conn_mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
- isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
- pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ isert_dbg("conn %p Text Reject\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
send_wr->sg_list = ib_sge;
send_wr->num_sge = sg_nents;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
/*
* Perform mapping of TCM scatterlist memory ib_sge dma_addr.
*/
for_each_sg(sg_start, tmp_sg, sg_nents, i) {
- pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
- (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length, page_off);
+ isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
+ "page_off: %u\n",
+ (unsigned long long)tmp_sg->dma_address,
+ tmp_sg->length, page_off);
ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
ib_sge->length = min_t(u32, data_left,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
ib_sge->lkey = isert_conn->conn_mr->lkey;
- pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
- ib_sge->addr, ib_sge->length, ib_sge->lkey);
+ isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
+ ib_sge->addr, ib_sge->length, ib_sge->lkey);
page_off = 0;
data_left -= ib_sge->length;
ib_sge++;
- pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
+ isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
}
- pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
- send_wr->sg_list, send_wr->num_sge);
+ isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
+ send_wr->sg_list, send_wr->num_sge);
return sg_nents;
}
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
if (!ib_sge) {
- pr_warn("Unable to allocate ib_sge\n");
+ isert_warn("Unable to allocate ib_sge\n");
ret = -ENOMEM;
goto unmap_cmd;
}
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
GFP_KERNEL);
if (!wr->send_wr) {
- pr_debug("Unable to allocate wr->send_wr\n");
+ isert_dbg("Unable to allocate wr->send_wr\n");
ret = -ENOMEM;
goto unmap_cmd;
}
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
chunk_start = start_addr;
end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
- pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
- i, (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length);
+ isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
+ i, (unsigned long long)tmp_sg->dma_address,
+ tmp_sg->length);
if ((end_addr & ~PAGE_MASK) && i < last_ent) {
new_chunk = 0;
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
page = chunk_start & PAGE_MASK;
do {
fr_pl[n_pages++] = page;
- pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
- n_pages - 1, page);
+ isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
+ n_pages - 1, page);
page += PAGE_SIZE;
} while (page < end_addr);
}
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
return n_pages;
}
+static inline void
+isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+ u32 rkey;
+
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->ex.invalidate_rkey = mr->rkey;
+
+ /* Bump the key */
+ rkey = ib_inc_rkey(mr->rkey);
+ ib_update_fast_reg_key(mr, rkey);
+}
+
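
The open-coded sequence removed further below (mask with 0x000000FF, increment, ib_update_fast_reg_key()) shows that only the low 8 bits of the rkey act as a generation counter; here is a small userspace model of that bump, with inc_rkey() as a hypothetical stand-in for ib_inc_rkey().

#include <stdint.h>
#include <stdio.h>

/* Bump only the low 8 "key" bits so stale remote references stop matching. */
static uint32_t inc_rkey(uint32_t rkey)
{
        const uint32_t mask = 0x000000ff;

        return ((rkey + 1) & mask) | (rkey & ~mask);
}

int main(void)
{
        uint32_t rkey = 0x123456ff;

        /* the low byte wraps from 0xff to 0x00; the upper bits are preserved */
        printf("old 0x%08x -> new 0x%08x\n", rkey, inc_rkey(rkey));
        return 0;
}
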
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
struct fast_reg_descriptor *fr_desc,
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
struct ib_send_wr *bad_wr, *wr = NULL;
int ret, pagelist_len;
u32 page_off;
- u8 key;
if (mem->dma_nents == 1) {
sge->lkey = isert_conn->conn_mr->lkey;
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
- __func__, __LINE__, sge->addr, sge->length,
- sge->lkey);
+ isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+ sge->addr, sge->length, sge->lkey);
return 0;
}
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
page_off = mem->offset % PAGE_SIZE;
- pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
- fr_desc, mem->nents, mem->offset);
+ isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
+ fr_desc, mem->nents, mem->offset);
pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
&frpl->page_list[0]);
- if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.ex.invalidate_rkey = mr->rkey;
+ if (!(fr_desc->ind & ind)) {
+ isert_inv_rkey(&inv_wr, mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
}
/* Prepare FASTREG WR */
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
if (ret) {
- pr_err("fast registration failed, ret:%d\n", ret);
+ isert_err("fast registration failed, ret:%d\n", ret);
return ret;
}
fr_desc->ind &= ~ind;
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
sge->addr = frpl->page_list[0] + page_off;
sge->length = mem->len;
- pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
- __func__, __LINE__, sge->addr, sge->length,
- sge->lkey);
+ isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
+ sge->addr, sge->length, sge->lkey);
return ret;
}
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
break;
default:
- pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+ isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
return -EINVAL;
}
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks)
}
static int
-isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
- struct fast_reg_descriptor *fr_desc,
- struct ib_sge *data_sge, struct ib_sge *prot_sge,
- struct ib_sge *sig_sge)
+isert_reg_sig_mr(struct isert_conn *isert_conn,
+ struct se_cmd *se_cmd,
+ struct isert_rdma_wr *rdma_wr,
+ struct fast_reg_descriptor *fr_desc)
{
struct ib_send_wr sig_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL;
struct pi_context *pi_ctx = fr_desc->pi_ctx;
struct ib_sig_attrs sig_attrs;
int ret;
- u32 key;
memset(&sig_attrs, 0, sizeof(sig_attrs));
ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
- memset(&inv_wr, 0, sizeof(inv_wr));
- inv_wr.opcode = IB_WR_LOCAL_INV;
- inv_wr.wr_id = ISER_FASTREG_LI_WRID;
- inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+ isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
wr = &inv_wr;
- /* Bump the key */
- key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
}
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.opcode = IB_WR_REG_SIG_MR;
sig_wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.sg_list = data_sge;
+ sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
sig_wr.num_sge = 1;
sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
if (se_cmd->t_prot_sg)
- sig_wr.wr.sig_handover.prot = prot_sge;
+ sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
if (!wr)
wr = &sig_wr;
@@ -2729,39 +2644,98 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
if (ret) {
- pr_err("fast registration failed, ret:%d\n", ret);
+ isert_err("fast registration failed, ret:%d\n", ret);
goto err;
}
fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
- sig_sge->lkey = pi_ctx->sig_mr->lkey;
- sig_sge->addr = 0;
- sig_sge->length = se_cmd->data_length;
+ rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+ rdma_wr->ib_sg[SIG].addr = 0;
+ rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
/*
* We have protection guards on the wire
* so we need to set a larger transfer
*/
- sig_sge->length += se_cmd->prot_length;
+ rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
- pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- sig_sge->addr, sig_sge->length,
- sig_sge->lkey);
+ isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
+ rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
+ rdma_wr->ib_sg[SIG].lkey);
err:
return ret;
}
static int
+isert_handle_prot_cmd(struct isert_conn *isert_conn,
+ struct isert_cmd *isert_cmd,
+ struct isert_rdma_wr *wr)
+{
+ struct isert_device *device = isert_conn->conn_device;
+ struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
+ int ret;
+
+ if (!wr->fr_desc->pi_ctx) {
+ ret = isert_create_pi_ctx(wr->fr_desc,
+ device->ib_device,
+ isert_conn->conn_pd);
+ if (ret) {
+ isert_err("conn %p failed to allocate pi_ctx\n",
+ isert_conn);
+ return ret;
+ }
+ }
+
+ if (se_cmd->t_prot_sg) {
+ ret = isert_map_data_buf(isert_conn, isert_cmd,
+ se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents,
+ se_cmd->prot_length,
+ 0, wr->iser_ib_op, &wr->prot);
+ if (ret) {
+ isert_err("conn %p failed to map protection buffer\n",
+ isert_conn);
+ return ret;
+ }
+
+ memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
+ ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
+ ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+ if (ret) {
+ isert_err("conn %p failed to fast reg mr\n",
+ isert_conn);
+ goto unmap_prot_cmd;
+ }
+ }
+
+ ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+ if (ret) {
+ isert_err("conn %p failed to fast reg mr\n",
+ isert_conn);
+ goto unmap_prot_cmd;
+ }
+ wr->fr_desc->ind |= ISERT_PROTECTED;
+
+ return 0;
+
+unmap_prot_cmd:
+ if (se_cmd->t_prot_sg)
+ isert_unmap_data_buf(isert_conn, &wr->prot);
+
+ return ret;
+}
+
+static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct isert_rdma_wr *wr)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct ib_sge data_sge;
- struct ib_send_wr *send_wr;
struct fast_reg_descriptor *fr_desc = NULL;
+ struct ib_send_wr *send_wr;
+ struct ib_sge *ib_sg;
u32 offset;
int ret = 0;
unsigned long flags;
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (ret)
return ret;
- if (wr->data.dma_nents != 1 ||
- se_cmd->prot_op != TARGET_PROT_NORMAL) {
+ if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
struct fast_reg_descriptor, list);
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
- ISERT_DATA_KEY_VALID, &data_sge);
+ ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
if (ret)
goto unmap_cmd;
- if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
- struct ib_sge prot_sge, sig_sge;
-
- if (se_cmd->t_prot_sg) {
- ret = isert_map_data_buf(isert_conn, isert_cmd,
- se_cmd->t_prot_sg,
- se_cmd->t_prot_nents,
- se_cmd->prot_length,
- 0, wr->iser_ib_op, &wr->prot);
- if (ret)
- goto unmap_cmd;
-
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
- ISERT_PROT_KEY_VALID, &prot_sge);
- if (ret)
- goto unmap_prot_cmd;
- }
-
- ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
- &data_sge, &prot_sge, &sig_sge);
+ if (isert_prot_cmd(isert_conn, se_cmd)) {
+ ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
if (ret)
- goto unmap_prot_cmd;
+ goto unmap_cmd;
- fr_desc->ind |= ISERT_PROTECTED;
- memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
- } else
- memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+ ib_sg = &wr->ib_sg[SIG];
+ } else {
+ ib_sg = &wr->ib_sg[DATA];
+ }
+ memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
wr->ib_sge = &wr->s_ib_sge;
wr->send_wr_num = 1;
memset(&wr->s_send_wr, 0, sizeof(*send_wr));
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
send_wr = &isert_cmd->rdma_wr.s_send_wr;
send_wr->sg_list = &wr->s_ib_sge;
send_wr->num_sge = 1;
- send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+ send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
send_wr->opcode = IB_WR_RDMA_WRITE;
send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
send_wr->wr.rdma.rkey = isert_cmd->read_stag;
- send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+ send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
0 : IB_SEND_SIGNALED;
} else {
send_wr->opcode = IB_WR_RDMA_READ;
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
return 0;
-unmap_prot_cmd:
- if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &wr->prot);
+
unmap_cmd:
if (fr_desc) {
spin_lock_irqsave(&isert_conn->conn_lock, flags);
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct ib_send_wr *wr_failed;
int rc;
- pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+ isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
+
wr->iser_ib_op = ISER_IB_RDMA_WRITE;
rc = device->reg_rdma_mem(conn, cmd, wr);
if (rc) {
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+ if (!isert_prot_cmd(isert_conn, se_cmd)) {
/*
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
- &isert_cmd->tx_desc.send_wr, false);
+ &isert_cmd->tx_desc.send_wr);
isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
wr->send_wr_num += 1;
}
- atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
- if (rc) {
- pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- }
+ if (rc)
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- if (se_cmd->prot_op == TARGET_PROT_NORMAL)
- pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+ if (!isert_prot_cmd(isert_conn, se_cmd))
+ isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
"READ\n", isert_cmd);
else
- pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
isert_cmd);
return 1;
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
struct ib_send_wr *wr_failed;
int rc;
- pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+ isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, se_cmd->data_length, cmd->write_data_done);
wr->iser_ib_op = ISER_IB_RDMA_READ;
rc = device->reg_rdma_mem(conn, cmd, wr);
if (rc) {
- pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+ isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
- if (rc) {
- pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
- atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
- }
- pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+ if (rc)
+ isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+
+ isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
return 0;
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
ret = isert_put_nopin(cmd, conn, false);
break;
default:
- pr_err("Unknown immediate state: 0x%02x\n", state);
+ isert_err("Unknown immediate state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
+ struct isert_conn *isert_conn = conn->context;
int ret;
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
ret = isert_put_logout_rsp(cmd, conn);
- if (!ret) {
- pr_debug("Returning iSER Logout -EAGAIN\n");
- ret = -EAGAIN;
- }
+ if (!ret)
+ isert_conn->logout_posted = true;
break;
case ISTATE_SEND_NOPIN:
ret = isert_put_nopin(cmd, conn, true);
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
ret = isert_put_response(conn, cmd);
break;
default:
- pr_err("Unknown response state: 0x%02x\n", state);
+ isert_err("Unknown response state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
return ret;
}
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+ struct iscsi_np *np = isert_np->np;
+ struct rdma_cm_id *id;
+ struct sockaddr *sa;
+ int ret;
+
+ sa = (struct sockaddr *)&np->np_sockaddr;
+ isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+ id = rdma_create_id(isert_cma_handler, isert_np,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(id)) {
+ isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+ ret = PTR_ERR(id);
+ goto out;
+ }
+ isert_dbg("id %p context %p\n", id, id->context);
+
+ ret = rdma_bind_addr(id, sa);
+ if (ret) {
+ isert_err("rdma_bind_addr() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+ if (ret) {
+ isert_err("rdma_listen() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ return id;
+out_id:
+ rdma_destroy_id(id);
+out:
+ return ERR_PTR(ret);
+}
+
static int
isert_setup_np(struct iscsi_np *np,
struct __kernel_sockaddr_storage *ksockaddr)
{
struct isert_np *isert_np;
struct rdma_cm_id *isert_lid;
- struct sockaddr *sa;
int ret;
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
if (!isert_np) {
- pr_err("Unable to allocate struct isert_np\n");
+ isert_err("Unable to allocate struct isert_np\n");
return -ENOMEM;
}
sema_init(&isert_np->np_sem, 0);
mutex_init(&isert_np->np_accept_mutex);
INIT_LIST_HEAD(&isert_np->np_accept_list);
init_completion(&isert_np->np_login_comp);
+ isert_np->np = np;
- sa = (struct sockaddr *)ksockaddr;
- pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
/*
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np,
memcpy(&np->np_sockaddr, ksockaddr,
sizeof(struct __kernel_sockaddr_storage));
- isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
- IB_QPT_RC);
+ isert_lid = isert_setup_id(isert_np);
if (IS_ERR(isert_lid)) {
- pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
- PTR_ERR(isert_lid));
ret = PTR_ERR(isert_lid);
goto out;
}
- ret = rdma_bind_addr(isert_lid, sa);
- if (ret) {
- pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
- goto out_lid;
- }
-
- ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
- if (ret) {
- pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
- goto out_lid;
- }
-
isert_np->np_cm_id = isert_lid;
np->np_context = isert_np;
- pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
return 0;
-out_lid:
- rdma_destroy_id(isert_lid);
out:
kfree(isert_np);
+
return ret;
}
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn)
cp.retry_count = 7;
cp.rnr_retry_count = 7;
- pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
-
ret = rdma_accept(cm_id, &cp);
if (ret) {
- pr_err("rdma_accept() failed with: %d\n", ret);
+ isert_err("rdma_accept() failed with: %d\n", ret);
return ret;
}
- pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
-
return 0;
}
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
int ret;
- pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+ isert_info("before login_req comp conn: %p\n", isert_conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+ if (ret) {
+ isert_err("isert_conn %p interrupted before got login req\n",
+ isert_conn);
+ return ret;
+ }
+ reinit_completion(&isert_conn->login_req_comp);
+
/*
* For login requests after the first PDU, isert_rx_login_req() will
* kick schedule_delayed_work(&conn->login_work) as the packet is
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
if (!login->first_request)
return 0;
+ isert_rx_login_req(isert_conn);
+
+ isert_info("before conn_login_comp conn: %p\n", conn);
ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
if (ret)
return ret;
- pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+ isert_info("processing login->req: %p\n", login->req);
+
return 0;
}
@@ -3161,7 +3136,7 @@ accept_wait:
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("np_thread_state %d for isert_accept_np\n",
+ isert_dbg("np_thread_state %d for isert_accept_np\n",
np->np_thread_state);
/**
* No point in stalling here when np_thread
@@ -3186,17 +3161,10 @@ accept_wait:
isert_conn->conn = conn;
max_accept = 0;
- ret = isert_rdma_post_recvl(isert_conn);
- if (ret)
- return ret;
-
- ret = isert_rdma_accept(isert_conn);
- if (ret)
- return ret;
-
isert_set_conn_info(np, conn, isert_conn);
- pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+ isert_dbg("Processing isert_conn: %p\n", isert_conn);
+
return 0;
}
@@ -3204,25 +3172,103 @@ static void
isert_free_np(struct iscsi_np *np)
{
struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_conn *isert_conn, *n;
if (isert_np->np_cm_id)
rdma_destroy_id(isert_np->np_cm_id);
+ /*
+ * FIXME: At this point we don't have a good way to ensure
+ * that we don't have hanging connections that completed
+ * RDMA establishment but didn't start the iscsi login
+ * process. So work around this by cleaning up whatever piled
+ * up in np_accept_list.
+ */
+ mutex_lock(&isert_np->np_accept_mutex);
+ if (!list_empty(&isert_np->np_accept_list)) {
+ isert_info("Still have isert connections, cleaning up...\n");
+ list_for_each_entry_safe(isert_conn, n,
+ &isert_np->np_accept_list,
+ conn_accept_node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+ isert_connect_release(isert_conn);
+ }
+ }
+ mutex_unlock(&isert_np->np_accept_mutex);
+
np->np_context = NULL;
kfree(isert_np);
}
+static void isert_release_work(struct work_struct *work)
+{
+ struct isert_conn *isert_conn = container_of(work,
+ struct isert_conn,
+ release_work);
+
+ isert_info("Starting release conn %p\n", isert_conn);
+
+ wait_for_completion(&isert_conn->conn_wait);
+
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ isert_info("Destroying conn %p\n", isert_conn);
+ isert_put_conn(isert_conn);
+}
+
+static void
+isert_wait4logout(struct isert_conn *isert_conn)
+{
+ struct iscsi_conn *conn = isert_conn->conn;
+
+ isert_info("conn %p\n", isert_conn);
+
+ if (isert_conn->logout_posted) {
+ isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ }
+}
+
+static void
+isert_wait4cmds(struct iscsi_conn *conn)
+{
+ isert_info("iscsi_conn %p\n", conn);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
+ }
+}
+
+static void
+isert_wait4flush(struct isert_conn *isert_conn)
+{
+ struct ib_recv_wr *bad_wr;
+
+ isert_info("conn %p\n", isert_conn);
+
+ init_completion(&isert_conn->conn_wait_comp_err);
+ isert_conn->beacon.wr_id = ISER_BEACON_WRID;
+ /* post an indication that all flush errors were consumed */
+ if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+ isert_err("conn %p failed to post beacon", isert_conn);
+ return;
+ }
+
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+}
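
A toy model of the beacon idea behind isert_wait4flush(): receive completions are reaped in posting order, so once the sentinel posted last is seen flushed, every receive posted before it must already have been flushed. The FIFO below is a hypothetical stand-in for the QP receive queue.

#include <stdio.h>

#define BEACON (-1)

static int fifo[8];
static int head, tail;

static void post(int cookie)    { fifo[tail++ % 8] = cookie; }
static int flush_one(void)      { return fifo[head++ % 8]; }

int main(void)
{
        post(1);
        post(2);
        post(3);
        post(BEACON);           /* sentinel goes in last */

        for (;;) {
                int c = flush_one();

                if (c == BEACON) {
                        printf("all earlier receives flushed\n");
                        break;
                }
                printf("flushed cookie %d\n", c);
        }
        return 0;
}
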
+
static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- pr_debug("isert_wait_conn: Starting \n");
+ isert_info("Starting conn %p\n", isert_conn);
mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
- pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
- rdma_disconnect(isert_conn->conn_cm_id);
- }
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn)
mutex_unlock(&isert_conn->conn_mutex);
return;
}
- if (isert_conn->state == ISER_CONN_UP)
- isert_conn->state = ISER_CONN_TERMINATING;
+ isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->conn_mutex);
- wait_for_completion(&isert_conn->conn_wait_comp_err);
+ isert_wait4cmds(conn);
+ isert_wait4flush(isert_conn);
+ isert_wait4logout(isert_conn);
- wait_for_completion(&isert_conn->conn_wait);
- isert_put_conn(isert_conn);
+ INIT_WORK(&isert_conn->release_work, isert_release_work);
+ queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
@@ -3273,35 +3320,39 @@ static int __init isert_init(void)
{
int ret;
- isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
- if (!isert_rx_wq) {
- pr_err("Unable to allocate isert_rx_wq\n");
+ isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+ if (!isert_comp_wq) {
+ isert_err("Unable to allocate isert_comp_wq\n");
+ ret = -ENOMEM;
return -ENOMEM;
}
- isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
- if (!isert_comp_wq) {
- pr_err("Unable to allocate isert_comp_wq\n");
+ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!isert_release_wq) {
+ isert_err("Unable to allocate isert_release_wq\n");
ret = -ENOMEM;
- goto destroy_rx_wq;
+ goto destroy_comp_wq;
}
iscsit_register_transport(&iser_target_transport);
- pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+ isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+
return 0;
-destroy_rx_wq:
- destroy_workqueue(isert_rx_wq);
+destroy_comp_wq:
+ destroy_workqueue(isert_comp_wq);
+
return ret;
}
static void __exit isert_exit(void)
{
flush_scheduled_work();
+ destroy_workqueue(isert_release_wq);
destroy_workqueue(isert_comp_wq);
- destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport);
- pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
+ isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 04f51f7bf614..8dc8415d152d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -4,9 +4,37 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#define DRV_NAME "isert"
+#define PFX DRV_NAME ": "
+
+#define isert_dbg(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 2)) \
+ printk(KERN_DEBUG PFX "%s: " fmt,\
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_warn(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 0)) \
+ pr_warn(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_info(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 1)) \
+ pr_info(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
+
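
A userspace sketch of the verbosity gating these macros implement: printf stands in for the printk variants and debug_level for the isert_debug_level module parameter; the thresholds mirror the definitions above (warnings at level > 0, info at > 1, debug at > 2).

#include <stdio.h>

static int debug_level = 1;

#define log_dbg(fmt, ...) \
        do { if (debug_level > 2) printf("dbg: " fmt, ##__VA_ARGS__); } while (0)
#define log_info(fmt, ...) \
        do { if (debug_level > 1) printf("info: " fmt, ##__VA_ARGS__); } while (0)
#define log_warn(fmt, ...) \
        do { if (debug_level > 0) printf("warn: " fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
        log_warn("shown at the default level\n");
        log_info("suppressed until debug_level > 1\n");
        log_dbg("suppressed until debug_level > 2\n");
        return 0;
}
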
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
+#define ISER_BEACON_WRID 0xfffffffffffffffeULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -23,6 +51,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
};
@@ -44,9 +73,6 @@ struct iser_tx_desc {
struct ib_sge tx_sg[2];
int num_sge;
struct isert_cmd *isert_cmd;
- struct llist_node *comp_llnode_batch;
- struct llist_node comp_llnode;
- bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -81,6 +107,12 @@ struct isert_data_buf {
enum dma_data_direction dma_dir;
};
+enum {
+ DATA = 0,
+ PROT = 1,
+ SIG = 2,
+};
+
struct isert_rdma_wr {
struct list_head wr_list;
struct isert_cmd *isert_cmd;
@@ -90,6 +122,7 @@ struct isert_rdma_wr {
int send_wr_num;
struct ib_send_wr *send_wr;
struct ib_send_wr s_send_wr;
+ struct ib_sge ib_sg[3];
struct isert_data_buf data;
struct isert_data_buf prot;
struct fast_reg_descriptor *fr_desc;
@@ -117,14 +150,15 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
int post_recv_buf_count;
- atomic_t post_send_buf_count;
u32 responder_resources;
u32 initiator_depth;
+ bool pi_support;
u32 max_sge;
char *login_buf;
char *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
+ int login_req_len;
u64 login_rsp_dma;
unsigned int conn_rx_desc_head;
struct iser_rx_desc *conn_rx_descs;
@@ -132,13 +166,13 @@ struct isert_conn {
struct iscsi_conn *conn;
struct list_head conn_accept_node;
struct completion conn_login_comp;
+ struct completion login_req_comp;
struct iser_tx_desc conn_login_tx_desc;
struct rdma_cm_id *conn_cm_id;
struct ib_pd *conn_pd;
struct ib_mr *conn_mr;
struct ib_qp *conn_qp;
struct isert_device *conn_device;
- struct work_struct conn_logout_work;
struct mutex conn_mutex;
struct completion conn_wait;
struct completion conn_wait_comp_err;
@@ -147,31 +181,38 @@ struct isert_conn {
int conn_fr_pool_size;
/* lock to protect fastreg pool */
spinlock_t conn_lock;
-#define ISERT_COMP_BATCH_COUNT 8
- int conn_comp_batch;
- struct llist_head conn_comp_llist;
- bool disconnect;
+ struct work_struct release_work;
+ struct ib_recv_wr beacon;
+ bool logout_posted;
};
#define ISERT_MAX_CQ 64
-struct isert_cq_desc {
- struct isert_device *device;
- int cq_index;
- struct work_struct cq_rx_work;
- struct work_struct cq_tx_work;
+/**
+ * struct isert_comp - iSER completion context
+ *
+ * @device: pointer to device handle
+ * @cq: completion queue
+ * @wcs: work completion array
+ * @active_qps: Number of active QPs attached
+ * to completion context
+ * @work: completion work handle
+ */
+struct isert_comp {
+ struct isert_device *device;
+ struct ib_cq *cq;
+ struct ib_wc wcs[16];
+ int active_qps;
+ struct work_struct work;
};
struct isert_device {
int use_fastreg;
bool pi_capable;
- int cqs_used;
int refcount;
- int cq_active_qps[ISERT_MAX_CQ];
struct ib_device *ib_device;
- struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
- struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
- struct isert_cq_desc *cq_desc;
+ struct isert_comp *comps;
+ int comps_used;
struct list_head dev_node;
struct ib_device_attr dev_attr;
int (*reg_rdma_mem)(struct iscsi_conn *conn,
@@ -182,6 +223,7 @@ struct isert_device {
};
struct isert_np {
+ struct iscsi_np *np;
struct semaphore np_sem;
struct rdma_cm_id *np_cm_id;
struct mutex np_accept_mutex;
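The isert_dbg/isert_warn/isert_info macros added above gate their output on the module-wide isert_debug_level and prefix every message with the driver name and the calling function. The snippet below is a small userspace sketch of the same pattern; demo_debug_level, demo_info() and demo_dbg() are illustrative names only, not part of the driver.

/* Userspace sketch of a level-gated, prefixed logging macro. */
#include <stdio.h>

static int demo_debug_level = 1;	/* 0: errors, 1: +warn, 2: +info, 3: +debug */

#define DEMO_PFX "demo: "

#define demo_info(fmt, ...)						\
	do {								\
		if (demo_debug_level > 1)				\
			printf(DEMO_PFX "%s: " fmt, __func__, ##__VA_ARGS__); \
	} while (0)

#define demo_dbg(fmt, ...)						\
	do {								\
		if (demo_debug_level > 2)				\
			printf(DEMO_PFX "%s: " fmt, __func__, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	demo_info("suppressed at level 1\n");	/* needs level > 1 */
	demo_debug_level = 3;
	demo_dbg("printed at level 3\n");	/* needs level > 2 */
	return 0;
}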
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index db3c8c851af1..0747c0595a9d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2740,7 +2740,6 @@ static struct scsi_host_template srp_template = {
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.change_queue_depth = srp_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index dc829682701a..eb694ddad79f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1708,17 +1708,17 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
switch (srp_cmd->task_attr) {
case SRP_CMD_SIMPLE_Q:
- cmd->sam_task_attr = MSG_SIMPLE_TAG;
+ cmd->sam_task_attr = TCM_SIMPLE_TAG;
break;
case SRP_CMD_ORDERED_Q:
default:
- cmd->sam_task_attr = MSG_ORDERED_TAG;
+ cmd->sam_task_attr = TCM_ORDERED_TAG;
break;
case SRP_CMD_HEAD_OF_Q:
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
break;
case SRP_CMD_ACA:
- cmd->sam_task_attr = MSG_ACA_TAG;
+ cmd->sam_task_attr = TCM_ACA_TAG;
break;
}
@@ -1733,7 +1733,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
sizeof(srp_cmd->lun));
rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
&send_ioctx->sense_data[0], unpacked_lun, data_len,
- MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto send_sense;
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 3067d56b11a6..5844b80bd90e 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -204,16 +204,6 @@ config THERM_ADT746X
iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
better fan behaviour by default, and some manual control.
-config THERM_PM72
- tristate "Support for thermal management on PowerMac G5 (AGP)"
- depends on I2C && I2C_POWERMAC && PPC_PMAC64
- default n
- help
- This driver provides thermostat and fan control for the desktop
- G5 machines.
-
- This is deprecated, use windfarm instead.
-
config WINDFARM
tristate "New PowerMac thermal control infrastructure"
depends on PPC
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index d2f0120bc878..383ba920085b 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_ADB_IOP) += adb-iop.o
obj-$(CONFIG_ADB_PMU68K) += via-pmu68k.o
obj-$(CONFIG_ADB_MACIO) += macio-adb.o
-obj-$(CONFIG_THERM_PM72) += therm_pm72.o
obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o
obj-$(CONFIG_THERM_ADT746X) += therm_adt746x.o
obj-$(CONFIG_WINDFARM) += windfarm_core.o
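The therm_pm72.c file deleted below implements its control loops as fixed-point PID: temperatures and errors are 16.16 fixed point, the G_p/G_r/G_d gains are 12.20, and the combined sum is shifted right by 36 to get back to a plain fan-speed adjustment. Here is a minimal standalone sketch of that update step; the gains and inputs are made-up illustration values, not the ones used by the driver or read from the EEPROM.

/* Fixed-point PID step in the style of therm_pm72.c: 16.16 inputs,
 * 12.20 gains, result scaled back with ">> 36". Values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define FIX16(x)	((int32_t)((x) * 65536))	/* to 16.16 */
#define GAIN20(x)	((int64_t)((x) * 1048576))	/* to 12.20 */

static int32_t pid_step(int32_t err, int32_t integ, int32_t deriv)
{
	int64_t sum = 0;

	sum += GAIN20(1.5) * (int64_t)err;	/* proportional term */
	sum += GAIN20(0.1) * (int64_t)integ;	/* integral term */
	sum += GAIN20(0.8) * (int64_t)deriv;	/* derivative term */

	return (int32_t)(sum >> 36);		/* back to a plain RPM delta */
}

int main(void)
{
	/* 2 degrees of error, 10 degree-seconds of integral, no derivative */
	printf("rpm adjustment: %d\n", (int)pid_step(FIX16(2), FIX16(10), 0));
	return 0;
}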
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
deleted file mode 100644
index 7ed92582d2cf..000000000000
--- a/drivers/macintosh/therm_pm72.c
+++ /dev/null
@@ -1,2278 +0,0 @@
-/*
- * Device driver for the thermostats & fan controller of the
- * Apple G5 "PowerMac7,2" desktop machines.
- *
- * (c) Copyright IBM Corp. 2003-2004
- *
- * Maintained by: Benjamin Herrenschmidt
- * <benh@kernel.crashing.org>
- *
- *
- * The algorithm used is the PID control algorithm, used in the same
- * way as the published Darwin code, with the same values that
- * are present in the Darwin 7.0 snapshot property lists.
- *
- * As far as the CPUs control loops are concerned, I use the
- * calibration & PID constants provided by the EEPROM,
- * I do _not_ embed any value from the property lists, as the ones
- * provided by Darwin 7.0 seem to always have an older version than
- * what I've seen on the actual computers.
- * It would be interesting to verify that though. Darwin has a
- * version code of 1.0.0d11 for all control loops it seems, while
- * so far, the machines' EEPROMs contain a dataset versioned 1.0.0f
- *
- * Darwin doesn't provide source to all parts, some missing
- * bits like the AppleFCU driver or the actual scale of some
- * of the values returned by sensors had to be "guessed" some
- * way... or based on what Open Firmware does.
- *
- * I didn't yet figure out how to get the slots power consumption
- * out of the FCU, so that part has not been implemented yet and
- * the slots fan is set to a fixed 50% PWM, hoping this value is
- * safe enough ...
- *
- * Note: I have observed strange oscillations of the CPU control
- * loop on a dual G5 here. When idle, the CPU exhaust fan tends to
- * oscillate slowly (over several minutes) between the minimum
- * of 300 RPM and approx. 1000 RPM. I don't know what is causing
- * this, it could be some incorrect constant or an error in the
- * way I ported the algorithm, or it could be just normal. I
- * don't have full understanding on the way Apple tweaked the PID
- * algorithm for the CPU control, it is definitely not a standard
- * implementation...
- *
- * TODO: - Check MPU structure version/signature
- * - Add things like /sbin/overtemp for non-critical
- * overtemp conditions so userland can take some policy
- * decisions, like slowing down CPUs
- * - Deal with fan and i2c failures in a better way
- * - Maybe do a generic PID based on params used for
- * U3 and Drives ? Definitely need to factor code a bit
- * better... also make sensor detection more robust using
- * the device-tree to probe for them
- * - Figure out how to get the slots consumption and set the
- * slots fan accordingly
- *
- * History:
- *
- * Nov. 13, 2003 : 0.5
- * - First release
- *
- * Nov. 14, 2003 : 0.6
- * - Read fan speed from FCU, low level fan routines now deal
- * with errors & check fan status, though higher level don't
- * do much.
- * - Move a bunch of definitions to .h file
- *
- * Nov. 18, 2003 : 0.7
- * - Fix build on ppc64 kernel
- * - Move back statics definitions to .c file
- * - Avoid calling schedule_timeout with a negative number
- *
- * Dec. 18, 2003 : 0.8
- * - Fix typo when reading back fan speed on 2 CPU machines
- *
- * Mar. 11, 2004 : 0.9
- * - Rework code accessing the ADC chips, make it more robust and
- * closer to the chip spec. Also make sure it is configured properly,
- * I've seen yet unexplained cases where on startup, I would have stale
- * values in the configuration register
- * - Switch back to use of target fan speed for PID, thus lowering
- * pressure on i2c
- *
- * Oct. 20, 2004 : 1.1
- * - Add device-tree lookup for fan IDs, should detect liquid cooling
- * pumps when present
- * - Enable driver for PowerMac7,3 machines
- * - Split the U3/Backside cooling on U3 & U3H versions as Darwin does
- * - Add new CPU cooling algorithm for machines with liquid cooling
- * - Workaround for some PowerMac7,3 with empty "fan" node in the devtree
- * - Fix a signed/unsigned compare issue in some PID loops
- *
- * Mar. 10, 2005 : 1.2
- * - Add basic support for Xserve G5
- * - Retrieve pumps min/max from EEPROM image in device-tree (broken)
- * - Use min/max macros here or there
- * - Latest darwin updated U3H min fan speed to 20% PWM
- *
- * July. 06, 2006 : 1.3
- * - Fix setting of RPM fans on Xserve G5 (they were going too fast)
- * - Add missing slots fan control loop for Xserve G5
- * - Lower fixed slots fan speed from 50% to 40% on desktop G5s. We
- * still can't properly implement the control loop for these, so let's
- * reduce the noise a little bit, it appears that 40% still gives us
- * a pretty good air flow
- * - Add code to "tickle" the FCU regulary so it doesn't think that
- * we are gone while in fact, the machine just didn't need any fan
- * speed change lately
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/reboot.h>
-#include <linux/kmod.h>
-#include <linux/i2c.h>
-#include <linux/kthread.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/sections.h>
-#include <asm/macio.h>
-
-#include "therm_pm72.h"
-
-#define VERSION "1.3"
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(args...) printk(args)
-#else
-#define DBG(args...) do { } while(0)
-#endif
-
-
-/*
- * Driver statics
- */
-
-static struct platform_device * of_dev;
-static struct i2c_adapter * u3_0;
-static struct i2c_adapter * u3_1;
-static struct i2c_adapter * k2;
-static struct i2c_client * fcu;
-static struct cpu_pid_state processor_state[2];
-static struct basckside_pid_params backside_params;
-static struct backside_pid_state backside_state;
-static struct drives_pid_state drives_state;
-static struct dimm_pid_state dimms_state;
-static struct slots_pid_state slots_state;
-static int state;
-static int cpu_count;
-static int cpu_pid_type;
-static struct task_struct *ctrl_task;
-static struct completion ctrl_complete;
-static int critical_state;
-static int rackmac;
-static s32 dimm_output_clamp;
-static int fcu_rpm_shift;
-static int fcu_tickle_ticks;
-static DEFINE_MUTEX(driver_lock);
-
-/*
- * We have 3 types of CPU PID control. One is "split" old style control
- * for intake & exhaust fans, the other is "combined" control for both
- * CPUs that also deals with the pumps when present. To be "compatible"
- * with OS X at this point, we only use "COMBINED" on the machines that
- * are identified as having the pumps (though that identification is at
- * least dodgy). Ultimately, we could probably switch completely to this
- * algorithm provided we hack it to deal with the UP case
- */
-#define CPU_PID_TYPE_SPLIT 0
-#define CPU_PID_TYPE_COMBINED 1
-#define CPU_PID_TYPE_RACKMAC 2
-
-/*
- * This table describes all fans in the FCU. The "id" and "type" values
- * are defaults valid for all earlier machines. Newer machines will
- * eventually override the table content based on the device-tree
- */
-struct fcu_fan_table
-{
- char* loc; /* location code */
- int type; /* 0 = rpm, 1 = pwm, 2 = pump */
- int id; /* id or -1 */
-};
-
-#define FCU_FAN_RPM 0
-#define FCU_FAN_PWM 1
-
-#define FCU_FAN_ABSENT_ID -1
-
-#define FCU_FAN_COUNT ARRAY_SIZE(fcu_fans)
-
-struct fcu_fan_table fcu_fans[] = {
- [BACKSIDE_FAN_PWM_INDEX] = {
- .loc = "BACKSIDE,SYS CTRLR FAN",
- .type = FCU_FAN_PWM,
- .id = BACKSIDE_FAN_PWM_DEFAULT_ID,
- },
- [DRIVES_FAN_RPM_INDEX] = {
- .loc = "DRIVE BAY",
- .type = FCU_FAN_RPM,
- .id = DRIVES_FAN_RPM_DEFAULT_ID,
- },
- [SLOTS_FAN_PWM_INDEX] = {
- .loc = "SLOT,PCI FAN",
- .type = FCU_FAN_PWM,
- .id = SLOTS_FAN_PWM_DEFAULT_ID,
- },
- [CPUA_INTAKE_FAN_RPM_INDEX] = {
- .loc = "CPU A INTAKE",
- .type = FCU_FAN_RPM,
- .id = CPUA_INTAKE_FAN_RPM_DEFAULT_ID,
- },
- [CPUA_EXHAUST_FAN_RPM_INDEX] = {
- .loc = "CPU A EXHAUST",
- .type = FCU_FAN_RPM,
- .id = CPUA_EXHAUST_FAN_RPM_DEFAULT_ID,
- },
- [CPUB_INTAKE_FAN_RPM_INDEX] = {
- .loc = "CPU B INTAKE",
- .type = FCU_FAN_RPM,
- .id = CPUB_INTAKE_FAN_RPM_DEFAULT_ID,
- },
- [CPUB_EXHAUST_FAN_RPM_INDEX] = {
- .loc = "CPU B EXHAUST",
- .type = FCU_FAN_RPM,
- .id = CPUB_EXHAUST_FAN_RPM_DEFAULT_ID,
- },
- /* pumps aren't present by default, have to be looked up in the
- * device-tree
- */
- [CPUA_PUMP_RPM_INDEX] = {
- .loc = "CPU A PUMP",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
- [CPUB_PUMP_RPM_INDEX] = {
- .loc = "CPU B PUMP",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
- /* Xserve fans */
- [CPU_A1_FAN_RPM_INDEX] = {
- .loc = "CPU A 1",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
- [CPU_A2_FAN_RPM_INDEX] = {
- .loc = "CPU A 2",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
- [CPU_A3_FAN_RPM_INDEX] = {
- .loc = "CPU A 3",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
- [CPU_B1_FAN_RPM_INDEX] = {
- .loc = "CPU B 1",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
- [CPU_B2_FAN_RPM_INDEX] = {
- .loc = "CPU B 2",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
- [CPU_B3_FAN_RPM_INDEX] = {
- .loc = "CPU B 3",
- .type = FCU_FAN_RPM,
- .id = FCU_FAN_ABSENT_ID,
- },
-};
-
-static struct i2c_driver therm_pm72_driver;
-
-/*
- * Utility function to create an i2c_client structure and
- * attach it to one of u3 adapters
- */
-static struct i2c_client *attach_i2c_chip(int id, const char *name)
-{
- struct i2c_client *clt;
- struct i2c_adapter *adap;
- struct i2c_board_info info;
-
- if (id & 0x200)
- adap = k2;
- else if (id & 0x100)
- adap = u3_1;
- else
- adap = u3_0;
- if (adap == NULL)
- return NULL;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = (id >> 1) & 0x7f;
- strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);
- clt = i2c_new_device(adap, &info);
- if (!clt) {
- printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
- return NULL;
- }
-
- /*
- * Let i2c-core delete that device on driver removal.
- * This is safe because i2c-core holds the core_lock mutex for us.
- */
- list_add_tail(&clt->detected, &therm_pm72_driver.clients);
- return clt;
-}
-
-/*
- * Here are the i2c chip access wrappers
- */
-
-static void initialize_adc(struct cpu_pid_state *state)
-{
- int rc;
- u8 buf[2];
-
- /* Read the ADC configuration register and cache it. We
- * also make sure Config2 contains proper values; I've seen
- * cases where we got stale garbage in there, thus preventing
- * proper reading of conv. values
- */
-
- /* Clear Config2 */
- buf[0] = 5;
- buf[1] = 0;
- i2c_master_send(state->monitor, buf, 2);
-
- /* Read & cache Config1 */
- buf[0] = 1;
- rc = i2c_master_send(state->monitor, buf, 1);
- if (rc > 0) {
- rc = i2c_master_recv(state->monitor, buf, 1);
- if (rc > 0) {
- state->adc_config = buf[0];
- DBG("ADC config reg: %02x\n", state->adc_config);
- /* Disable shutdown mode */
- state->adc_config &= 0xfe;
- buf[0] = 1;
- buf[1] = state->adc_config;
- rc = i2c_master_send(state->monitor, buf, 2);
- }
- }
- if (rc <= 0)
- printk(KERN_ERR "therm_pm72: Error reading ADC config"
- " register !\n");
-}
-
-static int read_smon_adc(struct cpu_pid_state *state, int chan)
-{
- int rc, data, tries = 0;
- u8 buf[2];
-
- for (;;) {
- /* Set channel */
- buf[0] = 1;
- buf[1] = (state->adc_config & 0x1f) | (chan << 5);
- rc = i2c_master_send(state->monitor, buf, 2);
- if (rc <= 0)
- goto error;
- /* Wait for conversion */
- msleep(1);
- /* Switch to data register */
- buf[0] = 4;
- rc = i2c_master_send(state->monitor, buf, 1);
- if (rc <= 0)
- goto error;
- /* Read result */
- rc = i2c_master_recv(state->monitor, buf, 2);
- if (rc < 0)
- goto error;
- data = ((u16)buf[0]) << 8 | (u16)buf[1];
- return data >> 6;
- error:
- DBG("Error reading ADC, retrying...\n");
- if (++tries > 10) {
- printk(KERN_ERR "therm_pm72: Error reading ADC !\n");
- return -1;
- }
- msleep(10);
- }
-}
-
-static int read_lm87_reg(struct i2c_client * chip, int reg)
-{
- int rc, tries = 0;
- u8 buf;
-
- for (;;) {
- /* Set address */
- buf = (u8)reg;
- rc = i2c_master_send(chip, &buf, 1);
- if (rc <= 0)
- goto error;
- rc = i2c_master_recv(chip, &buf, 1);
- if (rc <= 0)
- goto error;
- return (int)buf;
- error:
- DBG("Error reading LM87, retrying...\n");
- if (++tries > 10) {
- printk(KERN_ERR "therm_pm72: Error reading LM87 !\n");
- return -1;
- }
- msleep(10);
- }
-}
-
-static int fan_read_reg(int reg, unsigned char *buf, int nb)
-{
- int tries, nr, nw;
-
- buf[0] = reg;
- tries = 0;
- for (;;) {
- nw = i2c_master_send(fcu, buf, 1);
- if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
- break;
- msleep(10);
- ++tries;
- }
- if (nw <= 0) {
- printk(KERN_ERR "Failure writing address to FCU: %d", nw);
- return -EIO;
- }
- tries = 0;
- for (;;) {
- nr = i2c_master_recv(fcu, buf, nb);
- if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
- break;
- msleep(10);
- ++tries;
- }
- if (nr <= 0)
- printk(KERN_ERR "Failure reading data from FCU: %d", nw);
- return nr;
-}
-
-static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
-{
- int tries, nw;
- unsigned char buf[16];
-
- buf[0] = reg;
- memcpy(buf+1, ptr, nb);
- ++nb;
- tries = 0;
- for (;;) {
- nw = i2c_master_send(fcu, buf, nb);
- if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
- break;
- msleep(10);
- ++tries;
- }
- if (nw < 0)
- printk(KERN_ERR "Failure writing to FCU: %d", nw);
- return nw;
-}
-
-static int start_fcu(void)
-{
- unsigned char buf = 0xff;
- int rc;
-
- rc = fan_write_reg(0xe, &buf, 1);
- if (rc < 0)
- return -EIO;
- rc = fan_write_reg(0x2e, &buf, 1);
- if (rc < 0)
- return -EIO;
- rc = fan_read_reg(0, &buf, 1);
- if (rc < 0)
- return -EIO;
- fcu_rpm_shift = (buf == 1) ? 2 : 3;
- printk(KERN_DEBUG "FCU Initialized, RPM fan shift is %d\n",
- fcu_rpm_shift);
-
- return 0;
-}
-
-static int set_rpm_fan(int fan_index, int rpm)
-{
- unsigned char buf[2];
- int rc, id, min, max;
-
- if (fcu_fans[fan_index].type != FCU_FAN_RPM)
- return -EINVAL;
- id = fcu_fans[fan_index].id;
- if (id == FCU_FAN_ABSENT_ID)
- return -EINVAL;
-
- min = 2400 >> fcu_rpm_shift;
- max = 56000 >> fcu_rpm_shift;
-
- if (rpm < min)
- rpm = min;
- else if (rpm > max)
- rpm = max;
- buf[0] = rpm >> (8 - fcu_rpm_shift);
- buf[1] = rpm << fcu_rpm_shift;
- rc = fan_write_reg(0x10 + (id * 2), buf, 2);
- if (rc < 0)
- return -EIO;
- return 0;
-}
-
-static int get_rpm_fan(int fan_index, int programmed)
-{
- unsigned char failure;
- unsigned char active;
- unsigned char buf[2];
- int rc, id, reg_base;
-
- if (fcu_fans[fan_index].type != FCU_FAN_RPM)
- return -EINVAL;
- id = fcu_fans[fan_index].id;
- if (id == FCU_FAN_ABSENT_ID)
- return -EINVAL;
-
- rc = fan_read_reg(0xb, &failure, 1);
- if (rc != 1)
- return -EIO;
- if ((failure & (1 << id)) != 0)
- return -EFAULT;
- rc = fan_read_reg(0xd, &active, 1);
- if (rc != 1)
- return -EIO;
- if ((active & (1 << id)) == 0)
- return -ENXIO;
-
- /* Programmed value or real current speed */
- reg_base = programmed ? 0x10 : 0x11;
- rc = fan_read_reg(reg_base + (id * 2), buf, 2);
- if (rc != 2)
- return -EIO;
-
- return (buf[0] << (8 - fcu_rpm_shift)) | buf[1] >> fcu_rpm_shift;
-}
-
-static int set_pwm_fan(int fan_index, int pwm)
-{
- unsigned char buf[2];
- int rc, id;
-
- if (fcu_fans[fan_index].type != FCU_FAN_PWM)
- return -EINVAL;
- id = fcu_fans[fan_index].id;
- if (id == FCU_FAN_ABSENT_ID)
- return -EINVAL;
-
- if (pwm < 10)
- pwm = 10;
- else if (pwm > 100)
- pwm = 100;
- pwm = (pwm * 2559) / 1000;
- buf[0] = pwm;
- rc = fan_write_reg(0x30 + (id * 2), buf, 1);
- if (rc < 0)
- return rc;
- return 0;
-}
-
-static int get_pwm_fan(int fan_index)
-{
- unsigned char failure;
- unsigned char active;
- unsigned char buf[2];
- int rc, id;
-
- if (fcu_fans[fan_index].type != FCU_FAN_PWM)
- return -EINVAL;
- id = fcu_fans[fan_index].id;
- if (id == FCU_FAN_ABSENT_ID)
- return -EINVAL;
-
- rc = fan_read_reg(0x2b, &failure, 1);
- if (rc != 1)
- return -EIO;
- if ((failure & (1 << id)) != 0)
- return -EFAULT;
- rc = fan_read_reg(0x2d, &active, 1);
- if (rc != 1)
- return -EIO;
- if ((active & (1 << id)) == 0)
- return -ENXIO;
-
- /* Programmed value or real current speed */
- rc = fan_read_reg(0x30 + (id * 2), buf, 1);
- if (rc != 1)
- return -EIO;
-
- return (buf[0] * 1000) / 2559;
-}
-
-static void tickle_fcu(void)
-{
- int pwm;
-
- pwm = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
-
- DBG("FCU Tickle, slots fan is: %d\n", pwm);
- if (pwm < 0)
- pwm = 100;
-
- if (!rackmac) {
- pwm = SLOTS_FAN_DEFAULT_PWM;
- } else if (pwm < SLOTS_PID_OUTPUT_MIN)
- pwm = SLOTS_PID_OUTPUT_MIN;
-
- /* That is hopefully enough to make the FCU happy */
- set_pwm_fan(SLOTS_FAN_PWM_INDEX, pwm);
-}
-
-
-/*
- * Utility routine to read the CPU calibration EEPROM data
- * from the device-tree
- */
-static int read_eeprom(int cpu, struct mpu_data *out)
-{
- struct device_node *np;
- char nodename[64];
- const u8 *data;
- int len;
-
- /* prom.c routine for finding a node by path is a bit brain dead
- * and requires exact @xxx unit numbers. This is a bit ugly but
- * will work for these machines
- */
- sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
- np = of_find_node_by_path(nodename);
- if (np == NULL) {
- printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
- return -ENODEV;
- }
- data = of_get_property(np, "cpuid", &len);
- if (data == NULL) {
- printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
- of_node_put(np);
- return -ENODEV;
- }
- memcpy(out, data, sizeof(struct mpu_data));
- of_node_put(np);
-
- return 0;
-}
-
-static void fetch_cpu_pumps_minmax(void)
-{
- struct cpu_pid_state *state0 = &processor_state[0];
- struct cpu_pid_state *state1 = &processor_state[1];
- u16 pump_min = 0, pump_max = 0xffff;
- u16 tmp[4];
-
- /* Try to fetch pumps min/max infos from eeprom */
-
- memcpy(&tmp, &state0->mpu.processor_part_num, 8);
- if (tmp[0] != 0xffff && tmp[1] != 0xffff) {
- pump_min = max(pump_min, tmp[0]);
- pump_max = min(pump_max, tmp[1]);
- }
- if (tmp[2] != 0xffff && tmp[3] != 0xffff) {
- pump_min = max(pump_min, tmp[2]);
- pump_max = min(pump_max, tmp[3]);
- }
-
- /* Double check the values, this _IS_ needed as the EEPROM on
- * some dual 2.5GHz G5s seem, at least, to have both min & max
- * set to the same value ... (grrrr)
- */
- if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) {
- pump_min = CPU_PUMP_OUTPUT_MIN;
- pump_max = CPU_PUMP_OUTPUT_MAX;
- }
-
- state0->pump_min = state1->pump_min = pump_min;
- state0->pump_max = state1->pump_max = pump_max;
-}
-
-/*
- * Now, unfortunately, sysfs doesn't give us a nice void * we could
- * pass around to the attribute functions, so we don't really have
- * any choice but to implement a bunch of them...
- *
- * That sucks a bit, we take the lock because FIX32TOPRINT evaluates
- * the input twice... I accept patches :)
- */
-#define BUILD_SHOW_FUNC_FIX(name, data) \
-static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- ssize_t r; \
- mutex_lock(&driver_lock); \
- r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data)); \
- mutex_unlock(&driver_lock); \
- return r; \
-}
-#define BUILD_SHOW_FUNC_INT(name, data) \
-static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%d", data); \
-}
-
-BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
-BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
-BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
-BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)
-
-BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
-BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
-BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
-BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)
-
-BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
-BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)
-
-BUILD_SHOW_FUNC_FIX(drives_temperature, drives_state.last_temp)
-BUILD_SHOW_FUNC_INT(drives_fan_rpm, drives_state.rpm)
-
-BUILD_SHOW_FUNC_FIX(slots_temperature, slots_state.last_temp)
-BUILD_SHOW_FUNC_INT(slots_fan_pwm, slots_state.pwm)
-
-BUILD_SHOW_FUNC_FIX(dimms_temperature, dimms_state.last_temp)
-
-static DEVICE_ATTR(cpu0_temperature,S_IRUGO,show_cpu0_temperature,NULL);
-static DEVICE_ATTR(cpu0_voltage,S_IRUGO,show_cpu0_voltage,NULL);
-static DEVICE_ATTR(cpu0_current,S_IRUGO,show_cpu0_current,NULL);
-static DEVICE_ATTR(cpu0_exhaust_fan_rpm,S_IRUGO,show_cpu0_exhaust_fan_rpm,NULL);
-static DEVICE_ATTR(cpu0_intake_fan_rpm,S_IRUGO,show_cpu0_intake_fan_rpm,NULL);
-
-static DEVICE_ATTR(cpu1_temperature,S_IRUGO,show_cpu1_temperature,NULL);
-static DEVICE_ATTR(cpu1_voltage,S_IRUGO,show_cpu1_voltage,NULL);
-static DEVICE_ATTR(cpu1_current,S_IRUGO,show_cpu1_current,NULL);
-static DEVICE_ATTR(cpu1_exhaust_fan_rpm,S_IRUGO,show_cpu1_exhaust_fan_rpm,NULL);
-static DEVICE_ATTR(cpu1_intake_fan_rpm,S_IRUGO,show_cpu1_intake_fan_rpm,NULL);
-
-static DEVICE_ATTR(backside_temperature,S_IRUGO,show_backside_temperature,NULL);
-static DEVICE_ATTR(backside_fan_pwm,S_IRUGO,show_backside_fan_pwm,NULL);
-
-static DEVICE_ATTR(drives_temperature,S_IRUGO,show_drives_temperature,NULL);
-static DEVICE_ATTR(drives_fan_rpm,S_IRUGO,show_drives_fan_rpm,NULL);
-
-static DEVICE_ATTR(slots_temperature,S_IRUGO,show_slots_temperature,NULL);
-static DEVICE_ATTR(slots_fan_pwm,S_IRUGO,show_slots_fan_pwm,NULL);
-
-static DEVICE_ATTR(dimms_temperature,S_IRUGO,show_dimms_temperature,NULL);
-
-/*
- * CPUs fans control loop
- */
-
-static int do_read_one_cpu_values(struct cpu_pid_state *state, s32 *temp, s32 *power)
-{
- s32 ltemp, volts, amps;
- int index, rc = 0;
-
- /* Default (in case of error) */
- *temp = state->cur_temp;
- *power = state->cur_power;
-
- if (cpu_pid_type == CPU_PID_TYPE_RACKMAC)
- index = (state->index == 0) ?
- CPU_A1_FAN_RPM_INDEX : CPU_B1_FAN_RPM_INDEX;
- else
- index = (state->index == 0) ?
- CPUA_EXHAUST_FAN_RPM_INDEX : CPUB_EXHAUST_FAN_RPM_INDEX;
-
- /* Read current fan status */
- rc = get_rpm_fan(index, !RPM_PID_USE_ACTUAL_SPEED);
- if (rc < 0) {
- /* XXX What do we do now ? Nothing for now, keep old value, but
- * return error upstream
- */
- DBG(" cpu %d, fan reading error !\n", state->index);
- } else {
- state->rpm = rc;
- DBG(" cpu %d, exhaust RPM: %d\n", state->index, state->rpm);
- }
-
- /* Get some sensor readings and scale it */
- ltemp = read_smon_adc(state, 1);
- if (ltemp == -1) {
- /* XXX What do we do now ? */
- state->overtemp++;
- if (rc == 0)
- rc = -EIO;
- DBG(" cpu %d, temp reading error !\n", state->index);
- } else {
- /* Fixup temperature according to diode calibration
- */
- DBG(" cpu %d, temp raw: %04x, m_diode: %04x, b_diode: %04x\n",
- state->index,
- ltemp, state->mpu.mdiode, state->mpu.bdiode);
- *temp = ((s32)ltemp * (s32)state->mpu.mdiode + ((s32)state->mpu.bdiode << 12)) >> 2;
- state->last_temp = *temp;
- DBG(" temp: %d.%03d\n", FIX32TOPRINT((*temp)));
- }
-
- /*
- * Read voltage & current and calculate power
- */
- volts = read_smon_adc(state, 3);
- amps = read_smon_adc(state, 4);
-
- /* Scale voltage and current raw sensor values according to fixed scales
- * obtained in Darwin and calculate power from I and V
- */
- volts *= ADC_CPU_VOLTAGE_SCALE;
- amps *= ADC_CPU_CURRENT_SCALE;
- *power = (((u64)volts) * ((u64)amps)) >> 16;
- state->voltage = volts;
- state->current_a = amps;
- state->last_power = *power;
-
- DBG(" cpu %d, current: %d.%03d, voltage: %d.%03d, power: %d.%03d W\n",
- state->index, FIX32TOPRINT(state->current_a),
- FIX32TOPRINT(state->voltage), FIX32TOPRINT(*power));
-
- return 0;
-}
-
-static void do_cpu_pid(struct cpu_pid_state *state, s32 temp, s32 power)
-{
- s32 power_target, integral, derivative, proportional, adj_in_target, sval;
- s64 integ_p, deriv_p, prop_p, sum;
- int i;
-
- /* Calculate power target value (could be done once for all)
- * and convert to a 16.16 fp number
- */
- power_target = ((u32)(state->mpu.pmaxh - state->mpu.padjmax)) << 16;
- DBG(" power target: %d.%03d, error: %d.%03d\n",
- FIX32TOPRINT(power_target), FIX32TOPRINT(power_target - power));
-
- /* Store temperature and power in history array */
- state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
- state->temp_history[state->cur_temp] = temp;
- state->cur_power = (state->cur_power + 1) % state->count_power;
- state->power_history[state->cur_power] = power;
- state->error_history[state->cur_power] = power_target - power;
-
- /* If first loop, fill the history table */
- if (state->first) {
- for (i = 0; i < (state->count_power - 1); i++) {
- state->cur_power = (state->cur_power + 1) % state->count_power;
- state->power_history[state->cur_power] = power;
- state->error_history[state->cur_power] = power_target - power;
- }
- for (i = 0; i < (CPU_TEMP_HISTORY_SIZE - 1); i++) {
- state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
- state->temp_history[state->cur_temp] = temp;
- }
- state->first = 0;
- }
-
- /* Calculate the integral term normally based on the "power" values */
- sum = 0;
- integral = 0;
- for (i = 0; i < state->count_power; i++)
- integral += state->error_history[i];
- integral *= CPU_PID_INTERVAL;
- DBG(" integral: %08x\n", integral);
-
- /* Calculate the adjusted input (sense value).
- * G_r is 12.20
- * integ is 16.16
- * so the result is 28.36
- *
- * input target is mpu.ttarget, input max is mpu.tmax
- */
- integ_p = ((s64)state->mpu.pid_gr) * (s64)integral;
- DBG(" integ_p: %d\n", (int)(integ_p >> 36));
- sval = (state->mpu.tmax << 16) - ((integ_p >> 20) & 0xffffffff);
- adj_in_target = (state->mpu.ttarget << 16);
- if (adj_in_target > sval)
- adj_in_target = sval;
- DBG(" adj_in_target: %d.%03d, ttarget: %d\n", FIX32TOPRINT(adj_in_target),
- state->mpu.ttarget);
-
- /* Calculate the derivative term */
- derivative = state->temp_history[state->cur_temp] -
- state->temp_history[(state->cur_temp + CPU_TEMP_HISTORY_SIZE - 1)
- % CPU_TEMP_HISTORY_SIZE];
- derivative /= CPU_PID_INTERVAL;
- deriv_p = ((s64)state->mpu.pid_gd) * (s64)derivative;
- DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
- sum += deriv_p;
-
- /* Calculate the proportional term */
- proportional = temp - adj_in_target;
- prop_p = ((s64)state->mpu.pid_gp) * (s64)proportional;
- DBG(" prop_p: %d\n", (int)(prop_p >> 36));
- sum += prop_p;
-
- /* Scale sum */
- sum >>= 36;
-
- DBG(" sum: %d\n", (int)sum);
- state->rpm += (s32)sum;
-}
-
-static void do_monitor_cpu_combined(void)
-{
- struct cpu_pid_state *state0 = &processor_state[0];
- struct cpu_pid_state *state1 = &processor_state[1];
- s32 temp0, power0, temp1, power1;
- s32 temp_combi, power_combi;
- int rc, intake, pump;
-
- rc = do_read_one_cpu_values(state0, &temp0, &power0);
- if (rc < 0) {
- /* XXX What do we do now ? */
- }
- state1->overtemp = 0;
- rc = do_read_one_cpu_values(state1, &temp1, &power1);
- if (rc < 0) {
- /* XXX What do we do now ? */
- }
- if (state1->overtemp)
- state0->overtemp++;
-
- temp_combi = max(temp0, temp1);
- power_combi = max(power0, power1);
-
- /* Check tmax, increment overtemp if we are there. At tmax+8, we go
- * full blown immediately and try to trigger a shutdown
- */
- if (temp_combi >= ((state0->mpu.tmax + 8) << 16)) {
- printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
- temp_combi >> 16);
- state0->overtemp += CPU_MAX_OVERTEMP / 4;
- } else if (temp_combi > (state0->mpu.tmax << 16)) {
- state0->overtemp++;
- printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
- temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
- } else {
- if (state0->overtemp)
- printk(KERN_WARNING "Temperature back down to %d\n",
- temp_combi >> 16);
- state0->overtemp = 0;
- }
- if (state0->overtemp >= CPU_MAX_OVERTEMP)
- critical_state = 1;
- if (state0->overtemp > 0) {
- state0->rpm = state0->mpu.rmaxn_exhaust_fan;
- state0->intake_rpm = intake = state0->mpu.rmaxn_intake_fan;
- pump = state0->pump_max;
- goto do_set_fans;
- }
-
- /* Do the PID */
- do_cpu_pid(state0, temp_combi, power_combi);
-
- /* Range check */
- state0->rpm = max(state0->rpm, (int)state0->mpu.rminn_exhaust_fan);
- state0->rpm = min(state0->rpm, (int)state0->mpu.rmaxn_exhaust_fan);
-
- /* Calculate intake fan speed */
- intake = (state0->rpm * CPU_INTAKE_SCALE) >> 16;
- intake = max(intake, (int)state0->mpu.rminn_intake_fan);
- intake = min(intake, (int)state0->mpu.rmaxn_intake_fan);
- state0->intake_rpm = intake;
-
- /* Calculate pump speed */
- pump = (state0->rpm * state0->pump_max) /
- state0->mpu.rmaxn_exhaust_fan;
- pump = min(pump, state0->pump_max);
- pump = max(pump, state0->pump_min);
-
- do_set_fans:
- /* We copy values from state 0 to state 1 for /sysfs */
- state1->rpm = state0->rpm;
- state1->intake_rpm = state0->intake_rpm;
-
- DBG("** CPU %d RPM: %d Ex, %d, Pump: %d, In, overtemp: %d\n",
- state1->index, (int)state1->rpm, intake, pump, state1->overtemp);
-
- /* We should check for errors, shouldn't we ? But then, what
- * do we do once the error occurs ? For FCU notified fan
- * failures (-EFAULT) we probably want to notify userland
- * some way...
- */
- set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
- set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state0->rpm);
- set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
- set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state0->rpm);
-
- if (fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
- set_rpm_fan(CPUA_PUMP_RPM_INDEX, pump);
- if (fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
- set_rpm_fan(CPUB_PUMP_RPM_INDEX, pump);
-}
-
-static void do_monitor_cpu_split(struct cpu_pid_state *state)
-{
- s32 temp, power;
- int rc, intake;
-
- /* Read current fan status */
- rc = do_read_one_cpu_values(state, &temp, &power);
- if (rc < 0) {
- /* XXX What do we do now ? */
- }
-
- /* Check tmax, increment overtemp if we are there. At tmax+8, we go
- * full blown immediately and try to trigger a shutdown
- */
- if (temp >= ((state->mpu.tmax + 8) << 16)) {
- printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
- " (%d) !\n",
- state->index, temp >> 16);
- state->overtemp += CPU_MAX_OVERTEMP / 4;
- } else if (temp > (state->mpu.tmax << 16)) {
- state->overtemp++;
- printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
- state->index, temp >> 16, state->mpu.tmax, state->overtemp);
- } else {
- if (state->overtemp)
- printk(KERN_WARNING "CPU %d temperature back down to %d\n",
- state->index, temp >> 16);
- state->overtemp = 0;
- }
- if (state->overtemp >= CPU_MAX_OVERTEMP)
- critical_state = 1;
- if (state->overtemp > 0) {
- state->rpm = state->mpu.rmaxn_exhaust_fan;
- state->intake_rpm = intake = state->mpu.rmaxn_intake_fan;
- goto do_set_fans;
- }
-
- /* Do the PID */
- do_cpu_pid(state, temp, power);
-
- /* Range check */
- state->rpm = max(state->rpm, (int)state->mpu.rminn_exhaust_fan);
- state->rpm = min(state->rpm, (int)state->mpu.rmaxn_exhaust_fan);
-
- /* Calculate intake fan */
- intake = (state->rpm * CPU_INTAKE_SCALE) >> 16;
- intake = max(intake, (int)state->mpu.rminn_intake_fan);
- intake = min(intake, (int)state->mpu.rmaxn_intake_fan);
- state->intake_rpm = intake;
-
- do_set_fans:
- DBG("** CPU %d RPM: %d Ex, %d In, overtemp: %d\n",
- state->index, (int)state->rpm, intake, state->overtemp);
-
- /* We should check for errors, shouldn't we ? But then, what
- * do we do once the error occurs ? For FCU notified fan
- * failures (-EFAULT) we probably want to notify userland
- * some way...
- */
- if (state->index == 0) {
- set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
- set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state->rpm);
- } else {
- set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
- set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state->rpm);
- }
-}
-
-static void do_monitor_cpu_rack(struct cpu_pid_state *state)
-{
- s32 temp, power, fan_min;
- int rc;
-
- /* Read current fan status */
- rc = do_read_one_cpu_values(state, &temp, &power);
- if (rc < 0) {
- /* XXX What do we do now ? */
- }
-
- /* Check tmax, increment overtemp if we are there. At tmax+8, we go
- * full blown immediately and try to trigger a shutdown
- */
- if (temp >= ((state->mpu.tmax + 8) << 16)) {
- printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
- " (%d) !\n",
- state->index, temp >> 16);
- state->overtemp = CPU_MAX_OVERTEMP / 4;
- } else if (temp > (state->mpu.tmax << 16)) {
- state->overtemp++;
- printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
- state->index, temp >> 16, state->mpu.tmax, state->overtemp);
- } else {
- if (state->overtemp)
- printk(KERN_WARNING "CPU %d temperature back down to %d\n",
- state->index, temp >> 16);
- state->overtemp = 0;
- }
- if (state->overtemp >= CPU_MAX_OVERTEMP)
- critical_state = 1;
- if (state->overtemp > 0) {
- state->rpm = state->intake_rpm = state->mpu.rmaxn_intake_fan;
- goto do_set_fans;
- }
-
- /* Do the PID */
- do_cpu_pid(state, temp, power);
-
- /* Check clamp from dimms */
- fan_min = dimm_output_clamp;
- fan_min = max(fan_min, (int)state->mpu.rminn_intake_fan);
-
- DBG(" CPU min mpu = %d, min dimm = %d\n",
- state->mpu.rminn_intake_fan, dimm_output_clamp);
-
- state->rpm = max(state->rpm, (int)fan_min);
- state->rpm = min(state->rpm, (int)state->mpu.rmaxn_intake_fan);
- state->intake_rpm = state->rpm;
-
- do_set_fans:
- DBG("** CPU %d RPM: %d overtemp: %d\n",
- state->index, (int)state->rpm, state->overtemp);
-
- /* We should check for errors, shouldn't we ? But then, what
- * do we do once the error occurs ? For FCU notified fan
- * failures (-EFAULT) we probably want to notify userland
- * some way...
- */
- if (state->index == 0) {
- set_rpm_fan(CPU_A1_FAN_RPM_INDEX, state->rpm);
- set_rpm_fan(CPU_A2_FAN_RPM_INDEX, state->rpm);
- set_rpm_fan(CPU_A3_FAN_RPM_INDEX, state->rpm);
- } else {
- set_rpm_fan(CPU_B1_FAN_RPM_INDEX, state->rpm);
- set_rpm_fan(CPU_B2_FAN_RPM_INDEX, state->rpm);
- set_rpm_fan(CPU_B3_FAN_RPM_INDEX, state->rpm);
- }
-}
-
-/*
- * Initialize the state structure for one CPU control loop
- */
-static int init_processor_state(struct cpu_pid_state *state, int index)
-{
- int err;
-
- state->index = index;
- state->first = 1;
- state->rpm = (cpu_pid_type == CPU_PID_TYPE_RACKMAC) ? 4000 : 1000;
- state->overtemp = 0;
- state->adc_config = 0x00;
-
-
- if (index == 0)
- state->monitor = attach_i2c_chip(SUPPLY_MONITOR_ID, "CPU0_monitor");
- else if (index == 1)
- state->monitor = attach_i2c_chip(SUPPLY_MONITORB_ID, "CPU1_monitor");
- if (state->monitor == NULL)
- goto fail;
-
- if (read_eeprom(index, &state->mpu))
- goto fail;
-
- state->count_power = state->mpu.tguardband;
- if (state->count_power > CPU_POWER_HISTORY_SIZE) {
- printk(KERN_WARNING "Warning ! too many power history slots\n");
- state->count_power = CPU_POWER_HISTORY_SIZE;
- }
- DBG("CPU %d Using %d power history entries\n", index, state->count_power);
-
- if (index == 0) {
- err = device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_current);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
- } else {
- err = device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_current);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
- err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
- }
- if (err)
- printk(KERN_WARNING "Failed to create some of the attribute"
- "files for CPU %d\n", index);
-
- return 0;
- fail:
- state->monitor = NULL;
-
- return -ENODEV;
-}
-
-/*
- * Dispose of the state data for one CPU control loop
- */
-static void dispose_processor_state(struct cpu_pid_state *state)
-{
- if (state->monitor == NULL)
- return;
-
- if (state->index == 0) {
- device_remove_file(&of_dev->dev, &dev_attr_cpu0_temperature);
- device_remove_file(&of_dev->dev, &dev_attr_cpu0_voltage);
- device_remove_file(&of_dev->dev, &dev_attr_cpu0_current);
- device_remove_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
- device_remove_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
- } else {
- device_remove_file(&of_dev->dev, &dev_attr_cpu1_temperature);
- device_remove_file(&of_dev->dev, &dev_attr_cpu1_voltage);
- device_remove_file(&of_dev->dev, &dev_attr_cpu1_current);
- device_remove_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
- device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
- }
-
- state->monitor = NULL;
-}
-
-/*
- * Motherboard backside & U3 heatsink fan control loop
- */
-static void do_monitor_backside(struct backside_pid_state *state)
-{
- s32 temp, integral, derivative, fan_min;
- s64 integ_p, deriv_p, prop_p, sum;
- int i, rc;
-
- if (--state->ticks != 0)
- return;
- state->ticks = backside_params.interval;
-
- DBG("backside:\n");
-
- /* Check fan status */
- rc = get_pwm_fan(BACKSIDE_FAN_PWM_INDEX);
- if (rc < 0) {
- printk(KERN_WARNING "Error %d reading backside fan !\n", rc);
- /* XXX What do we do now ? */
- } else
- state->pwm = rc;
- DBG(" current pwm: %d\n", state->pwm);
-
- /* Get some sensor readings */
- temp = i2c_smbus_read_byte_data(state->monitor, MAX6690_EXT_TEMP) << 16;
- state->last_temp = temp;
- DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
- FIX32TOPRINT(backside_params.input_target));
-
- /* Store temperature and error in history array */
- state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] = temp - backside_params.input_target;
-
- /* If first loop, fill the history table */
- if (state->first) {
- for (i = 0; i < (BACKSIDE_PID_HISTORY_SIZE - 1); i++) {
- state->cur_sample = (state->cur_sample + 1) %
- BACKSIDE_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] =
- temp - backside_params.input_target;
- }
- state->first = 0;
- }
-
- /* Calculate the integral term */
- sum = 0;
- integral = 0;
- for (i = 0; i < BACKSIDE_PID_HISTORY_SIZE; i++)
- integral += state->error_history[i];
- integral *= backside_params.interval;
- DBG(" integral: %08x\n", integral);
- integ_p = ((s64)backside_params.G_r) * (s64)integral;
- DBG(" integ_p: %d\n", (int)(integ_p >> 36));
- sum += integ_p;
-
- /* Calculate the derivative term */
- derivative = state->error_history[state->cur_sample] -
- state->error_history[(state->cur_sample + BACKSIDE_PID_HISTORY_SIZE - 1)
- % BACKSIDE_PID_HISTORY_SIZE];
- derivative /= backside_params.interval;
- deriv_p = ((s64)backside_params.G_d) * (s64)derivative;
- DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
- sum += deriv_p;
-
- /* Calculate the proportional term */
- prop_p = ((s64)backside_params.G_p) * (s64)(state->error_history[state->cur_sample]);
- DBG(" prop_p: %d\n", (int)(prop_p >> 36));
- sum += prop_p;
-
- /* Scale sum */
- sum >>= 36;
-
- DBG(" sum: %d\n", (int)sum);
- if (backside_params.additive)
- state->pwm += (s32)sum;
- else
- state->pwm = sum;
-
- /* Check for clamp */
- fan_min = (dimm_output_clamp * 100) / 14000;
- fan_min = max(fan_min, backside_params.output_min);
-
- state->pwm = max(state->pwm, fan_min);
- state->pwm = min(state->pwm, backside_params.output_max);
-
- DBG("** BACKSIDE PWM: %d\n", (int)state->pwm);
- set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, state->pwm);
-}
-
-/*
- * Initialize the state structure for the backside fan control loop
- */
-static int init_backside_state(struct backside_pid_state *state)
-{
- struct device_node *u3;
- int u3h = 1; /* conservative by default */
- int err;
-
- /*
- * There are different PID params for machines with U3 and machines
- * with U3H, pick the right ones now
- */
- u3 = of_find_node_by_path("/u3@0,f8000000");
- if (u3 != NULL) {
- const u32 *vers = of_get_property(u3, "device-rev", NULL);
- if (vers)
- if (((*vers) & 0x3f) < 0x34)
- u3h = 0;
- of_node_put(u3);
- }
-
- if (rackmac) {
- backside_params.G_d = BACKSIDE_PID_RACK_G_d;
- backside_params.input_target = BACKSIDE_PID_RACK_INPUT_TARGET;
- backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
- backside_params.interval = BACKSIDE_PID_RACK_INTERVAL;
- backside_params.G_p = BACKSIDE_PID_RACK_G_p;
- backside_params.G_r = BACKSIDE_PID_G_r;
- backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
- backside_params.additive = 0;
- } else if (u3h) {
- backside_params.G_d = BACKSIDE_PID_U3H_G_d;
- backside_params.input_target = BACKSIDE_PID_U3H_INPUT_TARGET;
- backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
- backside_params.interval = BACKSIDE_PID_INTERVAL;
- backside_params.G_p = BACKSIDE_PID_G_p;
- backside_params.G_r = BACKSIDE_PID_G_r;
- backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
- backside_params.additive = 1;
- } else {
- backside_params.G_d = BACKSIDE_PID_U3_G_d;
- backside_params.input_target = BACKSIDE_PID_U3_INPUT_TARGET;
- backside_params.output_min = BACKSIDE_PID_U3_OUTPUT_MIN;
- backside_params.interval = BACKSIDE_PID_INTERVAL;
- backside_params.G_p = BACKSIDE_PID_G_p;
- backside_params.G_r = BACKSIDE_PID_G_r;
- backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
- backside_params.additive = 1;
- }
-
- state->ticks = 1;
- state->first = 1;
- state->pwm = 50;
-
- state->monitor = attach_i2c_chip(BACKSIDE_MAX_ID, "backside_temp");
- if (state->monitor == NULL)
- return -ENODEV;
-
- err = device_create_file(&of_dev->dev, &dev_attr_backside_temperature);
- err |= device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
- if (err)
- printk(KERN_WARNING "Failed to create attribute file(s)"
- " for backside fan\n");
-
- return 0;
-}
-
-/*
- * Dispose of the state data for the backside control loop
- */
-static void dispose_backside_state(struct backside_pid_state *state)
-{
- if (state->monitor == NULL)
- return;
-
- device_remove_file(&of_dev->dev, &dev_attr_backside_temperature);
- device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
-
- state->monitor = NULL;
-}
-
-/*
- * Drives bay fan control loop
- */
-static void do_monitor_drives(struct drives_pid_state *state)
-{
- s32 temp, integral, derivative;
- s64 integ_p, deriv_p, prop_p, sum;
- int i, rc;
-
- if (--state->ticks != 0)
- return;
- state->ticks = DRIVES_PID_INTERVAL;
-
- DBG("drives:\n");
-
- /* Check fan status */
- rc = get_rpm_fan(DRIVES_FAN_RPM_INDEX, !RPM_PID_USE_ACTUAL_SPEED);
- if (rc < 0) {
- printk(KERN_WARNING "Error %d reading drives fan !\n", rc);
- /* XXX What do we do now ? */
- } else
- state->rpm = rc;
- DBG(" current rpm: %d\n", state->rpm);
-
- /* Get some sensor readings */
- temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
- DS1775_TEMP)) << 8;
- state->last_temp = temp;
- DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
- FIX32TOPRINT(DRIVES_PID_INPUT_TARGET));
-
- /* Store temperature and error in history array */
- state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET;
-
- /* If first loop, fill the history table */
- if (state->first) {
- for (i = 0; i < (DRIVES_PID_HISTORY_SIZE - 1); i++) {
- state->cur_sample = (state->cur_sample + 1) %
- DRIVES_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] =
- temp - DRIVES_PID_INPUT_TARGET;
- }
- state->first = 0;
- }
-
- /* Calculate the integral term */
- sum = 0;
- integral = 0;
- for (i = 0; i < DRIVES_PID_HISTORY_SIZE; i++)
- integral += state->error_history[i];
- integral *= DRIVES_PID_INTERVAL;
- DBG(" integral: %08x\n", integral);
- integ_p = ((s64)DRIVES_PID_G_r) * (s64)integral;
- DBG(" integ_p: %d\n", (int)(integ_p >> 36));
- sum += integ_p;
-
- /* Calculate the derivative term */
- derivative = state->error_history[state->cur_sample] -
- state->error_history[(state->cur_sample + DRIVES_PID_HISTORY_SIZE - 1)
- % DRIVES_PID_HISTORY_SIZE];
- derivative /= DRIVES_PID_INTERVAL;
- deriv_p = ((s64)DRIVES_PID_G_d) * (s64)derivative;
- DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
- sum += deriv_p;
-
- /* Calculate the proportional term */
- prop_p = ((s64)DRIVES_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
- DBG(" prop_p: %d\n", (int)(prop_p >> 36));
- sum += prop_p;
-
- /* Scale sum */
- sum >>= 36;
-
- DBG(" sum: %d\n", (int)sum);
- state->rpm += (s32)sum;
-
- state->rpm = max(state->rpm, DRIVES_PID_OUTPUT_MIN);
- state->rpm = min(state->rpm, DRIVES_PID_OUTPUT_MAX);
-
- DBG("** DRIVES RPM: %d\n", (int)state->rpm);
- set_rpm_fan(DRIVES_FAN_RPM_INDEX, state->rpm);
-}
-
-/*
- * Initialize the state structure for the drives bay fan control loop
- */
-static int init_drives_state(struct drives_pid_state *state)
-{
- int err;
-
- state->ticks = 1;
- state->first = 1;
- state->rpm = 1000;
-
- state->monitor = attach_i2c_chip(DRIVES_DALLAS_ID, "drives_temp");
- if (state->monitor == NULL)
- return -ENODEV;
-
- err = device_create_file(&of_dev->dev, &dev_attr_drives_temperature);
- err |= device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
- if (err)
- printk(KERN_WARNING "Failed to create attribute file(s)"
- " for drives bay fan\n");
-
- return 0;
-}
-
-/*
- * Dispose of the state data for the drives control loop
- */
-static void dispose_drives_state(struct drives_pid_state *state)
-{
- if (state->monitor == NULL)
- return;
-
- device_remove_file(&of_dev->dev, &dev_attr_drives_temperature);
- device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
-
- state->monitor = NULL;
-}
-
-/*
- * DIMMs temp control loop
- */
-static void do_monitor_dimms(struct dimm_pid_state *state)
-{
- s32 temp, integral, derivative, fan_min;
- s64 integ_p, deriv_p, prop_p, sum;
- int i;
-
- if (--state->ticks != 0)
- return;
- state->ticks = DIMM_PID_INTERVAL;
-
- DBG("DIMM:\n");
-
- DBG(" current value: %d\n", state->output);
-
- temp = read_lm87_reg(state->monitor, LM87_INT_TEMP);
- if (temp < 0)
- return;
- temp <<= 16;
- state->last_temp = temp;
- DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
- FIX32TOPRINT(DIMM_PID_INPUT_TARGET));
-
- /* Store temperature and error in history array */
- state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET;
-
- /* If first loop, fill the history table */
- if (state->first) {
- for (i = 0; i < (DIMM_PID_HISTORY_SIZE - 1); i++) {
- state->cur_sample = (state->cur_sample + 1) %
- DIMM_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] =
- temp - DIMM_PID_INPUT_TARGET;
- }
- state->first = 0;
- }
-
- /* Calculate the integral term */
- sum = 0;
- integral = 0;
- for (i = 0; i < DIMM_PID_HISTORY_SIZE; i++)
- integral += state->error_history[i];
- integral *= DIMM_PID_INTERVAL;
- DBG(" integral: %08x\n", integral);
- integ_p = ((s64)DIMM_PID_G_r) * (s64)integral;
- DBG(" integ_p: %d\n", (int)(integ_p >> 36));
- sum += integ_p;
-
- /* Calculate the derivative term */
- derivative = state->error_history[state->cur_sample] -
- state->error_history[(state->cur_sample + DIMM_PID_HISTORY_SIZE - 1)
- % DIMM_PID_HISTORY_SIZE];
- derivative /= DIMM_PID_INTERVAL;
- deriv_p = ((s64)DIMM_PID_G_d) * (s64)derivative;
- DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
- sum += deriv_p;
-
- /* Calculate the proportional term */
- prop_p = ((s64)DIMM_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
- DBG(" prop_p: %d\n", (int)(prop_p >> 36));
- sum += prop_p;
-
- /* Scale sum */
- sum >>= 36;
-
- DBG(" sum: %d\n", (int)sum);
- state->output = (s32)sum;
- state->output = max(state->output, DIMM_PID_OUTPUT_MIN);
- state->output = min(state->output, DIMM_PID_OUTPUT_MAX);
- dimm_output_clamp = state->output;
-
- DBG("** DIMM clamp value: %d\n", (int)state->output);
-
- /* Backside PID is only every 5 seconds, force backside fan clamping now */
- fan_min = (dimm_output_clamp * 100) / 14000;
- fan_min = max(fan_min, backside_params.output_min);
- if (backside_state.pwm < fan_min) {
- backside_state.pwm = fan_min;
- DBG(" -> applying clamp to backside fan now: %d !\n", fan_min);
- set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, fan_min);
- }
-}
-
-/*
- * Initialize the state structure for the DIMM temp control loop
- */
-static int init_dimms_state(struct dimm_pid_state *state)
-{
- state->ticks = 1;
- state->first = 1;
- state->output = 4000;
-
- state->monitor = attach_i2c_chip(XSERVE_DIMMS_LM87, "dimms_temp");
- if (state->monitor == NULL)
- return -ENODEV;
-
- if (device_create_file(&of_dev->dev, &dev_attr_dimms_temperature))
- printk(KERN_WARNING "Failed to create attribute file"
- " for DIMM temperature\n");
-
- return 0;
-}
-
-/*
- * Dispose of the state data for the DIMM control loop
- */
-static void dispose_dimms_state(struct dimm_pid_state *state)
-{
- if (state->monitor == NULL)
- return;
-
- device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature);
-
- state->monitor = NULL;
-}
-
-/*
- * Slots fan control loop
- */
-static void do_monitor_slots(struct slots_pid_state *state)
-{
- s32 temp, integral, derivative;
- s64 integ_p, deriv_p, prop_p, sum;
- int i, rc;
-
- if (--state->ticks != 0)
- return;
- state->ticks = SLOTS_PID_INTERVAL;
-
- DBG("slots:\n");
-
- /* Check fan status */
- rc = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
- if (rc < 0) {
- printk(KERN_WARNING "Error %d reading slots fan !\n", rc);
- /* XXX What do we do now ? */
- } else
- state->pwm = rc;
- DBG(" current pwm: %d\n", state->pwm);
-
- /* Get some sensor readings */
- temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
- DS1775_TEMP)) << 8;
- state->last_temp = temp;
- DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
- FIX32TOPRINT(SLOTS_PID_INPUT_TARGET));
-
- /* Store temperature and error in history array */
- state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET;
-
- /* If first loop, fill the history table */
- if (state->first) {
- for (i = 0; i < (SLOTS_PID_HISTORY_SIZE - 1); i++) {
- state->cur_sample = (state->cur_sample + 1) %
- SLOTS_PID_HISTORY_SIZE;
- state->sample_history[state->cur_sample] = temp;
- state->error_history[state->cur_sample] =
- temp - SLOTS_PID_INPUT_TARGET;
- }
- state->first = 0;
- }
-
- /* Calculate the integral term */
- sum = 0;
- integral = 0;
- for (i = 0; i < SLOTS_PID_HISTORY_SIZE; i++)
- integral += state->error_history[i];
- integral *= SLOTS_PID_INTERVAL;
- DBG(" integral: %08x\n", integral);
- integ_p = ((s64)SLOTS_PID_G_r) * (s64)integral;
- DBG(" integ_p: %d\n", (int)(integ_p >> 36));
- sum += integ_p;
-
- /* Calculate the derivative term */
- derivative = state->error_history[state->cur_sample] -
- state->error_history[(state->cur_sample + SLOTS_PID_HISTORY_SIZE - 1)
- % SLOTS_PID_HISTORY_SIZE];
- derivative /= SLOTS_PID_INTERVAL;
- deriv_p = ((s64)SLOTS_PID_G_d) * (s64)derivative;
- DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
- sum += deriv_p;
-
- /* Calculate the proportional term */
- prop_p = ((s64)SLOTS_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
- DBG(" prop_p: %d\n", (int)(prop_p >> 36));
- sum += prop_p;
-
- /* Scale sum */
- sum >>= 36;
-
- DBG(" sum: %d\n", (int)sum);
- state->pwm = (s32)sum;
-
- state->pwm = max(state->pwm, SLOTS_PID_OUTPUT_MIN);
- state->pwm = min(state->pwm, SLOTS_PID_OUTPUT_MAX);
-
-	DBG("** SLOTS PWM: %d\n", (int)state->pwm);
- set_pwm_fan(SLOTS_FAN_PWM_INDEX, state->pwm);
-}
-
-/*
- * Initialize the state structure for the slots bay fan control loop
- */
-static int init_slots_state(struct slots_pid_state *state)
-{
- int err;
-
- state->ticks = 1;
- state->first = 1;
- state->pwm = 50;
-
- state->monitor = attach_i2c_chip(XSERVE_SLOTS_LM75, "slots_temp");
- if (state->monitor == NULL)
- return -ENODEV;
-
- err = device_create_file(&of_dev->dev, &dev_attr_slots_temperature);
- err |= device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
- if (err)
- printk(KERN_WARNING "Failed to create attribute file(s)"
- " for slots bay fan\n");
-
- return 0;
-}
-
-/*
- * Dispose of the state data for the slots control loop
- */
-static void dispose_slots_state(struct slots_pid_state *state)
-{
- if (state->monitor == NULL)
- return;
-
- device_remove_file(&of_dev->dev, &dev_attr_slots_temperature);
- device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
-
- state->monitor = NULL;
-}
-
-
-static int call_critical_overtemp(void)
-{
- char *argv[] = { critical_overtemp_path, NULL };
- static char *envp[] = { "HOME=/",
- "TERM=linux",
- "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
- NULL };
-
- return call_usermodehelper(critical_overtemp_path,
- argv, envp, UMH_WAIT_EXEC);
-}
-
-
-/*
- * Here's the kernel thread that calls the various control loops
- */
-static int main_control_loop(void *x)
-{
- DBG("main_control_loop started\n");
-
- mutex_lock(&driver_lock);
-
- if (start_fcu() < 0) {
- printk(KERN_ERR "kfand: failed to start FCU\n");
- mutex_unlock(&driver_lock);
- goto out;
- }
-
- /* Set the PCI fan once for now on non-RackMac */
- if (!rackmac)
- set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM);
-
- /* Initialize ADCs */
- initialize_adc(&processor_state[0]);
- if (processor_state[1].monitor != NULL)
- initialize_adc(&processor_state[1]);
-
- fcu_tickle_ticks = FCU_TICKLE_TICKS;
-
- mutex_unlock(&driver_lock);
-
- while (state == state_attached) {
- unsigned long elapsed, start;
-
- start = jiffies;
-
- mutex_lock(&driver_lock);
-
- /* Tickle the FCU just in case */
- if (--fcu_tickle_ticks < 0) {
- fcu_tickle_ticks = FCU_TICKLE_TICKS;
- tickle_fcu();
- }
-
- /* First, we always calculate the new DIMMs state on an Xserve */
- if (rackmac)
- do_monitor_dimms(&dimms_state);
-
- /* Then, the CPUs */
- if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
- do_monitor_cpu_combined();
- else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) {
- do_monitor_cpu_rack(&processor_state[0]);
- if (processor_state[1].monitor != NULL)
- do_monitor_cpu_rack(&processor_state[1]);
- // better deal with UP
- } else {
- do_monitor_cpu_split(&processor_state[0]);
- if (processor_state[1].monitor != NULL)
- do_monitor_cpu_split(&processor_state[1]);
- // better deal with UP
- }
- /* Then, the rest */
- do_monitor_backside(&backside_state);
- if (rackmac)
- do_monitor_slots(&slots_state);
- else
- do_monitor_drives(&drives_state);
- mutex_unlock(&driver_lock);
-
- if (critical_state == 1) {
- printk(KERN_WARNING "Temperature control detected a critical condition\n");
- printk(KERN_WARNING "Attempting to shut down...\n");
- if (call_critical_overtemp()) {
- printk(KERN_WARNING "Can't call %s, power off now!\n",
- critical_overtemp_path);
- machine_power_off();
- }
- }
- if (critical_state > 0)
- critical_state++;
- if (critical_state > MAX_CRITICAL_STATE) {
- printk(KERN_WARNING "Shutdown timed out, power off now !\n");
- machine_power_off();
- }
-
- // FIXME: Deal with signals
- elapsed = jiffies - start;
- if (elapsed < HZ)
- schedule_timeout_interruptible(HZ - elapsed);
- }
-
- out:
- DBG("main_control_loop ended\n");
-
- ctrl_task = 0;
- complete_and_exit(&ctrl_complete, 0);
-}
-
-/*
- * Dispose the control loops when tearing down
- */
-static void dispose_control_loops(void)
-{
- dispose_processor_state(&processor_state[0]);
- dispose_processor_state(&processor_state[1]);
- dispose_backside_state(&backside_state);
- dispose_drives_state(&drives_state);
- dispose_slots_state(&slots_state);
- dispose_dimms_state(&dimms_state);
-}
-
-/*
- * Create the control loops. U3-0 i2c bus is up, so we can now
- * get to the various sensors
- */
-static int create_control_loops(void)
-{
- struct device_node *np;
-
- /* Count CPUs from the device-tree, we don't care how many are
- * actually used by Linux
- */
- cpu_count = 0;
- for (np = NULL; NULL != (np = of_find_node_by_type(np, "cpu"));)
- cpu_count++;
-
- DBG("counted %d CPUs in the device-tree\n", cpu_count);
-
- /* Decide the type of PID algorithm to use based on the presence of
-	 * the pumps; that may not be the best way, but it is good enough
- * for now
- */
- if (rackmac)
- cpu_pid_type = CPU_PID_TYPE_RACKMAC;
- else if (of_machine_is_compatible("PowerMac7,3")
- && (cpu_count > 1)
- && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID
- && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) {
- printk(KERN_INFO "Liquid cooling pumps detected, using new algorithm !\n");
- cpu_pid_type = CPU_PID_TYPE_COMBINED;
- } else
- cpu_pid_type = CPU_PID_TYPE_SPLIT;
-
- /* Create control loops for everything. If any fail, everything
- * fails
- */
- if (init_processor_state(&processor_state[0], 0))
- goto fail;
- if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
- fetch_cpu_pumps_minmax();
-
- if (cpu_count > 1 && init_processor_state(&processor_state[1], 1))
- goto fail;
- if (init_backside_state(&backside_state))
- goto fail;
- if (rackmac && init_dimms_state(&dimms_state))
- goto fail;
- if (rackmac && init_slots_state(&slots_state))
- goto fail;
- if (!rackmac && init_drives_state(&drives_state))
- goto fail;
-
- DBG("all control loops up !\n");
-
- return 0;
-
- fail:
- DBG("failure creating control loops, disposing\n");
-
- dispose_control_loops();
-
- return -ENODEV;
-}
-
-/*
- * Start the control loops after everything is up, that is create
- * the thread that will make them run
- */
-static void start_control_loops(void)
-{
- init_completion(&ctrl_complete);
-
- ctrl_task = kthread_run(main_control_loop, NULL, "kfand");
-}
-
-/*
- * Stop the control loops when tearing down
- */
-static void stop_control_loops(void)
-{
- if (ctrl_task)
- wait_for_completion(&ctrl_complete);
-}
-
-/*
- * Attach to the i2c FCU after detecting U3-1 bus
- */
-static int attach_fcu(void)
-{
- fcu = attach_i2c_chip(FAN_CTRLER_ID, "fcu");
- if (fcu == NULL)
- return -ENODEV;
-
- DBG("FCU attached\n");
-
- return 0;
-}
-
-/*
- * Detach from the i2c FCU when tearing down
- */
-static void detach_fcu(void)
-{
- fcu = NULL;
-}
-
-/*
- * Attach to the i2c controller. We probe the various chips based
- * on the device-tree nodes and build everything for the driver to
- * run; we then kick the driver monitoring thread
- */
-static int therm_pm72_attach(struct i2c_adapter *adapter)
-{
- mutex_lock(&driver_lock);
-
- /* Check state */
- if (state == state_detached)
- state = state_attaching;
- if (state != state_attaching) {
- mutex_unlock(&driver_lock);
- return 0;
- }
-
- /* Check if we are looking for one of these */
- if (u3_0 == NULL && !strcmp(adapter->name, "u3 0")) {
- u3_0 = adapter;
- DBG("found U3-0\n");
- if (k2 || !rackmac)
- if (create_control_loops())
- u3_0 = NULL;
- } else if (u3_1 == NULL && !strcmp(adapter->name, "u3 1")) {
- u3_1 = adapter;
- DBG("found U3-1, attaching FCU\n");
- if (attach_fcu())
- u3_1 = NULL;
- } else if (k2 == NULL && !strcmp(adapter->name, "mac-io 0")) {
- k2 = adapter;
- DBG("Found K2\n");
- if (u3_0 && rackmac)
- if (create_control_loops())
- k2 = NULL;
- }
- /* We got all we need, start control loops */
- if (u3_0 != NULL && u3_1 != NULL && (k2 || !rackmac)) {
- DBG("everything up, starting control loops\n");
- state = state_attached;
- start_control_loops();
- }
- mutex_unlock(&driver_lock);
-
- return 0;
-}
-
-static int therm_pm72_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- /* Always succeed, the real work was done in therm_pm72_attach() */
- return 0;
-}
-
-/*
- * Called when any of the devices which participate in thermal management
- * is going away.
- */
-static int therm_pm72_remove(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- mutex_lock(&driver_lock);
-
- if (state != state_detached)
- state = state_detaching;
-
- /* Stop control loops if any */
- DBG("stopping control loops\n");
- mutex_unlock(&driver_lock);
- stop_control_loops();
- mutex_lock(&driver_lock);
-
- if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) {
- DBG("lost U3-0, disposing control loops\n");
- dispose_control_loops();
- u3_0 = NULL;
- }
-
- if (u3_1 != NULL && !strcmp(adapter->name, "u3 1")) {
- DBG("lost U3-1, detaching FCU\n");
- detach_fcu();
- u3_1 = NULL;
- }
- if (u3_0 == NULL && u3_1 == NULL)
- state = state_detached;
-
- mutex_unlock(&driver_lock);
-
- return 0;
-}
-
-/*
- * i2c_driver structure to attach to the host i2c controller
- */
-
-static const struct i2c_device_id therm_pm72_id[] = {
- /*
- * Fake device name, thermal management is done by several
- * chips but we don't need to differentiate between them at
- * this point.
- */
- { "therm_pm72", 0 },
- { }
-};
-
-static struct i2c_driver therm_pm72_driver = {
- .driver = {
- .name = "therm_pm72",
- },
- .attach_adapter = therm_pm72_attach,
- .probe = therm_pm72_probe,
- .remove = therm_pm72_remove,
- .id_table = therm_pm72_id,
-};
-
-static int fan_check_loc_match(const char *loc, int fan)
-{
- char tmp[64];
- char *c, *e;
-
- strlcpy(tmp, fcu_fans[fan].loc, 64);
-
- c = tmp;
- for (;;) {
- e = strchr(c, ',');
- if (e)
- *e = 0;
- if (strcmp(loc, c) == 0)
- return 1;
- if (e == NULL)
- break;
- c = e + 1;
- }
- return 0;
-}
-
-static void fcu_lookup_fans(struct device_node *fcu_node)
-{
- struct device_node *np = NULL;
- int i;
-
- /* The table is filled by default with values that are suitable
-	 * for the old machines without device-tree information. We scan
- * the device-tree and override those values with whatever is
- * there
- */
-
- DBG("Looking up FCU controls in device-tree...\n");
-
- while ((np = of_get_next_child(fcu_node, np)) != NULL) {
- int type = -1;
- const char *loc;
- const u32 *reg;
-
- DBG(" control: %s, type: %s\n", np->name, np->type);
-
- /* Detect control type */
- if (!strcmp(np->type, "fan-rpm-control") ||
- !strcmp(np->type, "fan-rpm"))
- type = FCU_FAN_RPM;
- if (!strcmp(np->type, "fan-pwm-control") ||
- !strcmp(np->type, "fan-pwm"))
- type = FCU_FAN_PWM;
- /* Only care about fans for now */
- if (type == -1)
- continue;
-
- /* Lookup for a matching location */
- loc = of_get_property(np, "location", NULL);
- reg = of_get_property(np, "reg", NULL);
- if (loc == NULL || reg == NULL)
- continue;
- DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg);
-
- for (i = 0; i < FCU_FAN_COUNT; i++) {
- int fan_id;
-
- if (!fan_check_loc_match(loc, i))
- continue;
- DBG(" location match, index: %d\n", i);
- fcu_fans[i].id = FCU_FAN_ABSENT_ID;
- if (type != fcu_fans[i].type) {
- printk(KERN_WARNING "therm_pm72: Fan type mismatch "
- "in device-tree for %s\n", np->full_name);
- break;
- }
- if (type == FCU_FAN_RPM)
- fan_id = ((*reg) - 0x10) / 2;
- else
- fan_id = ((*reg) - 0x30) / 2;
- if (fan_id > 7) {
- printk(KERN_WARNING "therm_pm72: Can't parse "
- "fan ID in device-tree for %s\n", np->full_name);
- break;
- }
- DBG(" fan id -> %d, type -> %d\n", fan_id, type);
- fcu_fans[i].id = fan_id;
- }
- }
-
- /* Now dump the array */
- printk(KERN_INFO "Detected fan controls:\n");
- for (i = 0; i < FCU_FAN_COUNT; i++) {
- if (fcu_fans[i].id == FCU_FAN_ABSENT_ID)
- continue;
- printk(KERN_INFO " %d: %s fan, id %d, location: %s\n", i,
- fcu_fans[i].type == FCU_FAN_RPM ? "RPM" : "PWM",
- fcu_fans[i].id, fcu_fans[i].loc);
- }
-}
-
-static int fcu_of_probe(struct platform_device* dev)
-{
- state = state_detached;
- of_dev = dev;
-
- dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
-
- /* Lookup the fans in the device tree */
- fcu_lookup_fans(dev->dev.of_node);
-
- /* Add the driver */
- return i2c_add_driver(&therm_pm72_driver);
-}
-
-static int fcu_of_remove(struct platform_device* dev)
-{
- i2c_del_driver(&therm_pm72_driver);
-
- return 0;
-}
-
-static const struct of_device_id fcu_match[] =
-{
- {
- .type = "fcu",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, fcu_match);
-
-static struct platform_driver fcu_of_platform_driver =
-{
- .driver = {
- .name = "temperature",
- .of_match_table = fcu_match,
- },
- .probe = fcu_of_probe,
- .remove = fcu_of_remove
-};
-
-/*
- * Check machine type, attach to i2c controller
- */
-static int __init therm_pm72_init(void)
-{
- rackmac = of_machine_is_compatible("RackMac3,1");
-
- if (!of_machine_is_compatible("PowerMac7,2") &&
- !of_machine_is_compatible("PowerMac7,3") &&
- !rackmac)
- return -ENODEV;
-
- return platform_driver_register(&fcu_of_platform_driver);
-}
-
-static void __exit therm_pm72_exit(void)
-{
- platform_driver_unregister(&fcu_of_platform_driver);
-}
-
-module_init(therm_pm72_init);
-module_exit(therm_pm72_exit);
-
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("Driver for Apple's PowerMac G5 thermal control");
-MODULE_LICENSE("GPL");
-
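
The DIMM and slots loops deleted above implement a discrete PID entirely in fixed point: temperatures and errors are 16.16 values, and the final "sum >>= 36" shift suggests the G_p/G_r/G_d gains carry 20 fractional bits. Below is a minimal stand-alone sketch of that update step, for illustration only; pid_step() is not a function from the driver and the 20-fractional-bit reading of the gains is an assumption.

#include <linux/kernel.h>	/* s32/s64, clamp_t() */

/*
 * One PID step in the style of do_monitor_slots()/do_monitor_dimms():
 * err_hist[] holds 16.16 fixed-point errors, the gains are assumed to carry
 * 20 fractional bits, so each product is shifted right by 36 bits to give an
 * integer fan command, then clamped to the loop's output range.
 */
static int pid_step(const s32 *err_hist, int hist_size, int cur,
		    s32 g_p, s32 g_r, s32 g_d, int interval,
		    int out_min, int out_max)
{
	int i, prev = (cur + hist_size - 1) % hist_size;
	s32 integral = 0, derivative;
	s64 sum = 0;

	for (i = 0; i < hist_size; i++)		/* integral term */
		integral += err_hist[i];
	integral *= interval;
	sum += (s64)g_r * (s64)integral;

	derivative = (err_hist[cur] - err_hist[prev]) / interval;
	sum += (s64)g_d * (s64)derivative;	/* derivative term */

	sum += (s64)g_p * (s64)err_hist[cur];	/* proportional term */

	sum >>= 36;				/* drop the fractional bits */
	return clamp_t(int, (int)sum, out_min, out_max);
}

The individual loops differ only in their constants (history size, interval, gains, output range) and in whether the clamped result drives a PWM percentage or an RPM target.
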
diff --git a/drivers/macintosh/therm_pm72.h b/drivers/macintosh/therm_pm72.h
deleted file mode 100644
index df3680e2a22f..000000000000
--- a/drivers/macintosh/therm_pm72.h
+++ /dev/null
@@ -1,326 +0,0 @@
-#ifndef __THERM_PMAC_7_2_H__
-#define __THERM_PMAC_7_2_H__
-
-typedef unsigned short fu16;
-typedef int fs32;
-typedef short fs16;
-
-struct mpu_data
-{
- u8 signature; /* 0x00 - EEPROM sig. */
- u8 bytes_used; /* 0x01 - Bytes used in eeprom (160 ?) */
- u8 size; /* 0x02 - EEPROM size (256 ?) */
- u8 version; /* 0x03 - EEPROM version */
- u32 data_revision; /* 0x04 - Dataset revision */
- u8 processor_bin_code[3]; /* 0x08 - Processor BIN code */
- u8 bin_code_expansion; /* 0x0b - ??? (padding ?) */
- u8 processor_num; /* 0x0c - Number of CPUs on this MPU */
- u8 input_mul_bus_div; /* 0x0d - Clock input multiplier/bus divider */
- u8 reserved1[2]; /* 0x0e - */
- u32 input_clk_freq_high; /* 0x10 - Input clock frequency high */
- u8 cpu_nb_target_cycles; /* 0x14 - ??? */
- u8 cpu_statlat; /* 0x15 - ??? */
- u8 cpu_snooplat; /* 0x16 - ??? */
- u8 cpu_snoopacc; /* 0x17 - ??? */
- u8 nb_paamwin; /* 0x18 - ??? */
- u8 nb_statlat; /* 0x19 - ??? */
- u8 nb_snooplat; /* 0x1a - ??? */
- u8 nb_snoopwin; /* 0x1b - ??? */
- u8 api_bus_mode; /* 0x1c - ??? */
- u8 reserved2[3]; /* 0x1d - */
- u32 input_clk_freq_low; /* 0x20 - Input clock frequency low */
- u8 processor_card_slot; /* 0x24 - Processor card slot number */
- u8 reserved3[2]; /* 0x25 - */
- u8 padjmax; /* 0x27 - Max power adjustment (Not in OF!) */
- u8 ttarget; /* 0x28 - Target temperature */
- u8 tmax; /* 0x29 - Max temperature */
- u8 pmaxh; /* 0x2a - Max power */
- u8 tguardband; /* 0x2b - Guardband temp ??? Hist. len in OSX */
- fs32 pid_gp; /* 0x2c - PID proportional gain */
- fs32 pid_gr; /* 0x30 - PID reset gain */
- fs32 pid_gd; /* 0x34 - PID derivative gain */
- fu16 voph; /* 0x38 - Vop High */
- fu16 vopl; /* 0x3a - Vop Low */
- fs16 nactual_die; /* 0x3c - nActual Die */
- fs16 nactual_heatsink; /* 0x3e - nActual Heatsink */
- fs16 nactual_system; /* 0x40 - nActual System */
- u16 calibration_flags; /* 0x42 - Calibration flags */
- fu16 mdiode; /* 0x44 - Diode M value (scaling factor) */
- fs16 bdiode; /* 0x46 - Diode B value (offset) */
- fs32 theta_heat_sink; /* 0x48 - Theta heat sink */
- u16 rminn_intake_fan; /* 0x4c - Intake fan min RPM */
- u16 rmaxn_intake_fan; /* 0x4e - Intake fan max RPM */
- u16 rminn_exhaust_fan; /* 0x50 - Exhaust fan min RPM */
- u16 rmaxn_exhaust_fan; /* 0x52 - Exhaust fan max RPM */
- u8 processor_part_num[8]; /* 0x54 - Processor part number XX pumps min/max */
- u32 processor_lot_num; /* 0x5c - Processor lot number */
- u8 orig_card_sernum[0x10]; /* 0x60 - Card original serial number */
- u8 curr_card_sernum[0x10]; /* 0x70 - Card current serial number */
- u8 mlb_sernum[0x18]; /* 0x80 - MLB serial number */
- u32 checksum1; /* 0x98 - */
- u32 checksum2; /* 0x9c - */
-}; /* Total size = 0xa0 */
-
-/* Display a 16.16 fixed point value */
-#define FIX32TOPRINT(f) ((f) >> 16),((((f) & 0xffff) * 1000) >> 16)
-
-/*
- * Maximum number of seconds to be in critical state (after a
- * normal shutdown attempt). If the machine isn't down after
- * this counter elapses, we force an immediate machine power
- * off.
- */
-#define MAX_CRITICAL_STATE 30
-static char * critical_overtemp_path = "/sbin/critical_overtemp";
-
-/*
- * This option is "weird" :) Basically, if you define this to 1
- * the control loop for the RPM fans (not PWMs) will apply the
- * correction factor obtained from the PID to the _actual_ RPM
- * speed read from the FCU.
- * If you define the below constant to 0, then it will be
- * applied to the setpoint RPM speed, that is basically the
- * speed we previously "asked" for.
- *
- * I'm not sure which of these Apple's algorithm is supposed
- * to use
- */
-#define RPM_PID_USE_ACTUAL_SPEED 0
-
-/*
- * i2c IDs. Currently, we hard code those and assume that
- * the FCU is on U3 bus 1 while all sensors are on U3 bus
- * 0. This appears to be safe enough for this first version
- * of the driver, though I would accept any clean patch
- * making better use of the device-tree without turning the
- * whole i2c registration mechanism into a racy mess
- *
- * Note: Xserve changed this. We have some bits on the K2 bus,
- * which I arbitrarily set to 0x200. Ultimately, we really want
- * to look up these in the device-tree though
- */
-#define FAN_CTRLER_ID 0x15e
-#define SUPPLY_MONITOR_ID 0x58
-#define SUPPLY_MONITORB_ID 0x5a
-#define DRIVES_DALLAS_ID 0x94
-#define BACKSIDE_MAX_ID 0x98
-#define XSERVE_DIMMS_LM87 0x25a
-#define XSERVE_SLOTS_LM75 0x290
-
-/*
- * Some MAX6690, DS1775, LM87 register definitions
- */
-#define MAX6690_INT_TEMP 0
-#define MAX6690_EXT_TEMP 1
-#define DS1775_TEMP 0
-#define LM87_INT_TEMP 0x27
-
-/*
- * Scaling factors for the AD7417 ADC converters (except
- * for the CPU diode which is obtained from the EEPROM).
- * Those values are obtained from the property list of
- * the darwin driver
- */
-#define ADC_12V_CURRENT_SCALE 0x0320 /* _AD2 */
-#define ADC_CPU_VOLTAGE_SCALE 0x00a0 /* _AD3 */
-#define ADC_CPU_CURRENT_SCALE 0x1f40 /* _AD4 */
-
-/*
- * PID factors for the U3/Backside fan control loop. We have 2 sets
- * of values here, one set for U3 and one set for U3H
- */
-#define BACKSIDE_FAN_PWM_DEFAULT_ID 1
-#define BACKSIDE_FAN_PWM_INDEX 0
-#define BACKSIDE_PID_U3_G_d 0x02800000
-#define BACKSIDE_PID_U3H_G_d 0x01400000
-#define BACKSIDE_PID_RACK_G_d 0x00500000
-#define BACKSIDE_PID_G_p 0x00500000
-#define BACKSIDE_PID_RACK_G_p 0x0004cccc
-#define BACKSIDE_PID_G_r 0x00000000
-#define BACKSIDE_PID_U3_INPUT_TARGET 0x00410000
-#define BACKSIDE_PID_U3H_INPUT_TARGET 0x004b0000
-#define BACKSIDE_PID_RACK_INPUT_TARGET 0x00460000
-#define BACKSIDE_PID_INTERVAL 5
-#define BACKSIDE_PID_RACK_INTERVAL 1
-#define BACKSIDE_PID_OUTPUT_MAX 100
-#define BACKSIDE_PID_U3_OUTPUT_MIN 20
-#define BACKSIDE_PID_U3H_OUTPUT_MIN 20
-#define BACKSIDE_PID_HISTORY_SIZE 2
-
-struct basckside_pid_params
-{
- s32 G_d;
- s32 G_p;
- s32 G_r;
- s32 input_target;
- s32 output_min;
- s32 output_max;
- s32 interval;
- int additive;
-};
-
-struct backside_pid_state
-{
- int ticks;
- struct i2c_client * monitor;
- s32 sample_history[BACKSIDE_PID_HISTORY_SIZE];
- s32 error_history[BACKSIDE_PID_HISTORY_SIZE];
- int cur_sample;
- s32 last_temp;
- int pwm;
- int first;
-};
-
-/*
- * PID factors for the Drive Bay fan control loop
- */
-#define DRIVES_FAN_RPM_DEFAULT_ID 2
-#define DRIVES_FAN_RPM_INDEX 1
-#define DRIVES_PID_G_d 0x01e00000
-#define DRIVES_PID_G_p 0x00500000
-#define DRIVES_PID_G_r 0x00000000
-#define DRIVES_PID_INPUT_TARGET 0x00280000
-#define DRIVES_PID_INTERVAL 5
-#define DRIVES_PID_OUTPUT_MAX 4000
-#define DRIVES_PID_OUTPUT_MIN 300
-#define DRIVES_PID_HISTORY_SIZE 2
-
-struct drives_pid_state
-{
- int ticks;
- struct i2c_client * monitor;
- s32 sample_history[BACKSIDE_PID_HISTORY_SIZE];
- s32 error_history[BACKSIDE_PID_HISTORY_SIZE];
- int cur_sample;
- s32 last_temp;
- int rpm;
- int first;
-};
-
-#define SLOTS_FAN_PWM_DEFAULT_ID 2
-#define SLOTS_FAN_PWM_INDEX 2
-#define SLOTS_FAN_DEFAULT_PWM 40 /* Do better here ! */
-
-
-/*
- * PID factors for the Xserve DIMM control loop
- */
-#define DIMM_PID_G_d 0
-#define DIMM_PID_G_p 0
-#define DIMM_PID_G_r 0x06553600
-#define DIMM_PID_INPUT_TARGET 3276800
-#define DIMM_PID_INTERVAL 1
-#define DIMM_PID_OUTPUT_MAX 14000
-#define DIMM_PID_OUTPUT_MIN 4000
-#define DIMM_PID_HISTORY_SIZE 20
-
-struct dimm_pid_state
-{
- int ticks;
- struct i2c_client * monitor;
- s32 sample_history[DIMM_PID_HISTORY_SIZE];
- s32 error_history[DIMM_PID_HISTORY_SIZE];
- int cur_sample;
- s32 last_temp;
- int first;
- int output;
-};
-
-
-/*
- * PID factors for the Xserve Slots control loop
- */
-#define SLOTS_PID_G_d 0
-#define SLOTS_PID_G_p 0
-#define SLOTS_PID_G_r 0x00100000
-#define SLOTS_PID_INPUT_TARGET 3200000
-#define SLOTS_PID_INTERVAL 1
-#define SLOTS_PID_OUTPUT_MAX 100
-#define SLOTS_PID_OUTPUT_MIN 20
-#define SLOTS_PID_HISTORY_SIZE 20
-
-struct slots_pid_state
-{
- int ticks;
- struct i2c_client * monitor;
- s32 sample_history[SLOTS_PID_HISTORY_SIZE];
- s32 error_history[SLOTS_PID_HISTORY_SIZE];
- int cur_sample;
- s32 last_temp;
- int first;
- int pwm;
-};
-
-
-
-/* Desktops */
-
-#define CPUA_INTAKE_FAN_RPM_DEFAULT_ID 3
-#define CPUA_EXHAUST_FAN_RPM_DEFAULT_ID 4
-#define CPUB_INTAKE_FAN_RPM_DEFAULT_ID 5
-#define CPUB_EXHAUST_FAN_RPM_DEFAULT_ID 6
-
-#define CPUA_INTAKE_FAN_RPM_INDEX 3
-#define CPUA_EXHAUST_FAN_RPM_INDEX 4
-#define CPUB_INTAKE_FAN_RPM_INDEX 5
-#define CPUB_EXHAUST_FAN_RPM_INDEX 6
-
-#define CPU_INTAKE_SCALE 0x0000f852
-#define CPU_TEMP_HISTORY_SIZE 2
-#define CPU_POWER_HISTORY_SIZE 10
-#define CPU_PID_INTERVAL 1
-#define CPU_MAX_OVERTEMP 90
-
-#define CPUA_PUMP_RPM_INDEX 7
-#define CPUB_PUMP_RPM_INDEX 8
-#define CPU_PUMP_OUTPUT_MAX 3200
-#define CPU_PUMP_OUTPUT_MIN 1250
-
-/* Xserve */
-#define CPU_A1_FAN_RPM_INDEX 9
-#define CPU_A2_FAN_RPM_INDEX 10
-#define CPU_A3_FAN_RPM_INDEX 11
-#define CPU_B1_FAN_RPM_INDEX 12
-#define CPU_B2_FAN_RPM_INDEX 13
-#define CPU_B3_FAN_RPM_INDEX 14
-
-
-struct cpu_pid_state
-{
- int index;
- struct i2c_client * monitor;
- struct mpu_data mpu;
- int overtemp;
- s32 temp_history[CPU_TEMP_HISTORY_SIZE];
- int cur_temp;
- s32 power_history[CPU_POWER_HISTORY_SIZE];
- s32 error_history[CPU_POWER_HISTORY_SIZE];
- int cur_power;
- int count_power;
- int rpm;
- int intake_rpm;
- s32 voltage;
- s32 current_a;
- s32 last_temp;
- s32 last_power;
- int first;
- u8 adc_config;
- s32 pump_min;
- s32 pump_max;
-};
-
-/* Tickle FCU every 10 seconds */
-#define FCU_TICKLE_TICKS 10
-
-/*
- * Driver state
- */
-enum {
- state_detached,
- state_attaching,
- state_attached,
- state_detaching,
-};
-
-
-#endif /* __THERM_PMAC_7_2_H__ */
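
As a reference for the DBG() formats used throughout the removed code: FIX32TOPRINT() expands to two printf-style arguments, the integer part and a 0-999 "milli" part, which is why it is always paired with a "%d.%03d" format. A stand-alone user-space illustration (not driver code):

#include <stdio.h>

/* Same macro as in the deleted header: split a 16.16 fixed-point value into
 * an integer part and a milli part for printing. */
#define FIX32TOPRINT(f) ((f) >> 16), ((((f) & 0xffff) * 1000) >> 16)

int main(void)
{
	int temp = 0x004b8000;				/* 75.5 in 16.16 */

	printf("temp: %d.%03d\n", FIX32TOPRINT(temp));	/* prints "temp: 75.500" */
	return 0;
}
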
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index aa915da2a5e5..82abfce1cb42 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -176,7 +176,6 @@ STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
-static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
STATIC struct device_attribute *NCR_700_dev_attrs[];
@@ -326,7 +325,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
tpnt->slave_destroy = NCR_700_slave_destroy;
tpnt->slave_alloc = NCR_700_slave_alloc;
tpnt->change_queue_depth = NCR_700_change_queue_depth;
- tpnt->change_queue_type = NCR_700_change_queue_type;
tpnt->use_blk_tags = 1;
if(tpnt->name == NULL)
@@ -904,8 +902,8 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
SCp->device->tagged_supported = 0;
+ SCp->device->simple_tags = 0;
scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
- scsi_set_tag_type(SCp->device, 0);
} else {
shost_printk(KERN_WARNING, host,
"(%d:%d) Unexpected REJECT Message %s\n",
@@ -1818,8 +1816,8 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
}
- if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
- && scsi_get_tag_type(SCp->device)) {
+ if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
+ SCp->device->simple_tags) {
slot->tag = SCp->request->tag;
CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
slot->tag, slot);
@@ -2082,39 +2080,6 @@ NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
return scsi_change_queue_depth(SDp, depth);
}
-static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
-{
- int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
- || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
- struct NCR_700_Host_Parameters *hostdata =
- (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
-
- /* We have a global (per target) flag to track whether TCQ is
- * enabled, so we'll be turning it off for the entire target here.
- * our tag algorithm will fail if we mix tagged and untagged commands,
- * so quiesce the device before doing this */
- if (change_tag)
- scsi_target_quiesce(SDp->sdev_target);
-
- scsi_set_tag_type(SDp, tag_type);
- if (!tag_type) {
- /* shift back to the default unqueued number of commands
- * (the user can still raise this) */
- scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun);
- hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
- } else {
- /* Here, we cleared the negotiation flag above, so this
- * will force the driver to renegotiate */
- scsi_change_queue_depth(SDp, SDp->queue_depth);
- if (change_tag)
- NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
- }
- if (change_tag)
- scsi_target_resume(SDp->sdev_target);
-
- return tag_type;
-}
-
static ssize_t
NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
{
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 86cf3d671eb9..9c92f415229f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1462,18 +1462,17 @@ config SCSI_WD719X
SCSI controllers (based on WD33C296A chip).
config SCSI_DEBUG
- tristate "SCSI debugging host simulator"
+ tristate "SCSI debugging host and device simulator"
depends on SCSI
select CRC_T10DIF
help
- This is a host adapter simulator that can simulate multiple hosts
- each with multiple dummy SCSI devices (disks). It defaults to one
- host adapter with one dummy SCSI disk. Each dummy disk uses kernel
- RAM as storage (i.e. it is a ramdisk). To save space when multiple
- dummy disks are simulated, they share the same kernel RAM for
- their storage. See <http://sg.danny.cz/sg/sdebug26.html> for more
- information. This driver is primarily of use to those testing the
- SCSI and block subsystems. If unsure, say N.
+ This pseudo driver simulates one or more hosts (SCSI initiators),
+ each with one or more targets, each with one or more logical units.
+ Defaults to one of each, creating a small RAM disk device. Many
+ parameters found in the /sys/bus/pseudo/drivers/scsi_debug
+ directory can be tweaked at run time.
+ See <http://sg.danny.cz/sg/sdebug26.html> for more information.
+ Mainly used for testing and best as a module. If unsure, say N.
config SCSI_MESH
tristate "MESH (Power Mac internal SCSI) support"
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 6719a3390ebd..2c5ce48c8f95 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7921,9 +7921,9 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
*/
if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
(boardp->reqcnt[scp->device->id] % 255) == 0) {
- asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG;
+ asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG;
} else {
- asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG;
+ asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG;
}
/* Build ASC_SCSI_Q */
@@ -8351,7 +8351,7 @@ static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
}
q_addr = ASC_QNO_TO_QADDR(q_no);
if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
- scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
+ scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
}
scsiq->q1.status = QS_FREE;
AscMemWordCopyPtrToLram(iop_base,
@@ -8669,7 +8669,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
}
}
if (disable_syn_offset_one_fix) {
- scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
+ scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
ASC_TAG_FLAG_DISABLE_DISCONNECT);
} else {
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 14fc018436c2..02a2512b76a8 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -63,7 +63,6 @@ static struct scsi_host_template aic94xx_sht = {
.scan_finished = asd_scan_finished,
.scan_start = asd_scan_start,
.change_queue_depth = sas_change_queue_depth,
- .change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
.can_queue = 1,
.cmd_per_lun = 1,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e861f286b42e..98d06d151958 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2792,7 +2792,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
.eh_host_reset_handler = fc_eh_host_reset,
.slave_alloc = fc_slave_alloc,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4b56858c1df2..9ecca8504f60 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1737,11 +1737,7 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
fcp_cmnd->fc_pri_ta = 0;
fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
fcp_cmnd->fc_flags = io_req->io_req_flags;
-
- if (sc_cmd->flags & SCMD_TAGGED)
- fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
- else
- fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
}
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 51ea5dc5f084..3987284e0d2a 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -172,10 +172,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
fcp_cmnd->fc_cmdref = 0;
memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
- if (scmnd->flags & SCMD_TAGGED)
- fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
- else
- fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
if (req->nsge)
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index b7dc59fca7a6..7bd376d95ed5 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -684,9 +684,9 @@ static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
* 1) verify the fi_version is correct
* 2) verify the checksum of the entire image.
* 3) validate the adap_typ, action and length fields.
- * 4) valdiate each component header. check the img_type and
+ * 4) validate each component header. check the img_type and
* length fields
- * 5) valdiate each component image. validate signatures and
+ * 5) validate each component image. validate signatures and
* local checksums
*/
static bool verify_fi(struct esas2r_adapter *a,
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 593ff8a63c70..7e1c21e6736b 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -255,7 +255,6 @@ static struct scsi_host_template driver_template = {
.emulated = 0,
.proc_name = ESAS2R_DRVR_NAME,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.max_sectors = 0xFFFF,
.use_blk_tags = 1,
};
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cd00a6cdf55b..ec193a8357d7 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -281,7 +281,6 @@ static struct scsi_host_template fcoe_shost_template = {
.eh_host_reset_handler = fc_eh_host_reset,
.slave_alloc = fc_slave_alloc,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 0c1f8177b5b7..8a0d4d7b3254 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -111,7 +111,6 @@ static struct scsi_host_template fnic_host_template = {
.eh_host_reset_handler = fnic_host_reset,
.slave_alloc = fnic_slave_alloc,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FNIC_DFLT_IO_REQ,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index f58c6d8e0264..057d27721d5b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1615,7 +1615,6 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct ibmvfc_cmd *vfc_cmd;
struct ibmvfc_event *evt;
- u8 tag[2];
int rc;
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -3089,7 +3088,6 @@ static struct scsi_host_template driver_template = {
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.cmd_per_lun = 16,
.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
.this_id = -1,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 540294389355..df4e27cd996a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1426,16 +1426,14 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
if (res->sdev) {
res->del_from_ml = 1;
res->res_handle = IPR_INVALID_RES_HANDLE;
- if (ioa_cfg->allow_ml_add_del)
- schedule_work(&ioa_cfg->work_q);
+ schedule_work(&ioa_cfg->work_q);
} else {
ipr_clear_res_target(res);
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}
} else if (!res->sdev || res->del_from_ml) {
res->add_to_ml = 1;
- if (ioa_cfg->allow_ml_add_del)
- schedule_work(&ioa_cfg->work_q);
+ schedule_work(&ioa_cfg->work_q);
}
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
@@ -3273,8 +3271,7 @@ static void ipr_worker_thread(struct work_struct *work)
restart:
do {
did_work = 0;
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
- !ioa_cfg->allow_ml_add_del) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
@@ -3311,6 +3308,7 @@ restart:
}
}
+ ioa_cfg->scan_done = 1;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
LEAVE;
@@ -4346,30 +4344,6 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
}
/**
- * ipr_change_queue_type - Change the device's queue type
- * @dsev: scsi device struct
- * @tag_type: type of tags to use
- *
- * Return value:
- * actual queue type set
- **/
-static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
- struct ipr_resource_entry *res;
- unsigned long lock_flags = 0;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- res = (struct ipr_resource_entry *)sdev->hostdata;
- if (res && ipr_is_gscsi(res))
- tag_type = scsi_change_queue_type(sdev, tag_type);
- else
- tag_type = 0;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return tag_type;
-}
-
-/**
* ipr_show_adapter_handle - Show the adapter's resource handle for this device
* @dev: device struct
* @attr: device attribute structure
@@ -4739,6 +4713,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->no_uld_attach = 1;
}
if (ipr_is_vset_device(res)) {
+ sdev->scsi_level = SCSI_SPC_3;
blk_queue_rq_timeout(sdev->request_queue,
IPR_VSET_RW_TIMEOUT);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
@@ -5231,6 +5206,28 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
* @scsi_cmd: scsi command struct
*
* Return value:
+ * 0 if scan in progress / 1 if scan is complete
+ **/
+static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
+{
+ unsigned long lock_flags;
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+ int rc = 0;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
+ rc = 1;
+ if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
+ rc = 1;
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * ipr_eh_host_reset - Reset the host adapter
+ * @scsi_cmd: scsi command struct
+ *
+ * Return value:
* SUCCESS / FAILED
**/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
@@ -5779,7 +5776,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
- if (!scsi_get_tag_type(scsi_cmd->device)) {
+ if (!scsi_cmd->device->simple_tags) {
ipr_erp_request_sense(ipr_cmd);
return;
}
@@ -6299,10 +6296,10 @@ static struct scsi_host_template driver_template = {
.slave_alloc = ipr_slave_alloc,
.slave_configure = ipr_slave_configure,
.slave_destroy = ipr_slave_destroy,
+ .scan_finished = ipr_scan_finished,
.target_alloc = ipr_target_alloc,
.target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth,
- .change_queue_type = ipr_change_queue_type,
.bios_param = ipr_biosparam,
.can_queue = IPR_MAX_COMMANDS,
.this_id = -1,
@@ -6841,7 +6838,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
+ if (res->add_to_ml || res->del_from_ml) {
ipr_trace;
break;
}
@@ -6870,6 +6867,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
scsi_block_requests(ioa_cfg->host);
+ schedule_work(&ioa_cfg->work_q);
LEAVE;
return IPR_RC_JOB_RETURN;
}
@@ -7610,6 +7608,19 @@ static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
type[4] = '\0';
ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
+ if (ipr_invalid_adapter(ioa_cfg)) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Adapter not supported in this hardware configuration.\n");
+
+ if (!ipr_testmode) {
+ ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ list_add_tail(&ipr_cmd->queue,
+ &ioa_cfg->hrrq->hrrq_free_q);
+ return IPR_RC_JOB_RETURN;
+ }
+ }
+
ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
ipr_ioafp_inquiry(ipr_cmd, 1, 0,
@@ -8797,20 +8808,6 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
-
- if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
- rc = -EIO;
- } else if (ipr_invalid_adapter(ioa_cfg)) {
- if (!ipr_testmode)
- rc = -EIO;
-
- dev_err(&ioa_cfg->pdev->dev,
- "Adapter not supported in this hardware configuration.\n");
- }
-
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
LEAVE;
return rc;
@@ -9264,7 +9261,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
* ioa_cfg->max_devs_supported)));
}
- host->max_channel = IPR_MAX_BUS_TO_SCAN;
+ host->max_channel = IPR_VSET_BUS;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
host->can_queue = ioa_cfg->max_cmds;
@@ -9764,25 +9761,6 @@ out_scsi_host_put:
}
/**
- * ipr_scan_vsets - Scans for VSET devices
- * @ioa_cfg: ioa config struct
- *
- * Description: Since the VSET resources do not follow SAM in that we can have
- * sparse LUNs with no LUN 0, we have to scan for these ourselves.
- *
- * Return value:
- * none
- **/
-static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
-{
- int target, lun;
-
- for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
- for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
- scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
-}
-
-/**
* ipr_initiate_ioa_bringdown - Bring down an adapter
* @ioa_cfg: ioa config struct
* @shutdown_type: shutdown type
@@ -9937,10 +9915,6 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
}
scsi_scan_host(ioa_cfg->host);
- ipr_scan_vsets(ioa_cfg);
- scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
- ioa_cfg->allow_ml_add_del = 1;
- ioa_cfg->host->max_channel = IPR_VSET_BUS;
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ebdebd944e7..b4f3eec51bc9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -157,13 +157,11 @@
#define IPR_MAX_NUM_TARGETS_PER_BUS 256
#define IPR_MAX_NUM_LUNS_PER_TARGET 256
-#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8
#define IPR_VSET_BUS 0xff
#define IPR_IOA_BUS 0xff
#define IPR_IOA_TARGET 0xff
#define IPR_IOA_LUN 0xff
#define IPR_MAX_NUM_BUSES 16
-#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES
#define IPR_NUM_RESET_RELOAD_RETRIES 3
@@ -1453,7 +1451,7 @@ struct ipr_ioa_cfg {
u8 in_ioa_bringdown:1;
u8 ioa_unit_checked:1;
u8 dump_taken:1;
- u8 allow_ml_add_del:1;
+ u8 scan_done:1;
u8 needs_hard_reset:1;
u8 dual_raid:1;
u8 needs_warm_reset:1;
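
The ipr hunks above drop the hand-rolled VSET scanning and the allow_ml_add_del gate in favour of the midlayer's asynchronous scan hooks: scsi_scan_host() keeps polling the template's scan_finished() until it reports completion. A hedged sketch of that contract for a hypothetical driver follows; the example_* names are illustrative and not taken from ipr.

#include <linux/module.h>
#include <scsi/scsi_host.h>

struct example_host {			/* hypothetical hostdata, allocated via
					 * scsi_host_alloc(&example_template,
					 * sizeof(struct example_host)) */
	int scan_done;			/* set by the driver's worker thread */
};

/* Polled by scsi_scan_host(); return nonzero once discovery is complete,
 * with a timeout fallback so a wedged adapter cannot stall the scan. */
static int example_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
	struct example_host *eh = shost_priv(shost);

	return eh->scan_done || elapsed > 60 * HZ;
}

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.scan_finished	= example_scan_finished,
	/* .scan_start, .queuecommand, ... omitted for brevity */
};
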
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 724c6265b667..cd41b63a2f10 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -158,7 +158,6 @@ static struct scsi_host_template isci_sht = {
.scan_finished = isci_host_scan_finished,
.scan_start = isci_host_start,
.change_queue_depth = sas_change_queue_depth,
- .change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
.can_queue = ISCI_CAN_QUEUE_VAL,
.cmd_per_lun = 1,
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 72918d227ead..519dac4e341e 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -906,13 +906,6 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
return scsi_change_queue_depth(sdev, depth);
}
-int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
-{
- if (dev_is_sata(sdev_to_domain_dev(scsi_dev)))
- return -EINVAL;
- return scsi_change_queue_type(scsi_dev, type);
-}
-
int sas_bios_param(struct scsi_device *scsi_dev,
struct block_device *bdev,
sector_t capacity, int *hsc)
@@ -1011,7 +1004,6 @@ EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
-EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fd85952b621d..4f9222eb2266 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5879,7 +5879,6 @@ struct scsi_host_template lpfc_template = {
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.use_blk_tags = 1,
.track_queue_depth = 1,
};
@@ -5904,7 +5903,6 @@ struct scsi_host_template lpfc_vport_template = {
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.use_blk_tags = 1,
.track_queue_depth = 1,
};
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 8431eb10bbb1..6a1c036a6f3f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -7592,7 +7592,6 @@ static struct scsi_host_template scsih_driver_template = {
.scan_finished = _scsih_scan_finished,
.scan_start = _scsih_scan_start,
.change_queue_depth = _scsih_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.eh_abort_handler = _scsih_abort,
.eh_device_reset_handler = _scsih_dev_reset,
.eh_target_reset_handler = _scsih_target_reset,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 0d1d06488a28..e689bf20a3ea 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
&mpt2sas_phy->remote_identify);
_transport_add_phy_to_an_existing_port(ioc, sas_node,
mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
- } else {
+ } else
memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
- _transport_del_phy_from_an_existing_port(ioc, sas_node,
- mpt2sas_phy);
- }
if (mpt2sas_phy->phy)
mpt2sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a2b60991efd4..94261ee9e72d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7229,7 +7229,6 @@ static struct scsi_host_template scsih_driver_template = {
.scan_finished = _scsih_scan_finished,
.scan_start = _scsih_scan_start,
.change_queue_depth = _scsih_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.eh_abort_handler = _scsih_abort,
.eh_device_reset_handler = _scsih_dev_reset,
.eh_target_reset_handler = _scsih_target_reset,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d4bafaaebea9..3637ae6c0171 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
&mpt3sas_phy->remote_identify);
_transport_add_phy_to_an_existing_port(ioc, sas_node,
mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
- } else {
+ } else
memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
- _transport_del_phy_from_an_existing_port(ioc, sas_node,
- mpt3sas_phy);
- }
if (mpt3sas_phy->phy)
mpt3sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index f15df3de6790..53030b0e8015 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -54,7 +54,6 @@ static struct scsi_host_template mvs_sht = {
.scan_finished = mvs_scan_finished,
.scan_start = mvs_scan_start,
.change_queue_depth = sas_change_queue_depth,
- .change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
.can_queue = 1,
.cmd_per_lun = 1,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 329aba0083ab..65555916d3b8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -76,7 +76,6 @@ static struct scsi_host_template pm8001_sht = {
.scan_finished = pm8001_scan_finished,
.scan_start = pm8001_scan_start,
.change_queue_depth = sas_change_queue_depth,
- .change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
.can_queue = 1,
.cmd_per_lun = 1,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b1b1f66b1ab7..8c27b6a77ec4 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4251,7 +4251,6 @@ static struct scsi_host_template pmcraid_host_template = {
.slave_configure = pmcraid_slave_configure,
.slave_destroy = pmcraid_slave_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.can_queue = PMCRAID_MAX_IO_CMD,
.this_id = -1,
.sg_tablesize = PMCRAID_MAX_IOADLS,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a4dde7e80dbd..e59f25bff7ab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3237,8 +3237,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport *rport;
unsigned long flags;
- qla2x00_rport_del(fcport);
-
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6b4d9235368a..12ca291c1380 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -258,7 +258,6 @@ struct scsi_host_template qla2xxx_driver_template = {
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a902fa1db7af..57418258c101 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3218,25 +3218,25 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
switch (task_codes) {
case ATIO_SIMPLE_QUEUE:
- fcp_task_attr = MSG_SIMPLE_TAG;
+ fcp_task_attr = TCM_SIMPLE_TAG;
break;
case ATIO_HEAD_OF_QUEUE:
- fcp_task_attr = MSG_HEAD_TAG;
+ fcp_task_attr = TCM_HEAD_TAG;
break;
case ATIO_ORDERED_QUEUE:
- fcp_task_attr = MSG_ORDERED_TAG;
+ fcp_task_attr = TCM_ORDERED_TAG;
break;
case ATIO_ACA_QUEUE:
- fcp_task_attr = MSG_ACA_TAG;
+ fcp_task_attr = TCM_ACA_TAG;
break;
case ATIO_UNTAGGED:
- fcp_task_attr = MSG_SIMPLE_TAG;
+ fcp_task_attr = TCM_SIMPLE_TAG;
break;
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
"qla_target: unknown task code %x, use ORDERED instead\n",
task_codes);
- fcp_task_attr = MSG_ORDERED_TAG;
+ fcp_task_attr = TCM_ORDERED_TAG;
break;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1ad0c36375b8..e02885451425 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -739,34 +739,12 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
if (sdev->last_queue_full_count <= 10)
return 0;
- if (sdev->last_queue_full_depth < 8) {
- /* Drop back to untagged */
- scsi_set_tag_type(sdev, 0);
- scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
- return -1;
- }
return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL(scsi_track_queue_full);
/**
- * scsi_change_queue_type() - Change a device's queue type
- * @sdev: The SCSI device whose queue depth is to change
- * @tag_type: Identifier for queue type
- */
-int scsi_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
- if (!sdev->tagged_supported)
- return 0;
-
- scsi_set_tag_type(sdev, tag_type);
- return tag_type;
-
-}
-EXPORT_SYMBOL(scsi_change_queue_type);
-
-/**
* scsi_vpd_inquiry - Request a device provide us with a VPD page
* @sdev: The device to ask
* @buffer: Where to put the result
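
With scsi_change_queue_type() removed above, the converted drivers no longer offer a queue-type callback at all: tag usage is reported per command through SCMD_TAGGED (and per device through sdev->simple_tags), while depth changes go through the generic scsi_change_queue_depth(). A sketch of the resulting idiom, mirroring the scsi_transport_spi change further below; example_populate_tag_msg() is a hypothetical stand-in, not a function added by this series.

#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Build the two-byte SPI tag message for a command: SCMD_TAGGED is the only
 * thing the driver needs to check, and the message byte is the protocol
 * constant SIMPLE_QUEUE_TAG rather than the old MSG_SIMPLE_TAG alias. */
static int example_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
{
	if (cmd->flags & SCMD_TAGGED) {
		*msg++ = SIMPLE_QUEUE_TAG;
		*msg++ = cmd->request->tag;
		return 2;
	}
	return 0;
}
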
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index aa4b6b80aade..7b8b51bc29b4 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -128,7 +128,6 @@ static const char *scsi_debug_version_date = "20141022";
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
#define DEF_SECTOR_SIZE 512
-#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
@@ -817,6 +816,7 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
if (debug)
cp = "capacity data changed";
+ break;
default:
pr_warn("%s: unexpected unit attention code=%d\n",
__func__, k);
@@ -3045,18 +3045,12 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 num;
unsigned long iflags;
int ret;
+ int retval = 0;
- lba = get_unaligned_be32(cmd + 2);
+ lba = get_unaligned_be64(cmd + 2);
num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
if (0 == num)
return 0; /* degenerate case, not an error */
- dnum = 2 * num;
- arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
- if (NULL == arr) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
- INSUFF_RES_ASCQ);
- return check_condition_result;
- }
if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
(cmd[1] & 0xe0)) {
mk_sense_invalid_opcode(scp);
@@ -3079,6 +3073,13 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
+ dnum = 2 * num;
+ arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
+ if (NULL == arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
write_lock_irqsave(&atomic_rw, iflags);
@@ -3089,24 +3090,24 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ret = do_device_access(scp, 0, dnum, true);
fake_storep = fake_storep_hold;
if (ret == -1) {
- write_unlock_irqrestore(&atomic_rw, iflags);
- kfree(arr);
- return DID_ERROR << 16;
+ retval = DID_ERROR << 16;
+ goto cleanup;
} else if ((ret < (dnum * lb_size)) &&
(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
"indicated=%u, IO sent=%d bytes\n", my_name,
dnum * lb_size, ret);
if (!comp_write_worker(lba, num, arr)) {
- write_unlock_irqrestore(&atomic_rw, iflags);
- kfree(arr);
mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
- return check_condition_result;
+ retval = check_condition_result;
+ goto cleanup;
}
if (scsi_debug_lbp())
map_region(lba, num);
+cleanup:
write_unlock_irqrestore(&atomic_rw, iflags);
- return 0;
+ kfree(arr);
+ return retval;
}
struct unmap_block_desc {
@@ -4438,6 +4439,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
+ spin_lock(&sdebug_host_list_lock);
list_for_each_entry(sdhp, &sdebug_host_list,
host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -4446,6 +4448,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
dp->uas_bm);
}
}
+ spin_unlock(&sdebug_host_list_lock);
}
return count;
}
@@ -4988,32 +4991,6 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
}
static int
-sdebug_change_qtype(struct scsi_device *sdev, int qtype)
-{
- qtype = scsi_change_queue_type(sdev, qtype);
- if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
- const char *cp;
-
- switch (qtype) {
- case 0:
- cp = "untagged";
- break;
- case MSG_SIMPLE_TAG:
- cp = "simple tags";
- break;
- case MSG_ORDERED_TAG:
- cp = "ordered tags";
- break;
- default:
- cp = "unknown";
- break;
- }
- sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
- }
- return qtype;
-}
-
-static int
check_inject(struct scsi_cmnd *scp)
{
struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
@@ -5212,7 +5189,6 @@ static struct scsi_host_template sdebug_driver_template = {
.ioctl = scsi_debug_ioctl,
.queuecommand = sdebug_queuecommand_lock_or_not,
.change_queue_depth = sdebug_change_qdepth,
- .change_queue_type = sdebug_change_qtype,
.eh_abort_handler = scsi_debug_abort,
.eh_device_reset_handler = scsi_debug_device_reset,
.eh_target_reset_handler = scsi_debug_target_reset,
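
Besides dropping sdebug_change_qtype(), the scsi_debug changes above rework resp_comp_write() so the buffer is allocated only after the CDB checks pass and every failure funnels through a single cleanup label that drops the lock and frees the buffer exactly once. The same shape in isolation, with hypothetical example_* helpers standing in for the fake-store accesses:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_dev {				/* hypothetical device state */
	rwlock_t rw_lock;
};

/* Hypothetical helpers, declared only to keep the sketch compilable. */
int example_read_blocks(struct example_dev *d, u64 lba, u32 num, u8 *buf);
int example_blocks_match(const u8 *buf, u32 num);

static int example_compare_write(struct example_dev *dev, u64 lba, u32 num)
{
	unsigned long flags;
	int retval = 0;
	u8 *arr;

	if (!num)
		return 0;			/* degenerate case, not an error */

	/* allocate only after the argument checks, as in the reworked hunk */
	arr = kzalloc(2 * num * 512, GFP_ATOMIC);
	if (!arr)
		return -ENOMEM;

	write_lock_irqsave(&dev->rw_lock, flags);
	if (example_read_blocks(dev, lba, num, arr)) {
		retval = -EIO;
		goto cleanup;
	}
	if (!example_blocks_match(arr, num)) {
		retval = -EILSEQ;
		goto cleanup;
	}
cleanup:
	write_unlock_irqrestore(&dev->rw_lock, flags);	/* unlock exactly once */
	kfree(arr);					/* free exactly once */
	return retval;
}
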
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c1d04d4d3c6c..262ab837a704 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -211,6 +211,7 @@ static struct {
{"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
{"MegaRAID", "LD", NULL, BLIST_FORCELUN},
{"MICROP", "4110", NULL, BLIST_NOTQ},
+ {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
{"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
{"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
{"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 43318d556cbc..9ea95dd3e260 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1918,7 +1918,9 @@ static int scsi_mq_prep_fn(struct request *req)
if (scsi_host_get_prot(shost)) {
cmd->prot_sdb = (void *)sg +
- shost->sg_tablesize * sizeof(struct scatterlist);
+ min_t(unsigned int,
+ shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
+ sizeof(struct scatterlist);
memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
cmd->prot_sdb->table.sgl =
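The scsi_lib.c hunk above clamps the advertised sg_tablesize to SCSI_MAX_SG_SEGMENTS before computing where the protection buffer starts, so a host that advertises a larger table cannot push the pointer past the inline scatterlist area. A small runnable sketch of the same clamped-offset computation; MAX_SEGMENTS and struct segment are hypothetical stand-ins:

#include <stdio.h>
#include <stddef.h>

#define MAX_SEGMENTS 128U   /* hypothetical stand-in for SCSI_MAX_SG_SEGMENTS */

struct segment { void *addr; unsigned int len; };

/* The command buffer only holds MAX_SEGMENTS inline segments, so the
 * offset of the trailing area is computed from the clamped value, not
 * from whatever size the host advertises. */
static size_t prot_area_offset(unsigned int advertised_tablesize)
{
    unsigned int n = advertised_tablesize < MAX_SEGMENTS ?
                     advertised_tablesize : MAX_SEGMENTS;
    return (size_t)n * sizeof(struct segment);
}

int main(void)
{
    printf("offset for 64 segments:   %zu\n", prot_area_offset(64));
    printf("offset for 4096 segments: %zu\n", prot_area_offset(4096)); /* clamped */
    return 0;
}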
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1cb64a8e18c9..1ac38e73df7e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -738,30 +738,12 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
- int tag_type = 0, retval;
- int prev_tag_type = scsi_get_tag_type(sdev);
-
- if (!sdev->tagged_supported || !sht->change_queue_type)
- return -EINVAL;
- /*
- * We're never issueing order tags these days, but allow the value
- * for backwards compatibility.
- */
- if (strncmp(buf, "ordered", 7) == 0 ||
- strncmp(buf, "simple", 6) == 0)
- tag_type = MSG_SIMPLE_TAG;
- else if (strncmp(buf, "none", 4) != 0)
+ if (!sdev->tagged_supported)
return -EINVAL;
-
- if (tag_type == prev_tag_type)
- return count;
-
- retval = sht->change_queue_type(sdev, tag_type);
- if (retval < 0)
- return retval;
-
+
+ sdev_printk(KERN_INFO, sdev,
+ "ignoring write to deprecated queue_type attribute");
return count;
}
@@ -938,10 +920,6 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
!sdev->host->hostt->change_queue_depth)
return 0;
- if (attr == &dev_attr_queue_type.attr &&
- !sdev->host->hostt->change_queue_type)
- return S_IRUGO;
-
return attr->mode;
}
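With change_queue_type gone, the store_queue_type_field() hunk above keeps the sysfs attribute writable for backward compatibility but discards the value. A tiny user-space sketch of that accept-and-ignore setter shape (names are hypothetical):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Backward-compatible setter for a retired tunable: the write is
 * accepted so old scripts do not start failing, but the value is
 * ignored and a notice is emitted instead. */
static ssize_t queue_type_store(const char *buf, size_t count)
{
    (void)buf;
    fprintf(stderr, "ignoring write to deprecated queue_type attribute\n");
    return (ssize_t)count;          /* report the full write as consumed */
}

int main(void)
{
    const char *val = "simple\n";
    printf("stored %zd bytes\n", queue_type_store(val, strlen(val)));
    return 0;
}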
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index fa2aece76cc2..31bbb0da3397 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1221,7 +1221,7 @@ EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
{
if (cmd->flags & SCMD_TAGGED) {
- *msg++ = MSG_SIMPLE_TAG;
+ *msg++ = SIMPLE_QUEUE_TAG;
*msg++ = cmd->request->tag;
return 2;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index e3ba251fb6e7..4cff0ddc2c25 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1688,13 +1688,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
if (ret == -EAGAIN) {
/* no more space */
- if (cmd_request->bounce_sgl_count) {
+ if (cmd_request->bounce_sgl_count)
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- ret = SCSI_MLQUEUE_DEVICE_BUSY;
- goto queue_error;
- }
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto queue_error;
}
return 0;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 43781c9fe521..b410499cddca 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -663,7 +663,7 @@ static int img_spfi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -692,7 +692,7 @@ static int img_spfi_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 0e48f8c2037d..1bbac0378bf7 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -413,7 +413,7 @@ static int meson_spifc_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int meson_spifc_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -431,7 +431,7 @@ static int meson_spifc_runtime_resume(struct device *dev)
return clk_prepare_enable(spifc->clk);
}
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
static const struct dev_pm_ops meson_spifc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
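Both SPI hunks above only rename the preprocessor guard: with CONFIG_PM_RUNTIME folded into CONFIG_PM, the runtime-PM callbacks must be built whenever SET_RUNTIME_PM_OPS() expands to something. A minimal sketch of the resulting shape for a hypothetical "foo" driver (a fragment, not a buildable module):

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
/* hypothetical system sleep handlers, present in the real drivers */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }
#endif

#ifdef CONFIG_PM
static int foo_runtime_suspend(struct device *dev)
{
        /* quiesce the controller, disable clocks */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* re-enable clocks */
        return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};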
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 8156b4c0f568..3925db160650 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -42,28 +42,6 @@
#include "lustre_patchless_compat.h"
-# define LOCK_FS_STRUCT(fs) spin_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs) spin_unlock(&(fs)->lock)
-
-static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
- struct dentry *dentry)
-{
- struct path path;
- struct path old_pwd;
-
- path.mnt = mnt;
- path.dentry = dentry;
- LOCK_FS_STRUCT(fs);
- old_pwd = fs->pwd;
- path_get(&path);
- fs->pwd = path;
- UNLOCK_FS_STRUCT(fs);
-
- if (old_pwd.dentry)
- path_put(&old_pwd);
-}
-
-
/*
* set ATTR_BLOCKS to a high value to avoid any risk of collision with other
* ATTR_* attributes (see bug 13828)
@@ -110,8 +88,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
#define cfs_bio_io_error(a, b) bio_io_error((a))
#define cfs_bio_endio(a, b, c) bio_endio((a), (c))
-#define cfs_fs_pwd(fs) ((fs)->pwd.dentry)
-#define cfs_fs_mnt(fs) ((fs)->pwd.mnt)
#define cfs_path_put(nd) path_put(&(nd)->path)
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 407718a0026f..1ac7a702ce26 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -661,7 +661,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
int mode;
int err;
- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
+ mode = (0755 & ~current_umask()) | S_IFDIR;
op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
strlen(filename), mode, LUSTRE_OPC_MKDIR,
lump);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6e423aa6a6e4..a3367bfb1456 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -2372,21 +2372,6 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
return buf;
}
-static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
-{
- char *path = NULL;
-
- struct path p;
-
- p.dentry = dentry;
- p.mnt = current->fs->root.mnt;
- path_get(&p);
- path = d_path(&p, buf, bufsize);
- path_put(&p);
-
- return path;
-}
-
void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
char *buf, *path = NULL;
@@ -2398,7 +2383,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
if (buf != NULL) {
dentry = d_find_alias(page->mapping->host);
if (dentry != NULL)
- path = ll_d_path(dentry, buf, PAGE_SIZE);
+ path = dentry_path_raw(dentry, buf, PAGE_SIZE);
}
CDEBUG(D_WARNING,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 73e58d22e325..55f6774f706f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void)
return ret;
r2t_out:
+ iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_r2t_cache);
ooo_out:
kmem_cache_destroy(lio_ooo_cache);
@@ -943,17 +944,17 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
*/
if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
(iscsi_task_attr == ISCSI_ATTR_SIMPLE))
- sam_task_attr = MSG_SIMPLE_TAG;
+ sam_task_attr = TCM_SIMPLE_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
- sam_task_attr = MSG_ORDERED_TAG;
+ sam_task_attr = TCM_ORDERED_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
- sam_task_attr = MSG_HEAD_TAG;
+ sam_task_attr = TCM_HEAD_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ACA)
- sam_task_attr = MSG_ACA_TAG;
+ sam_task_attr = TCM_ACA_TAG;
else {
pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
- " MSG_SIMPLE_TAG\n", iscsi_task_attr);
- sam_task_attr = MSG_SIMPLE_TAG;
+ " TCM_SIMPLE_TAG\n", iscsi_task_attr);
+ sam_task_attr = TCM_SIMPLE_TAG;
}
cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
@@ -1811,7 +1812,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
transport_init_se_cmd(&cmd->se_cmd,
&lio_target_fabric_configfs->tf_ops,
conn->sess->se_sess, 0, DMA_NONE,
- MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
sess_ref = true;
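The iscsit_setup_scsi_cmd() hunk above switches from the SPI message codes to the transport-neutral TCM_* task attributes while keeping the same mapping and the same fall-back to the simple tag for unknown values. A runnable sketch of that mapping; the enum values here are hypothetical stand-ins for the real iSCSI and target-core constants:

#include <stdio.h>

enum iscsi_attr { ATTR_UNTAGGED, ATTR_SIMPLE, ATTR_ORDERED, ATTR_HOQ, ATTR_ACA };
enum tcm_attr   { SIMPLE_TAG, ORDERED_TAG, HEAD_TAG, ACA_TAG };

/* Mirror of the mapping in the hunk above: unknown attributes fall
 * back to the simple tag rather than failing the command. */
static enum tcm_attr map_task_attr(int iscsi_attr)
{
    switch (iscsi_attr) {
    case ATTR_UNTAGGED:
    case ATTR_SIMPLE:
        return SIMPLE_TAG;
    case ATTR_ORDERED:
        return ORDERED_TAG;
    case ATTR_HOQ:
        return HEAD_TAG;
    case ATTR_ACA:
        return ACA_TAG;
    default:
        fprintf(stderr, "unknown task attribute 0x%02x, using simple\n",
                iscsi_attr);
        return SIMPLE_TAG;
    }
}

int main(void)
{
    printf("%d\n", map_task_attr(ATTR_ORDERED));
    printf("%d\n", map_task_attr(0x7f));
    return 0;
}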
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 302eb3b78715..09a522bae222 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -790,7 +790,6 @@ struct iscsi_np {
void *np_context;
struct iscsit_transport *np_transport;
struct list_head np_list;
- struct iscsi_tpg_np *tpg_np;
} ____cacheline_aligned;
struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 480f2e0ecc11..713c0c1877ab 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
{
struct iscsi_session *sess = NULL;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
- enum target_prot_op sup_pro_ops;
int ret;
sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
- sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
- sess->se_sess = transport_init_session(sup_pro_ops);
+ sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
}
kfree(conn->sess->sess_ops);
kfree(conn->sess);
+ conn->sess = NULL;
old_sess_out:
iscsi_stop_login_thread_timer(np);
@@ -1204,6 +1203,9 @@ old_sess_out:
conn->sock = NULL;
}
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
@@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
login->zero_tsih = zero_tsih;
+ conn->sess->se_sess->sup_prot_ops =
+ conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+
tpg = conn->tpg;
if (!tpg) {
pr_err("Unable to locate struct iscsi_conn->tpg\n");
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index c3cb5c15efda..9053a3c0c6e5 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
init_completion(&tpg_np->tpg_np_comp);
kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
- np->tpg_np = tpg_np;
tpg_np->tpg = tpg;
spin_lock(&tpg->tpg_np_lock);
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 882728fac30c..08217d62fb0d 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type)
void iscsit_put_transport(struct iscsit_transport *t)
{
- if (t->owner)
- module_put(t->owner);
+ module_put(t->owner);
}
int iscsit_register_transport(struct iscsit_transport *t)
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7c6a95bcb35e..bcd88ec99793 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
struct iscsi_conn *conn,
struct iscsi_data_count *count)
{
- int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+ int ret, iov_len;
struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
- if (data <= 0) {
- pr_err("Data length is: %d\n", data);
+ if (count->data_length <= 0) {
+ pr_err("Data length is: %d\n", count->data_length);
return -1;
}
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
iov_p = count->iov;
iov_len = count->iov_count;
- while (total_tx < data) {
- tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
- (data - total_tx));
- if (tx_loop <= 0) {
- pr_debug("tx_loop: %d total_tx %d\n",
- tx_loop, total_tx);
- return tx_loop;
- }
- total_tx += tx_loop;
- pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
- tx_loop, total_tx, data);
+ ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+ count->data_length);
+ if (ret != count->data_length) {
+ pr_err("Unexpected ret: %d send data %d\n",
+ ret, count->data_length);
+ return -EPIPE;
}
+ pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
- return total_tx;
+ return ret;
}
int rx_data(
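The iscsit_do_tx_data() hunk above drops the retry loop in favour of a single blocking send and treats any short transfer as a hard error (-EPIPE). A user-space analogue of that all-or-nothing helper, using a plain write() on a file descriptor (helper name and setup are hypothetical):

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

/* Single-shot variant of the old retry loop: issue one write and treat
 * anything other than a full transfer as a hard error. */
static int tx_all_or_fail(int fd, const void *buf, size_t len)
{
    ssize_t ret = write(fd, buf, len);

    if (ret < 0)
        return -errno;
    if ((size_t)ret != len) {
        fprintf(stderr, "unexpected short send: %zd of %zu\n", ret, len);
        return -EPIPE;
    }
    return (int)ret;
}

int main(void)
{
    const char msg[] = "hello\n";
    return tx_all_or_fail(STDOUT_FILENO, msg, sizeof(msg) - 1) < 0;
}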
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4d1b7224a7f2..6b3c32954689 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
goto out_done;
}
- tl_nexus = tl_hba->tl_nexus;
+ tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
" does not exist\n");
@@ -168,7 +168,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
- transfer_length, MSG_SIMPLE_TAG,
+ transfer_length, TCM_SIMPLE_TAG,
sc->sc_data_direction, 0,
scsi_sglist(sc), scsi_sg_count(sc),
sgl_bidi, sgl_bidi_count,
@@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* to struct scsi_device
*/
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
- struct tcm_loop_nexus *tl_nexus,
int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
struct se_session *se_sess;
struct se_portal_group *se_tpg;
+ struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_cmd *tl_cmd = NULL;
struct tcm_loop_tmr *tl_tmr = NULL;
int ret = TMR_FUNCTION_FAILED, rc;
+ /*
+ * Locate the tl_nexus and se_sess pointers
+ */
+ tl_nexus = tl_tpg->tl_nexus;
+ if (!tl_nexus) {
+ pr_err("Unable to perform device reset without"
+ " active I_T Nexus\n");
+ return ret;
+ }
+
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
@@ -243,12 +253,12 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
se_cmd = &tl_cmd->tl_se_cmd;
se_tpg = &tl_tpg->tl_se_tpg;
- se_sess = tl_nexus->se_sess;
+ se_sess = tl_tpg->tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
- DMA_NONE, MSG_SIMPLE_TAG,
+ DMA_NONE, TCM_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
@@ -288,7 +298,6 @@ release:
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
- struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
@@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
- /*
- * Locate the tl_nexus and se_sess pointers
- */
- tl_nexus = tl_hba->tl_nexus;
- if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
- }
-
- /*
- * Locate the tl_tpg pointer from TargetID in sc->device->id
- */
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
sc->request->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
- struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
@@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
- /*
- * Locate the tl_nexus and se_sess pointers
- */
- tl_nexus = tl_hba->tl_nexus;
- if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
- }
- /*
- * Locate the tl_tpg pointer from TargetID in sc->device->id
- */
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
0, TMR_LUN_RESET);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
@@ -385,7 +369,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
- .change_queue_type = scsi_change_queue_type,
.eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
.eh_target_reset_handler = tcm_loop_target_reset,
@@ -940,8 +923,8 @@ static int tcm_loop_make_nexus(
struct tcm_loop_nexus *tl_nexus;
int ret = -ENOMEM;
- if (tl_tpg->tl_hba->tl_nexus) {
- pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+ if (tl_tpg->tl_nexus) {
+ pr_debug("tl_tpg->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
@@ -976,7 +959,7 @@ static int tcm_loop_make_nexus(
*/
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
tl_nexus->se_sess, tl_nexus);
- tl_tpg->tl_hba->tl_nexus = tl_nexus;
+ tl_tpg->tl_nexus = tl_nexus;
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
@@ -992,12 +975,8 @@ static int tcm_loop_drop_nexus(
{
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
- struct tcm_loop_hba *tl_hba = tpg->tl_hba;
- if (!tl_hba)
- return -ENODEV;
-
- tl_nexus = tl_hba->tl_nexus;
+ tl_nexus = tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -1013,13 +992,13 @@ static int tcm_loop_drop_nexus(
}
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+ " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated SAS Target Port
*/
transport_deregister_session(tl_nexus->se_sess);
- tpg->tl_hba->tl_nexus = NULL;
+ tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
}
@@ -1035,7 +1014,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
struct tcm_loop_nexus *tl_nexus;
ssize_t ret;
- tl_nexus = tl_tpg->tl_hba->tl_nexus;
+ tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
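The tcm_loop hunks above move the nexus pointer from the HBA to the portal group and make tcm_loop_issue_tmr() resolve it itself, so the abort/reset callers no longer repeat the lookup. A toy sketch of that "resolve the shared pointer inside the helper" shape, with hypothetical struct names:

#include <stdio.h>
#include <stddef.h>

struct nexus { const char *initiator; };
struct tpg   { struct nexus *nexus; };

/* The helper does its own nexus lookup and rejects the request if the
 * portal group has no active I_T nexus, mirroring the hunk above. */
static int issue_tmr(struct tpg *tpg, int lun)
{
    struct nexus *nexus = tpg->nexus;

    if (!nexus) {
        fprintf(stderr, "no active I_T nexus for LUN %d\n", lun);
        return -1;
    }
    printf("TMR for LUN %d via %s\n", lun, nexus->initiator);
    return 0;
}

int main(void)
{
    struct nexus n = { .initiator = "iqn.example:init" };
    struct tpg with_nexus = { .nexus = &n };
    struct tpg no_nexus = { .nexus = NULL };

    issue_tmr(&with_nexus, 0);
    issue_tmr(&no_nexus, 0);   /* rejected: no nexus configured */
    return 0;
}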
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 54c59d0b6608..6ae49f272ba6 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
};
struct tcm_loop_nexus {
- int it_nexus_active;
- /*
- * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
- */
- struct scsi_host *sh;
/*
* Pointer to TCM session for I_T Nexus
*/
@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
};
struct tcm_loop_hba {
@@ -59,7 +55,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
- struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e7e93727553c..9512af6a8114 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1237,7 +1237,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
req->sense_buf, unpacked_lun, data_length,
- MSG_SIMPLE_TAG, data_dir, 0))
+ TCM_SIMPLE_TAG, data_dir, 0))
goto err;
return;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 79f9296a08ae..75d89adfccc0 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -50,6 +50,19 @@
#include "target_core_rd.h"
#include "target_core_xcopy.h"
+#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
+static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
+{ \
+ struct target_backend_cits *tbc = &sa->tb_cits; \
+ struct config_item_type *cit = &tbc->tb_##_name##_cit; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = _attrs; \
+ cit->ct_owner = sa->owner; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
+
extern struct t10_alua_lu_gp *default_lu_gp;
static LIST_HEAD(g_tf_list);
@@ -126,48 +139,57 @@ static struct config_group *target_core_register_fabric(
pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
- /*
- * Below are some hardcoded request_module() calls to automatically
- * local fabric modules when the following is called:
- *
- * mkdir -p /sys/kernel/config/target/$MODULE_NAME
- *
- * Note that this does not limit which TCM fabric module can be
- * registered, but simply provids auto loading logic for modules with
- * mkdir(2) system calls with known TCM fabric modules.
- */
- if (!strncmp(name, "iscsi", 5)) {
+
+ tf = target_core_get_fabric(name);
+ if (!tf) {
+ pr_err("target_core_register_fabric() trying autoload for %s\n",
+ name);
+
/*
- * Automatically load the LIO Target fabric module when the
- * following is called:
+ * Below are some hardcoded request_module() calls to automatically
+ * local fabric modules when the following is called:
*
- * mkdir -p $CONFIGFS/target/iscsi
- */
- ret = request_module("iscsi_target_mod");
- if (ret < 0) {
- pr_err("request_module() failed for"
- " iscsi_target_mod.ko: %d\n", ret);
- return ERR_PTR(-EINVAL);
- }
- } else if (!strncmp(name, "loopback", 8)) {
- /*
- * Automatically load the tcm_loop fabric module when the
- * following is called:
+ * mkdir -p /sys/kernel/config/target/$MODULE_NAME
*
- * mkdir -p $CONFIGFS/target/loopback
+ * Note that this does not limit which TCM fabric module can be
+ * registered, but simply provids auto loading logic for modules with
+ * mkdir(2) system calls with known TCM fabric modules.
*/
- ret = request_module("tcm_loop");
- if (ret < 0) {
- pr_err("request_module() failed for"
- " tcm_loop.ko: %d\n", ret);
- return ERR_PTR(-EINVAL);
+
+ if (!strncmp(name, "iscsi", 5)) {
+ /*
+ * Automatically load the LIO Target fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/iscsi
+ */
+ ret = request_module("iscsi_target_mod");
+ if (ret < 0) {
+ pr_err("request_module() failed for"
+ " iscsi_target_mod.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else if (!strncmp(name, "loopback", 8)) {
+ /*
+ * Automatically load the tcm_loop fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/loopback
+ */
+ ret = request_module("tcm_loop");
+ if (ret < 0) {
+ pr_err("request_module() failed for"
+ " tcm_loop.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
}
+
+ tf = target_core_get_fabric(name);
}
- tf = target_core_get_fabric(name);
if (!tf) {
pr_err("target_core_get_fabric() failed for %s\n",
- name);
+ name);
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
@@ -562,198 +584,21 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister);
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
-/* Start functions for struct config_item_type target_core_dev_attrib_cit */
-
-#define DEF_DEV_ATTRIB_SHOW(_name) \
-static ssize_t target_core_dev_show_attr_##_name( \
- struct se_dev_attrib *da, \
- char *page) \
-{ \
- return snprintf(page, PAGE_SIZE, "%u\n", \
- (u32)da->da_dev->dev_attrib._name); \
-}
-
-#define DEF_DEV_ATTRIB_STORE(_name) \
-static ssize_t target_core_dev_store_attr_##_name( \
- struct se_dev_attrib *da, \
- const char *page, \
- size_t count) \
-{ \
- unsigned long val; \
- int ret; \
- \
- ret = kstrtoul(page, 0, &val); \
- if (ret < 0) { \
- pr_err("kstrtoul() failed with" \
- " ret: %d\n", ret); \
- return -EINVAL; \
- } \
- ret = se_dev_set_##_name(da->da_dev, (u32)val); \
- \
- return (!ret) ? count : -EINVAL; \
-}
-
-#define DEF_DEV_ATTRIB(_name) \
-DEF_DEV_ATTRIB_SHOW(_name); \
-DEF_DEV_ATTRIB_STORE(_name);
-
-#define DEF_DEV_ATTRIB_RO(_name) \
-DEF_DEV_ATTRIB_SHOW(_name);
+/* Start functions for struct config_item_type tb_dev_attrib_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
-#define SE_DEV_ATTR(_name, _mode) \
-static struct target_core_dev_attrib_attribute \
- target_core_dev_attrib_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_dev_show_attr_##_name, \
- target_core_dev_store_attr_##_name);
-
-#define SE_DEV_ATTR_RO(_name); \
-static struct target_core_dev_attrib_attribute \
- target_core_dev_attrib_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_dev_show_attr_##_name);
-
-DEF_DEV_ATTRIB(emulate_model_alias);
-SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_dpo);
-SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_fua_write);
-SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_fua_read);
-SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_write_cache);
-SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
-SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tas);
-SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tpu);
-SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tpws);
-SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_caw);
-SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_3pc);
-SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(pi_prot_type);
-SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
-SE_DEV_ATTR_RO(hw_pi_prot_type);
-
-DEF_DEV_ATTRIB(pi_prot_format);
-SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(enforce_pr_isids);
-SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(is_nonrot);
-SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_rest_reord);
-SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(force_pr_aptpl);
-SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_block_size);
-SE_DEV_ATTR_RO(hw_block_size);
-
-DEF_DEV_ATTRIB(block_size);
-SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_max_sectors);
-SE_DEV_ATTR_RO(hw_max_sectors);
-
-DEF_DEV_ATTRIB(fabric_max_sectors);
-SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(optimal_sectors);
-SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_queue_depth);
-SE_DEV_ATTR_RO(hw_queue_depth);
-
-DEF_DEV_ATTRIB(queue_depth);
-SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_unmap_lba_count);
-SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_unmap_block_desc_count);
-SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(unmap_granularity);
-SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(unmap_granularity_alignment);
-SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_write_same_len);
-SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
-
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
-static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
- &target_core_dev_attrib_emulate_model_alias.attr,
- &target_core_dev_attrib_emulate_dpo.attr,
- &target_core_dev_attrib_emulate_fua_write.attr,
- &target_core_dev_attrib_emulate_fua_read.attr,
- &target_core_dev_attrib_emulate_write_cache.attr,
- &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
- &target_core_dev_attrib_emulate_tas.attr,
- &target_core_dev_attrib_emulate_tpu.attr,
- &target_core_dev_attrib_emulate_tpws.attr,
- &target_core_dev_attrib_emulate_caw.attr,
- &target_core_dev_attrib_emulate_3pc.attr,
- &target_core_dev_attrib_pi_prot_type.attr,
- &target_core_dev_attrib_hw_pi_prot_type.attr,
- &target_core_dev_attrib_pi_prot_format.attr,
- &target_core_dev_attrib_enforce_pr_isids.attr,
- &target_core_dev_attrib_force_pr_aptpl.attr,
- &target_core_dev_attrib_is_nonrot.attr,
- &target_core_dev_attrib_emulate_rest_reord.attr,
- &target_core_dev_attrib_hw_block_size.attr,
- &target_core_dev_attrib_block_size.attr,
- &target_core_dev_attrib_hw_max_sectors.attr,
- &target_core_dev_attrib_fabric_max_sectors.attr,
- &target_core_dev_attrib_optimal_sectors.attr,
- &target_core_dev_attrib_hw_queue_depth.attr,
- &target_core_dev_attrib_queue_depth.attr,
- &target_core_dev_attrib_max_unmap_lba_count.attr,
- &target_core_dev_attrib_max_unmap_block_desc_count.attr,
- &target_core_dev_attrib_unmap_granularity.attr,
- &target_core_dev_attrib_unmap_granularity_alignment.attr,
- &target_core_dev_attrib_max_write_same_len.attr,
- NULL,
-};
-
static struct configfs_item_operations target_core_dev_attrib_ops = {
.show_attribute = target_core_dev_attrib_attr_show,
.store_attribute = target_core_dev_attrib_attr_store,
};
-static struct config_item_type target_core_dev_attrib_cit = {
- .ct_item_ops = &target_core_dev_attrib_ops,
- .ct_attrs = target_core_dev_attrib_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
-/* End functions for struct config_item_type target_core_dev_attrib_cit */
+/* End functions for struct config_item_type tb_dev_attrib_cit */
-/* Start functions for struct config_item_type target_core_dev_wwn_cit */
+/* Start functions for struct config_item_type tb_dev_wwn_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
#define SE_DEV_WWN_ATTR(_name, _mode) \
@@ -984,15 +829,11 @@ static struct configfs_item_operations target_core_dev_wwn_ops = {
.store_attribute = target_core_dev_wwn_attr_store,
};
-static struct config_item_type target_core_dev_wwn_cit = {
- .ct_item_ops = &target_core_dev_wwn_ops,
- .ct_attrs = target_core_dev_wwn_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);
-/* End functions for struct config_item_type target_core_dev_wwn_cit */
+/* End functions for struct config_item_type tb_dev_wwn_cit */
-/* Start functions for struct config_item_type target_core_dev_pr_cit */
+/* Start functions for struct config_item_type tb_dev_pr_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
#define SE_DEV_PR_ATTR(_name, _mode) \
@@ -1453,15 +1294,11 @@ static struct configfs_item_operations target_core_dev_pr_ops = {
.store_attribute = target_core_dev_pr_attr_store,
};
-static struct config_item_type target_core_dev_pr_cit = {
- .ct_item_ops = &target_core_dev_pr_ops,
- .ct_attrs = target_core_dev_pr_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
-/* End functions for struct config_item_type target_core_dev_pr_cit */
+/* End functions for struct config_item_type tb_dev_pr_cit */
-/* Start functions for struct config_item_type target_core_dev_cit */
+/* Start functions for struct config_item_type tb_dev_cit */
static ssize_t target_core_show_dev_info(void *p, char *page)
{
@@ -1925,7 +1762,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
.store = target_core_store_dev_lba_map,
};
-static struct configfs_attribute *lio_core_dev_attrs[] = {
+static struct configfs_attribute *target_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,
&target_core_attr_dev_alias.attr,
@@ -1984,13 +1821,9 @@ static struct configfs_item_operations target_core_dev_item_ops = {
.store_attribute = target_core_dev_store,
};
-static struct config_item_type target_core_dev_cit = {
- .ct_item_ops = &target_core_dev_item_ops,
- .ct_attrs = lio_core_dev_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
-/* End functions for struct config_item_type target_core_dev_cit */
+/* End functions for struct config_item_type tb_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
@@ -2670,7 +2503,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = {
/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
-/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_group *group,
@@ -2721,12 +2554,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
.drop_item = &target_core_alua_drop_tg_pt_gp,
};
-static struct config_item_type target_core_alua_tg_pt_gps_cit = {
- .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
-/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */
@@ -2744,7 +2574,7 @@ static struct config_item_type target_core_alua_cit = {
/* End functions for struct config_item_type target_core_alua_cit */
-/* Start functions for struct config_item_type target_core_stat_cit */
+/* Start functions for struct config_item_type tb_dev_stat_cit */
static struct config_group *target_core_stat_mkdir(
struct config_group *group,
@@ -2765,12 +2595,9 @@ static struct configfs_group_operations target_core_stat_group_ops = {
.drop_item = &target_core_stat_rmdir,
};
-static struct config_item_type target_core_stat_cit = {
- .ct_group_ops = &target_core_stat_group_ops,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
-/* End functions for struct config_item_type target_core_stat_cit */
+/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */
@@ -2806,17 +2633,17 @@ static struct config_group *target_core_make_subdev(
if (!dev_cg->default_groups)
goto out_free_device;
- config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+ config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
- &target_core_dev_attrib_cit);
+ &t->tb_cits.tb_dev_attrib_cit);
config_group_init_type_name(&dev->dev_pr_group, "pr",
- &target_core_dev_pr_cit);
+ &t->tb_cits.tb_dev_pr_cit);
config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
- &target_core_dev_wwn_cit);
+ &t->tb_cits.tb_dev_wwn_cit);
config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
- "alua", &target_core_alua_tg_pt_gps_cit);
+ "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
config_group_init_type_name(&dev->dev_stat_grps.stat_group,
- "statistics", &target_core_stat_cit);
+ "statistics", &t->tb_cits.tb_dev_stat_cit);
dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
dev_cg->default_groups[1] = &dev->dev_pr_group;
@@ -3110,6 +2937,17 @@ static struct config_item_type target_core_cit = {
/* Stop functions for struct config_item_type target_core_hba_cit */
+void target_core_setup_sub_cits(struct se_subsystem_api *sa)
+{
+ target_core_setup_dev_cit(sa);
+ target_core_setup_dev_attrib_cit(sa);
+ target_core_setup_dev_pr_cit(sa);
+ target_core_setup_dev_wwn_cit(sa);
+ target_core_setup_dev_alua_tg_pt_gps_cit(sa);
+ target_core_setup_dev_stat_cit(sa);
+}
+EXPORT_SYMBOL(target_core_setup_sub_cits);
+
static int __init target_core_init_configfs(void)
{
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
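The TB_CIT_SETUP() macro introduced above stamps out one setup function per backend config_item_type by pasting the name into both the function identifier and the struct member, which is what lets the hand-written *_cit definitions below be deleted. A small runnable sketch of the same token-pasting pattern (struct names and fields are hypothetical):

#include <stdio.h>

struct item_type { const char *name; int mode; };
struct cits { struct item_type dev; struct item_type attrib; };

/* Same shape as TB_CIT_SETUP: the macro generates one setup function
 * per item type, deriving both the function name and the member it
 * initialises from the macro argument. */
#define CIT_SETUP(_name, _mode)                                   \
static void setup_##_name##_cit(struct cits *c)                   \
{                                                                 \
        c->_name.name = #_name;                                   \
        c->_name.mode = (_mode);                                  \
        printf("setup generic %s\n", #_name);                     \
}

CIT_SETUP(dev, 0755)
CIT_SETUP(attrib, 0644)

int main(void)
{
    struct cits c;

    setup_dev_cit(&c);
    setup_attrib_cit(&c);
    return 0;
}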
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c45f9e907e44..7653cfb027a2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count(
dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
@@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count(
dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
int se_dev_set_unmap_granularity(
struct se_device *dev,
@@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity(
dev, dev->dev_attrib.unmap_granularity);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_unmap_granularity);
int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
@@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment(
dev, dev->dev_attrib.unmap_granularity_alignment);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
int se_dev_set_max_write_same_len(
struct se_device *dev,
@@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len(
dev, dev->dev_attrib.max_write_same_len);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_max_write_same_len);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
@@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
@@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_dpo);
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
@@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
-
- if (flag &&
- dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("emulate_fua_write not supported for pSCSI\n");
- return -EINVAL;
- }
dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
dev, dev->dev_attrib.emulate_fua_write);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
@@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
@@ -794,11 +797,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
return -EINVAL;
}
if (flag &&
- dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("emulate_write_cache not supported for pSCSI\n");
- return -EINVAL;
- }
- if (flag &&
dev->transport->get_write_cache) {
pr_err("emulate_write_cache not supported for this device\n");
return -EINVAL;
@@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
dev, dev->dev_attrib.emulate_write_cache);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
@@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
@@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_tas);
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
@@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_tpu);
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
@@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_tpws);
int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
@@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_caw);
int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
@@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_3pc);
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
@@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_pi_prot_type);
int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
@@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_pi_prot_format);
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
@@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
+EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
@@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
@@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_is_nonrot);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
@@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
/*
* Note, this can only be called on unexported SE Device Object.
@@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (queue_depth > dev->dev_attrib.queue_depth) {
if (queue_depth > dev->dev_attrib.hw_queue_depth) {
- pr_err("dev[%p]: Passed queue_depth: %u"
- " exceeds TCM/SE_Device TCQ: %u\n",
- dev, queue_depth,
+ pr_err("dev[%p]: Passed queue_depth:"
+ " %u exceeds TCM/SE_Device MAX"
+ " TCQ: %u\n", dev, queue_depth,
dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
- } else {
- if (queue_depth > dev->dev_attrib.queue_depth) {
- if (queue_depth > dev->dev_attrib.hw_queue_depth) {
- pr_err("dev[%p]: Passed queue_depth:"
- " %u exceeds TCM/SE_Device MAX"
- " TCQ: %u\n", dev, queue_depth,
- dev->dev_attrib.hw_queue_depth);
- return -EINVAL;
- }
- }
}
-
dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_queue_depth);
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
@@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
DA_STATUS_MAX_SECTORS_MIN);
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than TCM/SE_Device max_sectors:"
- " %u\n", dev, fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- return -EINVAL;
- }
- } else {
- if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
+ if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+ " greater than DA_STATUS_MAX_SECTORS_MAX:"
+ " %u\n", dev, fabric_max_sectors,
+ DA_STATUS_MAX_SECTORS_MAX);
+ return -EINVAL;
}
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
@@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
dev, fabric_max_sectors);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
@@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, dev->export_count);
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("dev[%p]: Passed optimal_sectors cannot be"
- " changed for TCM/pSCSI\n", dev);
- return -EINVAL;
- }
if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than fabric_max_sectors: %u\n", dev,
@@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, optimal_sectors);
return 0;
}
+EXPORT_SYMBOL(se_dev_set_optimal_sectors);
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
@@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return -EINVAL;
}
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("dev[%p]: Not allowed to change block_size for"
- " Physical Device, use for Linux/SCSI to change"
- " block_size for underlying hardware\n", dev);
- return -EINVAL;
- }
-
dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
@@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return 0;
}
+EXPORT_SYMBOL(se_dev_set_block_size);
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
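Each target_core_device.c hunk above follows one pattern: a formerly core-internal se_dev_set_*() helper gains an EXPORT_SYMBOL() so separately built backends (fileio, iblock, ...) can call it once they own the configfs attribute glue. A pattern sketch only, not buildable on its own; se_dev_set_example_flag and the example_flag attribute are hypothetical:

#include <linux/module.h>
#include <target/target_core_base.h>

int se_dev_set_example_flag(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1)
                return -EINVAL;
        dev->dev_attrib.example_flag = flag;    /* hypothetical attribute */
        return 0;
}
EXPORT_SYMBOL(se_dev_set_example_flag);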
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 72c83d98662b..c2aea099ea4a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,6 +37,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_file.h"
@@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
+DEF_TB_DEFAULT_ATTRIBS(fileio);
+
+static struct configfs_attribute *fileio_backend_dev_attrs[] = {
+ &fileio_dev_attrib_emulate_model_alias.attr,
+ &fileio_dev_attrib_emulate_dpo.attr,
+ &fileio_dev_attrib_emulate_fua_write.attr,
+ &fileio_dev_attrib_emulate_fua_read.attr,
+ &fileio_dev_attrib_emulate_write_cache.attr,
+ &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &fileio_dev_attrib_emulate_tas.attr,
+ &fileio_dev_attrib_emulate_tpu.attr,
+ &fileio_dev_attrib_emulate_tpws.attr,
+ &fileio_dev_attrib_emulate_caw.attr,
+ &fileio_dev_attrib_emulate_3pc.attr,
+ &fileio_dev_attrib_pi_prot_type.attr,
+ &fileio_dev_attrib_hw_pi_prot_type.attr,
+ &fileio_dev_attrib_pi_prot_format.attr,
+ &fileio_dev_attrib_enforce_pr_isids.attr,
+ &fileio_dev_attrib_is_nonrot.attr,
+ &fileio_dev_attrib_emulate_rest_reord.attr,
+ &fileio_dev_attrib_force_pr_aptpl.attr,
+ &fileio_dev_attrib_hw_block_size.attr,
+ &fileio_dev_attrib_block_size.attr,
+ &fileio_dev_attrib_hw_max_sectors.attr,
+ &fileio_dev_attrib_fabric_max_sectors.attr,
+ &fileio_dev_attrib_optimal_sectors.attr,
+ &fileio_dev_attrib_hw_queue_depth.attr,
+ &fileio_dev_attrib_queue_depth.attr,
+ &fileio_dev_attrib_max_unmap_lba_count.attr,
+ &fileio_dev_attrib_max_unmap_block_desc_count.attr,
+ &fileio_dev_attrib_unmap_granularity.attr,
+ &fileio_dev_attrib_unmap_granularity_alignment.attr,
+ &fileio_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api fileio_template = {
.name = "fileio",
.inquiry_prod = "FILEIO",
@@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = {
static int __init fileio_module_init(void)
{
+ struct target_backend_cits *tbc = &fileio_template.tb_cits;
+
+ target_core_setup_sub_cits(&fileio_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
+
return transport_subsystem_register(&fileio_template);
}
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index a25051a37dd7..ff95f95dcd13 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -36,6 +36,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
#include "target_core_internal.h"
@@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return hba;
out_module_put:
- if (hba->transport->owner)
- module_put(hba->transport->owner);
+ module_put(hba->transport->owner);
hba->transport = NULL;
out_free_hba:
kfree(hba);
@@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba)
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
- if (hba->transport->owner)
- module_put(hba->transport->owner);
+ module_put(hba->transport->owner);
hba->transport = NULL;
kfree(hba);
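The iscsi_target_transport.c and target_core_hba.c hunks above drop the caller-side NULL checks because module_put() already tolerates a NULL module pointer. A user-space analogue of removing a redundant NULL check before a NULL-safe callee, using free() in place of module_put() (struct and helper names are hypothetical):

#include <stdlib.h>

struct transport { void *owner; };

/* free(), like module_put(), accepts NULL, so the caller-side
 * "if (t->owner)" check is redundant and can go. */
static void put_transport(struct transport *t)
{
    free(t->owner);
}

int main(void)
{
    struct transport builtin = { .owner = NULL };
    struct transport modular = { .owner = malloc(16) };

    put_transport(&modular);
    put_transport(&builtin);   /* safe even with a NULL owner */
    return 0;
}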
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7e6b857c6b3f..3efff94fbd97 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -41,6 +41,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_iblock.h"
@@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev)
return q->flush_flags & REQ_FLUSH;
}
+DEF_TB_DEFAULT_ATTRIBS(iblock);
+
+static struct configfs_attribute *iblock_backend_dev_attrs[] = {
+ &iblock_dev_attrib_emulate_model_alias.attr,
+ &iblock_dev_attrib_emulate_dpo.attr,
+ &iblock_dev_attrib_emulate_fua_write.attr,
+ &iblock_dev_attrib_emulate_fua_read.attr,
+ &iblock_dev_attrib_emulate_write_cache.attr,
+ &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &iblock_dev_attrib_emulate_tas.attr,
+ &iblock_dev_attrib_emulate_tpu.attr,
+ &iblock_dev_attrib_emulate_tpws.attr,
+ &iblock_dev_attrib_emulate_caw.attr,
+ &iblock_dev_attrib_emulate_3pc.attr,
+ &iblock_dev_attrib_pi_prot_type.attr,
+ &iblock_dev_attrib_hw_pi_prot_type.attr,
+ &iblock_dev_attrib_pi_prot_format.attr,
+ &iblock_dev_attrib_enforce_pr_isids.attr,
+ &iblock_dev_attrib_is_nonrot.attr,
+ &iblock_dev_attrib_emulate_rest_reord.attr,
+ &iblock_dev_attrib_force_pr_aptpl.attr,
+ &iblock_dev_attrib_hw_block_size.attr,
+ &iblock_dev_attrib_block_size.attr,
+ &iblock_dev_attrib_hw_max_sectors.attr,
+ &iblock_dev_attrib_fabric_max_sectors.attr,
+ &iblock_dev_attrib_optimal_sectors.attr,
+ &iblock_dev_attrib_hw_queue_depth.attr,
+ &iblock_dev_attrib_queue_depth.attr,
+ &iblock_dev_attrib_max_unmap_lba_count.attr,
+ &iblock_dev_attrib_max_unmap_block_desc_count.attr,
+ &iblock_dev_attrib_unmap_granularity.attr,
+ &iblock_dev_attrib_unmap_granularity_alignment.attr,
+ &iblock_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
@@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = {
static int __init iblock_module_init(void)
{
+ struct target_backend_cits *tbc = &iblock_template.tb_cits;
+
+ target_core_setup_sub_cits(&iblock_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
+
return transport_subsystem_register(&iblock_template);
}
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e31f42f369ff..60381db90026 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
-int se_dev_set_task_timeout(struct se_device *, u32);
-int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-int se_dev_set_unmap_granularity(struct se_device *, u32);
-int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-int se_dev_set_max_write_same_len(struct se_device *, u32);
-int se_dev_set_emulate_model_alias(struct se_device *, int);
-int se_dev_set_emulate_dpo(struct se_device *, int);
-int se_dev_set_emulate_fua_write(struct se_device *, int);
-int se_dev_set_emulate_fua_read(struct se_device *, int);
-int se_dev_set_emulate_write_cache(struct se_device *, int);
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-int se_dev_set_emulate_tas(struct se_device *, int);
-int se_dev_set_emulate_tpu(struct se_device *, int);
-int se_dev_set_emulate_tpws(struct se_device *, int);
-int se_dev_set_emulate_caw(struct se_device *, int);
-int se_dev_set_emulate_3pc(struct se_device *, int);
-int se_dev_set_pi_prot_type(struct se_device *, int);
-int se_dev_set_pi_prot_format(struct se_device *, int);
-int se_dev_set_enforce_pr_isids(struct se_device *, int);
-int se_dev_set_force_pr_aptpl(struct se_device *, int);
-int se_dev_set_is_nonrot(struct se_device *, int);
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-int se_dev_set_queue_depth(struct se_device *, u32);
-int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_fabric_max_sectors(struct se_device *, u32);
-int se_dev_set_optimal_sectors(struct se_device *, u32);
-int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4c261c33cf55..d56f2aaba9af 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -76,7 +76,7 @@ enum preempt_type {
};
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
- struct t10_pr_registration *, int);
+ struct t10_pr_registration *, int, int);
static sense_reason_t
target_scsi2_reservation_check(struct se_cmd *cmd)
@@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release(
* service action with the SERVICE ACTION RESERVATION KEY
* field set to zero (see 5.7.11.3).
*/
- __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+ __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
ret = 1;
/*
* For 'All Registrants' reservation types, all existing
@@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration(
pr_reg->pr_reg_deve->def_pr_registered = 0;
pr_reg->pr_reg_deve->pr_res_key = 0;
- list_del(&pr_reg->pr_reg_list);
+ if (!list_empty(&pr_reg->pr_reg_list))
+ list_del(&pr_reg->pr_reg_list);
/*
* Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
* so call core_scsi3_put_pr_reg() to decrement our reference.
@@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl(
{
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+ bool free_reg = false;
/*
* If the passed se_node_acl matches the reservation holder,
* release the reservation.
@@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl(
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if ((pr_res_holder != NULL) &&
- (pr_res_holder->pr_reg_nacl == nacl))
- __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+ (pr_res_holder->pr_reg_nacl == nacl)) {
+ __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
+ free_reg = true;
+ }
spin_unlock(&dev->dev_reservation_lock);
/*
* Release any registration associated with the struct se_node_acl.
*/
spin_lock(&pr_tmpl->registration_lock);
+ if (pr_res_holder && free_reg)
+ __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
+
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
&pr_tmpl->registration_list, pr_reg_list) {
@@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations(
if (pr_res_holder != NULL) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- pr_res_holder, 0);
+ pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);
@@ -1429,14 +1436,12 @@ core_scsi3_decode_spec_i_port(
struct target_core_fabric_ops *tmp_tf_ops;
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
- char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
int dest_local_nexus;
u32 dest_rtpi = 0;
- memset(dest_iport, 0, 64);
-
local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Allocate a struct pr_transport_id_holder and setup the
@@ -2105,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
- pr_holder = core_scsi3_check_implicit_release(
- cmd->se_dev, pr_reg);
+ type = pr_reg->pr_res_type;
+ pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
+ pr_reg);
if (pr_holder < 0) {
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
- type = pr_reg->pr_res_type;
spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2269,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if (pr_res_holder) {
+ int pr_res_type = pr_res_holder->pr_res_type;
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -2279,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
* the logical unit, then the command shall be completed with
* RESERVATION CONFLICT status.
*/
- if (pr_res_holder != pr_reg) {
+ if ((pr_res_holder != pr_reg) &&
+ (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+ (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
@@ -2385,23 +2393,59 @@ static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
- int explicit)
+ int explicit,
+ int unreg)
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
+ int pr_res_type = 0, pr_res_scope = 0;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
+ * If an All Registrants reservation is currently active and
+ * a unregister operation is requested, replace the current
+ * dev_pr_res_holder with another active registration.
*/
- dev->dev_pr_res_holder = NULL;
+ if (dev->dev_pr_res_holder) {
+ pr_res_type = dev->dev_pr_res_holder->pr_res_type;
+ pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
+ dev->dev_pr_res_holder->pr_res_type = 0;
+ dev->dev_pr_res_holder->pr_res_scope = 0;
+ dev->dev_pr_res_holder->pr_res_holder = 0;
+ dev->dev_pr_res_holder = NULL;
+ }
+ if (!unreg)
+ goto out;
- pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
- " reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
- core_scsi3_pr_dump_type(pr_reg->pr_res_type),
- (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_del_init(&pr_reg->pr_reg_list);
+ /*
+ * If the I_T nexus is a reservation holder, the persistent reservation
+ * is of an all registrants type, and the I_T nexus is the last remaining
+ * registered I_T nexus, then the device server shall also release the
+ * persistent reservation.
+ */
+ if (!list_empty(&dev->t10_pr.registration_list) &&
+ ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
+ dev->dev_pr_res_holder =
+ list_entry(dev->t10_pr.registration_list.next,
+ struct t10_pr_registration, pr_reg_list);
+ dev->dev_pr_res_holder->pr_res_type = pr_res_type;
+ dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
+ dev->dev_pr_res_holder->pr_res_holder = 1;
+ }
+ spin_unlock(&dev->t10_pr.registration_lock);
+out:
+ if (!dev->dev_pr_res_holder) {
+ pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ tfo->get_fabric_name(), (explicit) ? "explicit" :
+ "implicit", core_scsi3_pr_dump_type(pr_res_type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ }
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
i_buf);
@@ -2532,7 +2576,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
* server shall not establish a unit attention condition.
*/
__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
- pr_reg, 1);
+ pr_reg, 1, 0);
spin_unlock(&dev->dev_reservation_lock);
@@ -2620,7 +2664,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
if (pr_res_holder) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- pr_res_holder, 0);
+ pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);
/*
@@ -2679,7 +2723,7 @@ static void __core_scsi3_complete_pro_preempt(
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
- dev->dev_pr_res_holder, 0);
+ dev->dev_pr_res_holder, 0, 0);
dev->dev_pr_res_holder = pr_reg;
pr_reg->pr_res_holder = 1;
@@ -2924,8 +2968,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
*/
if (pr_reg_n != pr_res_holder)
__core_scsi3_complete_pro_release(dev,
- pr_res_holder->pr_reg_nacl,
- dev->dev_pr_res_holder, 0);
+ pr_res_holder->pr_reg_nacl,
+ dev->dev_pr_res_holder, 0, 0);
/*
* b) Remove the registrations for all I_T nexuses identified
* by the SERVICE ACTION RESERVATION KEY field, except the
@@ -3059,7 +3103,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
unsigned char *initiator_str;
- char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
@@ -3071,7 +3115,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
tf_ops = se_tpg->se_tpg_tfo;
@@ -3389,7 +3432,7 @@ after_iport_check:
* holder (i.e., the I_T nexus on which the
*/
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- dev->dev_pr_res_holder, 0);
+ dev->dev_pr_res_holder, 0, 0);
/*
* g) Move the persistent reservation to the specified I_T nexus using
* the same scope and type as the persistent reservation released in
@@ -3837,7 +3880,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
- int format_code = 0;
+ int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+ bool all_reg = false;
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
@@ -3854,6 +3898,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_pr_res_holder) {
+ struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
+
+ if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+ pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
+ all_reg = true;
+ pr_res_type = pr_holder->pr_res_type;
+ pr_res_scope = pr_holder->pr_res_scope;
+ }
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
&pr_tmpl->registration_list, pr_reg_list) {
@@ -3901,14 +3958,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* reservation holder for PR_HOLDER bit.
*
* Also, if this registration is the reservation
- * holder, fill in SCOPE and TYPE in the next byte.
+ * holder or there is an All Registrants reservation
+ * active, fill in SCOPE and TYPE in the next byte.
*/
if (pr_reg->pr_res_holder) {
buf[off++] |= 0x01;
buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
- } else
+ } else if (all_reg) {
+ buf[off++] |= 0x01;
+ buf[off++] = (pr_res_scope & 0xf0) |
+ (pr_res_type & 0x0f);
+ } else {
off += 2;
+ }
off += 4; /* Skip over reserved area */
/*
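
The target_core_pr.c hunks above teach __core_scsi3_complete_pro_release() to keep an All Registrants reservation alive across an UNREGISTER by promoting the next entry on the registration list to dev_pr_res_holder, and READ_FULL_STATUS now reports SCOPE/TYPE for every registrant while such a reservation is active. The fragment below is only a minimal userspace model of that promotion rule; the types and names (struct pr_reg_model, unregister_reg) are invented for illustration and none of this is kernel code.

/*
 * Minimal userspace model of the "promote the next registrant" rule
 * added above. All names here are illustrative; this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct pr_reg_model {
	int key;                   /* stands in for pr_res_key */
	int is_holder;             /* stands in for pr_res_holder */
	struct pr_reg_model *next; /* stands in for pr_reg_list */
};

struct dev_model {
	struct pr_reg_model *registrations; /* registration_list */
	struct pr_reg_model *holder;        /* dev_pr_res_holder */
	int all_reg;                        /* *_ALLREG reservation active */
};

/*
 * Drop one registration. Under an All Registrants reservation the
 * reservation itself survives as long as any registration remains,
 * so the next list entry inherits the holder role.
 */
static void unregister_reg(struct dev_model *dev, struct pr_reg_model *reg)
{
	struct pr_reg_model **pp = &dev->registrations;

	while (*pp && *pp != reg)
		pp = &(*pp)->next;
	if (*pp)
		*pp = reg->next;

	if (dev->holder == reg) {
		reg->is_holder = 0;
		dev->holder = NULL;
		if (dev->all_reg && dev->registrations) {
			dev->holder = dev->registrations;
			dev->holder->is_holder = 1;
		}
	}
	free(reg);
}

int main(void)
{
	struct pr_reg_model *a = calloc(1, sizeof(*a));
	struct pr_reg_model *b = calloc(1, sizeof(*b));
	struct dev_model dev = { .registrations = a, .holder = a, .all_reg = 1 };

	a->key = 1; a->is_holder = 1; a->next = b;
	b->key = 2;

	unregister_reg(&dev, a);              /* holder unregisters ... */
	printf("new holder key: %d\n",        /* ... b is promoted */
	       dev.holder ? dev.holder->key : 0);
	free(b);
	return 0;
}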
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7c8291f0bbbc..1045dcd7bf65 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,6 +44,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_alua.h"
#include "target_core_pscsi.h"
@@ -1094,7 +1095,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->retries = PS_RETRY;
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
- (cmd->sam_task_attr == MSG_HEAD_TAG),
+ (cmd->sam_task_attr == TCM_HEAD_TAG),
pscsi_req_done);
return 0;
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate)
kfree(pt);
}
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
+TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
+TB_DEV_ATTR_RO(pscsi, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
+TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
+TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
+
+static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
+ &pscsi_dev_attrib_hw_pi_prot_type.attr,
+ &pscsi_dev_attrib_hw_block_size.attr,
+ &pscsi_dev_attrib_hw_max_sectors.attr,
+ &pscsi_dev_attrib_hw_queue_depth.attr,
+ NULL,
+};
+
static struct se_subsystem_api pscsi_template = {
.name = "pscsi",
.owner = THIS_MODULE,
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = {
static int __init pscsi_module_init(void)
{
+ struct target_backend_cits *tbc = &pscsi_template.tb_cits;
+
+ target_core_setup_sub_cits(&pscsi_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
+
return transport_subsystem_register(&pscsi_template);
}
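
The pscsi change above, and the rd_mcp and tcmu hunks further down, all wire backend-specific configfs device attributes the same way: declare show-only attributes with DEF_TB_DEV_ATTRIB_RO()/TB_DEV_ATTR_RO() (or take the full default set via DEF_TB_DEFAULT_ATTRIBS()), collect them in a NULL-terminated configfs_attribute array, then at module init call target_core_setup_sub_cits() and point tb_dev_attrib_cit.ct_attrs at that array before transport_subsystem_register(). The fragment below restates that pattern for a hypothetical backend named "foo"; only the foo_* names are invented, the macros and helpers are the ones visible in this patch.

/* Hypothetical backend "foo": same wiring as pscsi/rd_mcp/tcmu above. */
DEF_TB_DEV_ATTRIB_RO(foo, hw_block_size);
TB_DEV_ATTR_RO(foo, hw_block_size);

static struct configfs_attribute *foo_backend_dev_attrs[] = {
	&foo_dev_attrib_hw_block_size.attr,
	NULL,
};

static struct se_subsystem_api foo_template = {
	.name = "foo",
	/* ... usual backend callbacks ... */
};

static int __init foo_module_init(void)
{
	struct target_backend_cits *tbc = &foo_template.tb_cits;

	/* Fill in the default config_item_types, then override the
	 * dev_attrib group with the backend-specific attribute list. */
	target_core_setup_sub_cits(&foo_template);
	tbc->tb_dev_attrib_cit.ct_attrs = foo_backend_dev_attrs;

	return transport_subsystem_register(&foo_template);
}
module_init(foo_module_init);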
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index b920db3388cd..60ebd170a561 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -34,6 +34,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
#include "target_core_rd.h"
@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
+DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
+
+static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
+ &rd_mcp_dev_attrib_emulate_model_alias.attr,
+ &rd_mcp_dev_attrib_emulate_dpo.attr,
+ &rd_mcp_dev_attrib_emulate_fua_write.attr,
+ &rd_mcp_dev_attrib_emulate_fua_read.attr,
+ &rd_mcp_dev_attrib_emulate_write_cache.attr,
+ &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &rd_mcp_dev_attrib_emulate_tas.attr,
+ &rd_mcp_dev_attrib_emulate_tpu.attr,
+ &rd_mcp_dev_attrib_emulate_tpws.attr,
+ &rd_mcp_dev_attrib_emulate_caw.attr,
+ &rd_mcp_dev_attrib_emulate_3pc.attr,
+ &rd_mcp_dev_attrib_pi_prot_type.attr,
+ &rd_mcp_dev_attrib_hw_pi_prot_type.attr,
+ &rd_mcp_dev_attrib_pi_prot_format.attr,
+ &rd_mcp_dev_attrib_enforce_pr_isids.attr,
+ &rd_mcp_dev_attrib_is_nonrot.attr,
+ &rd_mcp_dev_attrib_emulate_rest_reord.attr,
+ &rd_mcp_dev_attrib_force_pr_aptpl.attr,
+ &rd_mcp_dev_attrib_hw_block_size.attr,
+ &rd_mcp_dev_attrib_block_size.attr,
+ &rd_mcp_dev_attrib_hw_max_sectors.attr,
+ &rd_mcp_dev_attrib_fabric_max_sectors.attr,
+ &rd_mcp_dev_attrib_optimal_sectors.attr,
+ &rd_mcp_dev_attrib_hw_queue_depth.attr,
+ &rd_mcp_dev_attrib_queue_depth.attr,
+ &rd_mcp_dev_attrib_max_unmap_lba_count.attr,
+ &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
+ &rd_mcp_dev_attrib_unmap_granularity.attr,
+ &rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
+ &rd_mcp_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = {
int __init rd_module_init(void)
{
+ struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
int ret;
+ target_core_setup_sub_cits(&rd_mcp_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
+
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
return ret;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 8d171ff77e75..11bea1952435 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -485,7 +485,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
cmd->t_data_nents_orig = cmd->t_data_nents;
cmd->t_data_nents = 1;
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
cmd->transport_complete_callback = compare_and_write_post;
/*
* Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index bc286a67af7c..1307600fe726 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1357,7 +1357,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
cmd->execute_cmd = spc_emulate_inquiry;
break;
case SECURITY_PROTOCOL_IN:
@@ -1391,7 +1391,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
break;
case TEST_UNIT_READY:
cmd->execute_cmd = spc_emulate_testunitready;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index be877bf6f730..0adc0f650213 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1159,7 +1159,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
- if (cmd->sam_task_attr == MSG_ACA_TAG) {
+ if (cmd->sam_task_attr == TCM_ACA_TAG) {
pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
return TCM_INVALID_CDB_FIELD;
@@ -1531,7 +1531,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
BUG_ON(!se_tpg);
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- 0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+ 0, DMA_NONE, TCM_SIMPLE_TAG, sense);
/*
* FIXME: Currently expect caller to handle se_cmd->se_tmr_req
* allocation failure.
@@ -1718,12 +1718,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
switch (cmd->sam_task_attr) {
- case MSG_HEAD_TAG:
+ case TCM_HEAD_TAG:
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
"se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->se_ordered_id);
return false;
- case MSG_ORDERED_TAG:
+ case TCM_ORDERED_TAG:
atomic_inc_mb(&dev->dev_ordered_sync);
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
@@ -1828,7 +1828,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
__target_execute_cmd(cmd);
- if (cmd->sam_task_attr == MSG_ORDERED_TAG)
+ if (cmd->sam_task_attr == TCM_ORDERED_TAG)
break;
}
}
@@ -1844,18 +1844,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return;
- if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
+ if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
atomic_dec_mb(&dev->simple_cmds);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
+ } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
+ } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
atomic_dec_mb(&dev->dev_ordered_sync);
dev->dev_cur_ordered_id++;
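
The transport.c hunk above only renames the task-attribute constants, but the code it touches is the SAM ordering bookkeeping: SIMPLE commands are counted in dev->simple_cmds, ORDERED commands bump dev->dev_ordered_sync and act as a barrier, and HEAD_OF_QUEUE commands bypass the queue entirely. What follows is a rough userspace model of that bookkeeping; the names and exact delay conditions are simplified, so treat it as an illustration of the semantics rather than a copy of target_handle_task_attr()/transport_complete_task_attr().

/*
 * Rough model of the task-attribute ordering handled by the code above.
 * Simplified: the real target core uses atomics and a delayed-command list.
 */
enum tcm_attr { ATTR_SIMPLE, ATTR_HEAD, ATTR_ORDERED };

struct dev_counters {
	int simple_cmds;   /* in-flight SIMPLE commands */
	int ordered_sync;  /* pending/active ORDERED commands */
};

/* Returns 1 if the command must wait on the delayed list, 0 to run now. */
static int submit_cmd(struct dev_counters *d, enum tcm_attr attr)
{
	switch (attr) {
	case ATTR_HEAD:
		return 0;                    /* head-of-queue: never delayed */
	case ATTR_ORDERED:
		d->ordered_sync++;
		return d->simple_cmds != 0;  /* barrier: drain earlier I/O first */
	case ATTR_SIMPLE:
	default:
		d->simple_cmds++;
		return d->ordered_sync != 0; /* queue behind an ORDERED barrier */
	}
}

static void complete_cmd(struct dev_counters *d, enum tcm_attr attr)
{
	if (attr == ATTR_SIMPLE)
		d->simple_cmds--;
	else if (attr == ATTR_ORDERED)
		d->ordered_sync--;
	/* the real code would now re-run commands parked on the delayed list */
}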
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9a1b314f6482..8bfa61c9693d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,8 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
+
#include <linux/target_core_user.h>
/*
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return ret;
}
+DEF_TB_DEFAULT_ATTRIBS(tcmu);
+
+static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
+ &tcmu_dev_attrib_emulate_model_alias.attr,
+ &tcmu_dev_attrib_emulate_dpo.attr,
+ &tcmu_dev_attrib_emulate_fua_write.attr,
+ &tcmu_dev_attrib_emulate_fua_read.attr,
+ &tcmu_dev_attrib_emulate_write_cache.attr,
+ &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
+ &tcmu_dev_attrib_emulate_tas.attr,
+ &tcmu_dev_attrib_emulate_tpu.attr,
+ &tcmu_dev_attrib_emulate_tpws.attr,
+ &tcmu_dev_attrib_emulate_caw.attr,
+ &tcmu_dev_attrib_emulate_3pc.attr,
+ &tcmu_dev_attrib_pi_prot_type.attr,
+ &tcmu_dev_attrib_hw_pi_prot_type.attr,
+ &tcmu_dev_attrib_pi_prot_format.attr,
+ &tcmu_dev_attrib_enforce_pr_isids.attr,
+ &tcmu_dev_attrib_is_nonrot.attr,
+ &tcmu_dev_attrib_emulate_rest_reord.attr,
+ &tcmu_dev_attrib_force_pr_aptpl.attr,
+ &tcmu_dev_attrib_hw_block_size.attr,
+ &tcmu_dev_attrib_block_size.attr,
+ &tcmu_dev_attrib_hw_max_sectors.attr,
+ &tcmu_dev_attrib_fabric_max_sectors.attr,
+ &tcmu_dev_attrib_optimal_sectors.attr,
+ &tcmu_dev_attrib_hw_queue_depth.attr,
+ &tcmu_dev_attrib_queue_depth.attr,
+ &tcmu_dev_attrib_max_unmap_lba_count.attr,
+ &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
+ &tcmu_dev_attrib_unmap_granularity.attr,
+ &tcmu_dev_attrib_unmap_granularity_alignment.attr,
+ &tcmu_dev_attrib_max_write_same_len.attr,
+ NULL,
+};
+
static struct se_subsystem_api tcmu_template = {
.name = "user",
.inquiry_prod = "USER",
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = {
static int __init tcmu_module_init(void)
{
+ struct target_backend_cits *tbc = &tcmu_template.tb_cits;
int ret;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
+ target_core_setup_sub_cits(&tcmu_template);
+ tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
+
ret = transport_subsystem_register(&tcmu_template);
if (ret)
goto out_unreg_genl;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index be0c0d08c56a..edcafa4490c0 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -554,17 +554,17 @@ static void ft_send_work(struct work_struct *work)
*/
switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
case FCP_PTA_HEADQ:
- task_attr = MSG_HEAD_TAG;
+ task_attr = TCM_HEAD_TAG;
break;
case FCP_PTA_ORDERED:
- task_attr = MSG_ORDERED_TAG;
+ task_attr = TCM_ORDERED_TAG;
break;
case FCP_PTA_ACA:
- task_attr = MSG_ACA_TAG;
+ task_attr = TCM_ACA_TAG;
break;
case FCP_PTA_SIMPLE: /* Fallthrough */
default:
- task_attr = MSG_SIMPLE_TAG;
+ task_attr = TCM_SIMPLE_TAG;
}
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 336602eb453e..96b69bfd773f 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -561,7 +561,7 @@ static int omap_8250_startup(struct uart_port *port)
if (ret)
goto err;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
up->capabilities |= UART_CAP_RPM;
#endif
@@ -997,12 +997,12 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.fifosize = 64;
up.tx_loadsz = 64;
up.capabilities = UART_CAP_FIFO;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
/*
- * PM_RUNTIME is mostly transparent. However to do it right we need to a
+	 * Runtime PM is mostly transparent. However to do it right we need a
* TX empty interrupt before we can put the device to auto idle. So if
- * PM_RUNTIME is not enabled we don't add that flag and can spare that
- * one extra interrupt in the TX path.
+ * PM is not enabled we don't add that flag and can spare that one extra
+ * interrupt in the TX path.
*/
up.capabilities |= UART_CAP_RPM;
#endif
@@ -1105,7 +1105,7 @@ static int omap8250_remove(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
+#ifdef CONFIG_PM
static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
bool enable)
@@ -1179,7 +1179,7 @@ static int omap8250_resume(struct device *dev)
#define omap8250_complete NULL
#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int omap8250_lost_context(struct uart_8250_port *up)
{
u32 val;
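
The 8250_omap (and isp1760/oxu210hp) changes above replace CONFIG_PM_RUNTIME guards with CONFIG_PM, reflecting that runtime PM support is now built whenever CONFIG_PM is set. A driver that only needs its callbacks when power management is enabled keeps the same shape; the fragment below is a generic sketch of that guard pattern with invented foo_* names, not code from this patch.

/* Generic sketch: runtime-PM callbacks built only under CONFIG_PM. */
#ifdef CONFIG_PM
static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, save context */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore context */
	return 0;
}
#endif

static const struct dev_pm_ops foo_pm_ops = {
	/* expands to nothing when CONFIG_PM is not set */
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};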
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 024f58475a94..3a494168661e 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1131,19 +1131,19 @@ static int usbg_submit_command(struct f_uas *fu,
switch (cmd_iu->prio_attr & 0x7) {
case UAS_HEAD_TAG:
- cmd->prio_attr = MSG_HEAD_TAG;
+ cmd->prio_attr = TCM_HEAD_TAG;
break;
case UAS_ORDERED_TAG:
- cmd->prio_attr = MSG_ORDERED_TAG;
+ cmd->prio_attr = TCM_ORDERED_TAG;
break;
case UAS_ACA:
- cmd->prio_attr = MSG_ACA_TAG;
+ cmd->prio_attr = TCM_ACA_TAG;
break;
default:
pr_debug_once("Unsupported prio_attr: %02x.\n",
cmd_iu->prio_attr);
case UAS_SIMPLE_TAG:
- cmd->prio_attr = MSG_SIMPLE_TAG;
+ cmd->prio_attr = TCM_SIMPLE_TAG;
break;
}
@@ -1240,7 +1240,7 @@ static int bot_submit_command(struct f_uas *fu,
goto err;
}
- cmd->prio_attr = MSG_SIMPLE_TAG;
+ cmd->prio_attr = TCM_SIMPLE_TAG;
se_cmd = &cmd->se_cmd;
cmd->unpacked_lun = cbw->Lun;
cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index e752c3098f38..395649f357aa 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
int retval = 1;
unsigned long flags;
- /* if !PM_RUNTIME, root hub timers won't get shut down ... */
+ /* if !PM, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 75811dd5a9d7..036924e640f5 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3087,7 +3087,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
int ports, i, retval = 1;
unsigned long flags;
- /* if !PM_RUNTIME, root hub timers won't get shut down ... */
+ /* if !PM, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 50610a6acf3d..e999496eda3e 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -606,7 +606,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
init_waitqueue_head(&tmr->tmr_wait);
transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
- tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG,
+ tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
&pending_req->sense_buffer[0]);
rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);