author    Stephen Rothwell <sfr@canb.auug.org.au>  2010-12-09 11:32:15 +1100
committer Stephen Rothwell <sfr@canb.auug.org.au>  2010-12-09 11:32:15 +1100
commit    fdea1faf14bc610d88623f1c42f7641276c9a9f0 (patch)
tree      ed7f2fae54c67820894b9828a7727c4ab935a6db /drivers
parent    ca99692aed2d84002f00646512770f3196720e92 (diff)
parent    292d10a14990483e5628291bd1c626a28d729659 (diff)
Merge remote branch 'arm/devel'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig          |   2
-rw-r--r--  drivers/Makefile         |   2
-rw-r--r--  drivers/amba/bus.c       |  39
-rw-r--r--  drivers/clk/Kconfig      |   4
-rw-r--r--  drivers/clk/Makefile     |   2
-rw-r--r--  drivers/clk/clkdev.c     | 176
-rw-r--r--  drivers/mmc/host/mmci.c  | 210
-rw-r--r--  drivers/mmc/host/mmci.h  |   9
-rw-r--r--  drivers/rtc/rtc-sa1100.c |  63
9 files changed, 468 insertions, 39 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index a2b902f4d437..3d93b3a3d630 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -111,4 +111,6 @@ source "drivers/xen/Kconfig"
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
+
+source "drivers/clk/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index f3ebb30f1b7f..bf15ce7493d2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -115,3 +115,5 @@ obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
obj-y += ieee802154/
+#common clk code
+obj-y += clk/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 2737b9752205..e7df019d29d4 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -147,6 +147,39 @@ static void amba_put_disable_pclk(struct amba_device *pcdev)
clk_put(pclk);
}
+static int amba_get_enable_vcore(struct amba_device *pcdev)
+{
+ struct regulator *vcore = regulator_get(&pcdev->dev, "vcore");
+ int ret;
+
+ pcdev->vcore = vcore;
+
+ if (IS_ERR(vcore)) {
+ /* It is OK not to supply a vcore regulator */
+ if (PTR_ERR(vcore) == -ENODEV)
+ return 0;
+ return PTR_ERR(vcore);
+ }
+
+ ret = regulator_enable(vcore);
+ if (ret) {
+ regulator_put(vcore);
+ pcdev->vcore = ERR_PTR(-ENODEV);
+ }
+
+ return ret;
+}
+
+static void amba_put_disable_vcore(struct amba_device *pcdev)
+{
+ struct regulator *vcore = pcdev->vcore;
+
+ if (!IS_ERR(vcore)) {
+ regulator_disable(vcore);
+ regulator_put(vcore);
+ }
+}
+
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
@@ -159,6 +192,10 @@ static int amba_probe(struct device *dev)
int ret;
do {
+ ret = amba_get_enable_vcore(pcdev);
+ if (ret)
+ break;
+
ret = amba_get_enable_pclk(pcdev);
if (ret)
break;
@@ -168,6 +205,7 @@ static int amba_probe(struct device *dev)
break;
amba_put_disable_pclk(pcdev);
+ amba_put_disable_vcore(pcdev);
} while (0);
return ret;
@@ -180,6 +218,7 @@ static int amba_remove(struct device *dev)
int ret = drv->remove(pcdev);
amba_put_disable_pclk(pcdev);
+ amba_put_disable_vcore(pcdev);
return ret;
}
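
/*
 * Hedged sketch (not part of this merge): the amba/bus.c hunks above make
 * the bus core look up an optional "vcore" supply on each primecell and
 * keep it enabled while a driver is bound; -ENODEV from regulator_get()
 * is treated as "no such supply", so boards that do not provide one are
 * unaffected.  Below is a hypothetical board-side mapping that would feed
 * that lookup; the device name "sdi0" and the constraints are invented
 * for illustration only.
 */
#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply board_vcore_consumers[] = {
	/* "vcore" matches the supply id requested in amba_get_enable_vcore() */
	REGULATOR_SUPPLY("vcore", "sdi0"),
};

static struct regulator_init_data board_vcore_init_data = {
	.constraints = {
		/* allow the bus core to enable/disable the supply */
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(board_vcore_consumers),
	.consumer_supplies	= board_vcore_consumers,
};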
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
new file mode 100644
index 000000000000..4168c8896e16
--- /dev/null
+++ b/drivers/clk/Kconfig
@@ -0,0 +1,4 @@
+
+config CLKDEV_LOOKUP
+ bool
+ select HAVE_CLK
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
new file mode 100644
index 000000000000..07613fa172c9
--- /dev/null
+++ b/drivers/clk/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
new file mode 100644
index 000000000000..0fc0a79852de
--- /dev/null
+++ b/drivers/clk/clkdev.c
@@ -0,0 +1,176 @@
+/*
+ * drivers/clk/clkdev.c
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+
+/*
+ * Find the correct struct clk for the device and connection ID.
+ * We do slightly fuzzy matching here:
+ * An entry with a NULL ID is assumed to be a wildcard.
+ * If an entry has a device ID, it must match
+ * If an entry has a connection ID, it must match
+ * Then we take the most specific entry - with the following
+ * order of precedence: dev+con > dev only > con only.
+ */
+static struct clk *clk_find(const char *dev_id, const char *con_id)
+{
+ struct clk_lookup *p;
+ struct clk *clk = NULL;
+ int match, best = 0;
+
+ list_for_each_entry(p, &clocks, node) {
+ match = 0;
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
+ match += 2;
+ }
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
+ match += 1;
+ }
+
+ if (match > best) {
+ clk = p->clk;
+ if (match != 3)
+ best = match;
+ else
+ break;
+ }
+ }
+ return clk;
+}
+
+struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+ struct clk *clk;
+
+ mutex_lock(&clocks_mutex);
+ clk = clk_find(dev_id, con_id);
+ if (clk && !__clk_get(clk))
+ clk = NULL;
+ mutex_unlock(&clocks_mutex);
+
+ return clk ? clk : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(clk_get_sys);
+
+struct clk *clk_get(struct device *dev, const char *con_id)
+{
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+
+ return clk_get_sys(dev_id, con_id);
+}
+EXPORT_SYMBOL(clk_get);
+
+void clk_put(struct clk *clk)
+{
+ __clk_put(clk);
+}
+EXPORT_SYMBOL(clk_put);
+
+void clkdev_add(struct clk_lookup *cl)
+{
+ mutex_lock(&clocks_mutex);
+ list_add_tail(&cl->node, &clocks);
+ mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clkdev_add);
+
+void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
+{
+ mutex_lock(&clocks_mutex);
+ while (num--) {
+ list_add_tail(&cl->node, &clocks);
+ cl++;
+ }
+ mutex_unlock(&clocks_mutex);
+}
+
+#define MAX_DEV_ID 20
+#define MAX_CON_ID 16
+
+struct clk_lookup_alloc {
+ struct clk_lookup cl;
+ char dev_id[MAX_DEV_ID];
+ char con_id[MAX_CON_ID];
+};
+
+struct clk_lookup * __init_refok
+clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
+{
+ struct clk_lookup_alloc *cla;
+
+ cla = __clkdev_alloc(sizeof(*cla));
+ if (!cla)
+ return NULL;
+
+ cla->cl.clk = clk;
+ if (con_id) {
+ strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
+ cla->cl.con_id = cla->con_id;
+ }
+
+ if (dev_fmt) {
+ va_list ap;
+
+ va_start(ap, dev_fmt);
+ vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
+ cla->cl.dev_id = cla->dev_id;
+ va_end(ap);
+ }
+
+ return &cla->cl;
+}
+EXPORT_SYMBOL(clkdev_alloc);
+
+int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
+ struct device *dev)
+{
+ struct clk *r = clk_get(dev, id);
+ struct clk_lookup *l;
+
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ l = clkdev_alloc(r, alias, alias_dev_name);
+ clk_put(r);
+ if (!l)
+ return -ENODEV;
+ clkdev_add(l);
+ return 0;
+}
+EXPORT_SYMBOL(clk_add_alias);
+
+/*
+ * clkdev_drop - remove a clock dynamically allocated
+ */
+void clkdev_drop(struct clk_lookup *cl)
+{
+ mutex_lock(&clocks_mutex);
+ list_del(&cl->node);
+ mutex_unlock(&clocks_mutex);
+ kfree(cl);
+}
+EXPORT_SYMBOL(clkdev_drop);
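
/*
 * Usage sketch (not part of this merge): clkdev separates the lookup
 * "which struct clk does device X get for connection Y" from the clock
 * implementation itself.  A platform registers a table of clk_lookup
 * entries once, and drivers keep using the plain clk_get()/clk_put()
 * API.  The names "uart_clk", "apb_pclk" and "uart0" below are
 * hypothetical and only illustrate the dev_id/con_id matching rules
 * documented above clk_find().
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

extern struct clk uart_clk;	/* defined by the platform's clock code */

static struct clk_lookup board_clk_lookups[] = {
	{	/* dev_id + con_id: the most specific match (match == 3) */
		.dev_id	= "uart0",
		.con_id	= "apb_pclk",
		.clk	= &uart_clk,
	},
	{	/* NULL con_id: wildcard connection id for this device */
		.dev_id	= "uart0",
		.clk	= &uart_clk,
	},
};

static void __init board_clocks_init(void)
{
	clkdev_add_table(board_clk_lookups, ARRAY_SIZE(board_clk_lookups));
}

/* A consumer driver then resolves its clock the usual way: */
static int example_enable_clock(struct device *dev)
{
	struct clk *clk = clk_get(dev, "apb_pclk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);
	/* ... use the device ... */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}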
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 87b4fc6c98c2..82880e89e204 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -19,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
@@ -45,6 +46,12 @@ static unsigned int fmax = 515633;
* is asserted (likewise for RX)
* @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
* is asserted (likewise for RX)
+ * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
+ * and will not work at all.
+ * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
+ * using DMA.
+ * @sdio: variant supports SDIO
+ * @st_clkdiv: true if using a ST-specific clock divider algorithm
*/
struct variant_data {
unsigned int clkreg;
@@ -52,6 +59,10 @@ struct variant_data {
unsigned int datalength_bits;
unsigned int fifosize;
unsigned int fifohalfsize;
+ bool broken_blockend;
+ bool broken_blockend_dma;
+ bool sdio;
+ bool st_clkdiv;
};
static struct variant_data variant_arm = {
@@ -65,6 +76,8 @@ static struct variant_data variant_u300 = {
.fifohalfsize = 8 * 4,
.clkreg_enable = 1 << 13, /* HWFCEN */
.datalength_bits = 16,
+ .broken_blockend_dma = true,
+ .sdio = true,
};
static struct variant_data variant_ux500 = {
@@ -73,7 +86,11 @@ static struct variant_data variant_ux500 = {
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = 1 << 14, /* HWFCEN */
.datalength_bits = 24,
+ .broken_blockend = true,
+ .sdio = true,
+ .st_clkdiv = true,
};
+
/*
* This must be called with host->lock held
*/
@@ -84,9 +101,30 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
if (desired) {
if (desired >= host->mclk) {
- clk = MCI_CLK_BYPASS;
+ /*
+ * The ST clock divider does not like the bypass bit,
+ * even though it's available. Instead the datasheet
+ * recommends setting the divider to zero.
+ */
+ if (!variant->st_clkdiv)
+ clk = MCI_CLK_BYPASS;
host->cclk = host->mclk;
+ } else if (variant->st_clkdiv) {
+ /*
+ * DB8500 TRM says f = mclk / (clkdiv + 2)
+ * => clkdiv = (mclk / f) - 2
+ * Round the divider up so we don't exceed the max
+ * frequency
+ */
+ clk = DIV_ROUND_UP(host->mclk, desired) - 2;
+ if (clk >= 256)
+ clk = 255;
+ host->cclk = host->mclk / (clk + 2);
} else {
+ /*
+ * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
+ * => clkdiv = mclk / (2 * f) - 1
+ */
clk = host->mclk / (2 * desired) - 1;
if (clk >= 256)
clk = 255;
@@ -129,10 +167,26 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
spin_lock(&host->lock);
}
+static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
+{
+ void __iomem *base = host->base;
+
+ if (host->singleirq) {
+ unsigned int mask0 = readl(base + MMCIMASK0);
+
+ mask0 &= ~MCI_IRQ1MASK;
+ mask0 |= mask;
+
+ writel(mask0, base + MMCIMASK0);
+ }
+
+ writel(mask, base + MMCIMASK1);
+}
+
static void mmci_stop_data(struct mmci_host *host)
{
writel(0, host->base + MMCIDATACTRL);
- writel(0, host->base + MMCIMASK1);
+ mmci_set_mask1(host, 0);
host->data = NULL;
}
@@ -162,6 +216,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->data = data;
host->size = data->blksz * data->blocks;
host->data_xfered = 0;
+ host->blockend = false;
+ host->dataend = false;
mmci_init_sg(host, data);
@@ -196,9 +252,14 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
irqmask = MCI_TXFIFOHALFEMPTYMASK;
}
+ /* The ST Micro variant has a special bit to enable SDIO */
+ if (variant->sdio && host->mmc->card)
+ if (mmc_card_sdio(host->mmc->card))
+ datactrl |= MCI_ST_DPSM_SDIOEN;
+
writel(datactrl, base + MMCIDATACTRL);
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
- writel(irqmask, base + MMCIMASK1);
+ mmci_set_mask1(host, irqmask);
}
static void
@@ -233,20 +294,9 @@ static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
- if (status & MCI_DATABLOCKEND) {
- host->data_xfered += data->blksz;
-#ifdef CONFIG_ARCH_U300
- /*
- * On the U300 some signal or other is
- * badly routed so that a data write does
- * not properly terminate with a MCI_DATAEND
- * status flag. This quirk will make writes
- * work again.
- */
- if (data->flags & MMC_DATA_WRITE)
- status |= MCI_DATAEND;
-#endif
- }
+ struct variant_data *variant = host->variant;
+
+ /* First check for errors */
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
if (status & MCI_DATACRCFAIL)
@@ -255,7 +305,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
data->error = -ETIMEDOUT;
else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
data->error = -EIO;
- status |= MCI_DATAEND;
+
+ /* Force-complete the transaction */
+ host->blockend = true;
+ host->dataend = true;
/*
* We hit an error condition. Ensure that any data
@@ -273,9 +326,64 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
local_irq_restore(flags);
}
}
- if (status & MCI_DATAEND) {
+
+ /*
+ * On ARM variants in PIO mode, MCI_DATABLOCKEND
+ * is always sent first, and we increase the
+ * transferred number of bytes for that IRQ. Then
+ * MCI_DATAEND follows and we conclude the transaction.
+ *
+ * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
+ * doesn't seem to clear from the status immediately,
+ * so we can't use it to keep count when only one irq is
+ * used because the irq will hit for other reasons, and
+ * then the flag is still up. So we use the MCI_DATAEND
+ * IRQ at the end of the entire transfer because
+ * MCI_DATABLOCKEND is broken.
+ *
+ * In the U300, the IRQs can arrive out-of-order,
+ * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
+ * so for this case we use the flags "blockend" and
+ * "dataend" to make sure both IRQs have arrived before
+ * concluding the transaction. (This does not apply
+ * to the Ux500 which doesn't fire MCI_DATABLOCKEND
+ * at all.) In DMA mode it suffers from the same problem
+ * as the Ux500.
+ */
+ if (status & MCI_DATABLOCKEND) {
+ /*
+ * Just being a little over-cautious, we do not
+ * use this progressive update if the hardware blockend
+ * flag is unreliable: since it can stay high between
+ * IRQs it will corrupt the transfer counter.
+ */
+ if (!variant->broken_blockend)
+ host->data_xfered += data->blksz;
+ host->blockend = true;
+ }
+
+ if (status & MCI_DATAEND)
+ host->dataend = true;
+
+ /*
+ * On variants with broken blockend we shall only wait for dataend,
+ * on others we must sync with the blockend signal since they can
+ * appear out-of-order.
+ */
+ if (host->dataend && (host->blockend || variant->broken_blockend)) {
mmci_stop_data(host);
+ /* Reset these flags */
+ host->blockend = false;
+ host->dataend = false;
+
+ /*
+ * Variants with broken blockend flags need to handle the
+ * end of the entire transfer here.
+ */
+ if (variant->broken_blockend && !data->error)
+ host->data_xfered += data->blksz * data->blocks;
+
if (!data->stop) {
mmci_request_end(host, data->mrq);
} else {
@@ -356,7 +464,32 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
variant->fifosize : variant->fifohalfsize;
count = min(remain, maxcnt);
- writesl(base + MMCIFIFO, ptr, count >> 2);
+ /*
+ * The ST Micro variant for SDIO transfer sizes
+ * less than 8 bytes should have clock H/W flow
+ * control disabled.
+ */
+ if (variant->sdio &&
+ mmc_card_sdio(host->mmc->card)) {
+ if (count < 8)
+ writel(readl(host->base + MMCICLOCK) &
+ ~variant->clkreg_enable,
+ host->base + MMCICLOCK);
+ else
+ writel(readl(host->base + MMCICLOCK) |
+ variant->clkreg_enable,
+ host->base + MMCICLOCK);
+ }
+
+ /*
+ * SDIO especially may want to send something that is
+ * not divisible by 4 (as opposed to card sectors
+ * etc), and the FIFO only accepts full 32-bit writes.
+ * So compensate by adding +3 to the count: a single
+ * byte becomes one 32-bit write, 7 bytes become two
+ * 32-bit writes, and so on.
+ */
+ writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
ptr += count;
remain -= count;
@@ -437,7 +570,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
* "any data available" mode.
*/
if (status & MCI_RXACTIVE && host->size < variant->fifosize)
- writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+ mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
/*
* If we run out of data, disable the data IRQs; this
@@ -446,7 +579,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
* stops us racing with our data end IRQ.
*/
if (host->size == 0) {
- writel(0, base + MMCIMASK1);
+ mmci_set_mask1(host, 0);
writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
}
@@ -469,6 +602,14 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
struct mmc_data *data;
status = readl(host->base + MMCISTATUS);
+
+ if (host->singleirq) {
+ if (status & readl(host->base + MMCIMASK1))
+ mmci_pio_irq(irq, dev_id);
+
+ status &= ~MCI_IRQ1MASK;
+ }
+
status &= readl(host->base + MMCIMASK0);
writel(status, host->base + MMCICLEAR);
@@ -635,6 +776,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
+ unsigned int mask;
int ret;
/* must have platform data */
@@ -714,7 +856,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
-#ifdef CONFIG_REGULATOR
/* If we're using the regulator framework, try to fetch a regulator */
host->vcc = regulator_get(&dev->dev, "vmmc");
if (IS_ERR(host->vcc))
@@ -733,7 +874,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
"(using regulator instead)\n");
}
}
-#endif
+
/* Fall back to platform data if no regulator is found */
if (host->vcc == NULL)
mmc->ocr_avail = plat->ocr_mask;
@@ -806,11 +947,21 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
if (ret)
goto unmap;
- ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
- if (ret)
- goto irq0_free;
+ if (dev->irq[1] == NO_IRQ)
+ host->singleirq = true;
+ else {
+ ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
+ DRIVER_NAME " (pio)", host);
+ if (ret)
+ goto irq0_free;
+ }
+
+ mask = MCI_IRQENABLE;
+ /* Don't use the datablockend flag if it's broken */
+ if (variant->broken_blockend)
+ mask &= ~MCI_DATABLOCKEND;
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ writel(mask, host->base + MMCIMASK0);
amba_set_drvdata(dev, mmc);
@@ -864,7 +1015,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCIDATACTRL);
free_irq(dev->irq[0], host);
- free_irq(dev->irq[1], host);
+ if (!host->singleirq)
+ free_irq(dev->irq[1], host);
if (host->gpio_wp != -ENOSYS)
gpio_free(host->gpio_wp);
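
/*
 * Hedged sketch (not part of this merge): the two clock-divider formulas
 * used in mmci_set_clkreg() and the PIO word-rounding from mmci_pio_write(),
 * written as plain C so the rounding behaviour is easy to see.  The mclk
 * and desired-frequency values are illustrative only.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* ST variant (DB8500 TRM): f = mclk / (clkdiv + 2)  =>  clkdiv = mclk/f - 2 */
static unsigned int st_cclk(unsigned int mclk, unsigned int desired)
{
	/* round the divider up so cclk never exceeds the requested rate */
	unsigned int clk = DIV_ROUND_UP(mclk, desired) - 2;

	if (clk >= 256)
		clk = 255;
	return mclk / (clk + 2);
}

/* PL180 (ARM TRM): f = mclk / (2 * (clkdiv + 1))  =>  clkdiv = mclk/(2f) - 1 */
static unsigned int arm_cclk(unsigned int mclk, unsigned int desired)
{
	unsigned int clk = mclk / (2 * desired) - 1;

	if (clk >= 256)
		clk = 255;
	return mclk / (2 * (clk + 1));
}

/* PIO writes: the FIFO only takes whole 32-bit words, so a partial
 * trailing word is padded up; (count + 3) >> 2 words are written. */
static unsigned int fifo_words(unsigned int count)
{
	return (count + 3) >> 2;	/* 1 byte -> 1 word, 7 bytes -> 2 words */
}

int main(void)
{
	/* e.g. 100 MHz block clock, 25 MHz requested card clock */
	printf("st:  %u Hz\n", st_cclk(100000000, 25000000));	/* 25000000 */
	printf("arm: %u Hz\n", arm_cclk(100000000, 25000000));	/* 25000000 */
	printf("7 bytes -> %u FIFO words\n", fifo_words(7));	/* 2 */
	return 0;
}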
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4ae887fc0189..df06f01aac89 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -139,6 +139,11 @@
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+/* These interrupts are directed to IRQ1 when two IRQ lines are available */
+#define MCI_IRQ1MASK \
+ (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
+ MCI_TXFIFOHALFEMPTYMASK)
+
#define NR_SG 16
struct clk;
@@ -154,6 +159,7 @@ struct mmci_host {
int gpio_cd;
int gpio_wp;
int gpio_cd_irq;
+ bool singleirq;
unsigned int data_xfered;
@@ -171,6 +177,9 @@ struct mmci_host {
struct timer_list timer;
unsigned int oldstat;
+ bool blockend;
+ bool dataend;
+
/* pio stuff */
struct sg_mapping_iter sg_miter;
unsigned int size;
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index e4a44b641702..b0985f727078 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -39,7 +39,7 @@
#include <mach/regs-ost.h>
#endif
-#define RTC_DEF_DIVIDER 32768 - 1
+#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
static unsigned long rtc_freq = 1024;
@@ -61,7 +61,8 @@ static inline int rtc_periodic_alarm(struct rtc_time *tm)
* Calculate the next alarm time given the requested alarm time mask
* and the current time.
*/
-static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm)
+static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
+ struct rtc_time *alrm)
{
unsigned long next_time;
unsigned long now_time;
@@ -116,7 +117,23 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
rtsr = RTSR;
/* clear interrupt sources */
RTSR = 0;
- RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
+ /* Fix for a nasty initialization problem in the SA11xx RTSR register.
+ * See also the comments in sa1100_rtc_probe(). */
+ if (rtsr & (RTSR_ALE | RTSR_HZE)) {
+ /* This is the original code, before there was the if test
+ * above. This code does not clear interrupts that were not
+ * enabled. */
+ RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
+ } else {
+ /* For some reason, it is possible to enter this routine
+ * without either interrupt enabled; this has been observed on
+ * several units (bug in the SA11xx chip?).
+ *
+ * This situation leads to an infinite "loop" of interrupt
+ * routine invocations and, as a result, the processor appears
+ * to lock up on its first call to open(). */
+ RTSR = RTSR_AL | RTSR_HZ;
+ }
/* clear alarm interrupt if it has occurred */
if (rtsr & RTSR_AL)
@@ -178,7 +195,7 @@ static int sa1100_rtc_read_callback(struct device *dev, int data)
* Here we compare (match - OSCR) 8 instead of 0 --
* see comment in pxa_timer_interrupt() for explanation.
*/
- while( (signed long)((osmr1 = OSMR1) - OSCR) <= 8 ) {
+ while ((signed long)((osmr1 = OSMR1) - OSCR) <= 8) {
data += 0x100;
OSSR = OSSR_M1; /* clear match on timer 1 */
OSMR1 = osmr1 + period;
@@ -192,19 +209,19 @@ static int sa1100_rtc_open(struct device *dev)
int ret;
ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
- "rtc 1Hz", dev);
+ "rtc 1Hz", dev);
if (ret) {
dev_err(dev, "IRQ %d already in use.\n", IRQ_RTC1Hz);
goto fail_ui;
}
ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, IRQF_DISABLED,
- "rtc Alrm", dev);
+ "rtc Alrm", dev);
if (ret) {
dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
goto fail_ai;
}
ret = request_irq(IRQ_OST1, timer1_interrupt, IRQF_DISABLED,
- "rtc timer", dev);
+ "rtc timer", dev);
if (ret) {
dev_err(dev, "IRQ %d already in use.\n", IRQ_OST1);
goto fail_pi;
@@ -236,7 +253,7 @@ static void sa1100_rtc_release(struct device *dev)
static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
{
- switch(cmd) {
+ switch (cmd) {
case RTC_AIE_OFF:
spin_lock_irq(&sa1100_rtc_lock);
RTSR &= ~RTSR_ALE;
@@ -333,6 +350,7 @@ static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
seq_printf(seq, "periodic_IRQ\t: %s\n",
(OIER & OIER_E1) ? "yes" : "no");
seq_printf(seq, "periodic_freq\t: %ld\n", rtc_freq);
+ seq_printf(seq, "RTSR\t\t: 0x%08x\n", (u32)RTSR);
return 0;
}
@@ -364,7 +382,8 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
*/
if (RTTR == 0) {
RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
- dev_warn(&pdev->dev, "warning: initializing default clock divider/trim value\n");
+ dev_warn(&pdev->dev, "warning: "
+ "initializing default clock divider/trim value\n");
/* The current RTC value probably doesn't make sense either */
RCNR = 0;
}
@@ -379,6 +398,30 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+ /* Fix for a nasty initialization problem in the SA11xx RTSR register.
+ * See also the comments in sa1100_rtc_interrupt().
+ *
+ * Sometimes bit 1 of the RTSR (RTSR_HZ) will wake up set to 1, which
+ * means an interrupt is pending, even though interrupts were never
+ * enabled. In this case, the bit must be cleared before enabling
+ * interrupts, to avoid servicing a nonexistent interrupt.
+ *
+ * In principle, the same problem would apply to bit 0, although it has
+ * never been observed to happen.
+ *
+ * This issue is addressed both here and in sa1100_rtc_interrupt().
+ * If the issue is not addressed here, in the times when the processor
+ * wakes up with the bit set there will be one spurious interrupt.
+ *
+ * The issue is also dealt with in sa1100_rtc_interrupt() to be on the
+ * safe side, since the condition that leads to this strange
+ * initialization is unknown and could in principle also happen during
+ * normal processing.
+ *
+ * Notice that clearing bits 1 and 0 is accomplished by writing ONES to
+ * the corresponding bits in RTSR. */
+ RTSR = RTSR_AL | RTSR_HZ;
+
return 0;
}
@@ -386,7 +429,7 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
- if (rtc)
+ if (rtc)
rtc_device_unregister(rtc);
return 0;
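
/*
 * Hedged sketch (not part of this merge): the RTSR layout assumed by the
 * rtc-sa1100.c fix above.  Bits 0/1 are write-one-to-clear status bits and
 * bits 2/3 are the matching enable bits (which is why the driver uses
 * "(RTSR_AL | RTSR_HZ) & (rtsr >> 2)").  Bit positions follow that shift;
 * treat them as illustrative rather than authoritative.
 */
#define RTSR_AL		(1 << 0)	/* alarm detected (status, W1C) */
#define RTSR_HZ		(1 << 1)	/* 1 Hz tick detected (status, W1C) */
#define RTSR_ALE	(1 << 2)	/* alarm interrupt enable */
#define RTSR_HZE	(1 << 3)	/* 1 Hz interrupt enable */

/* Value to write in order to ack only the enabled, pending sources. */
static unsigned int rtsr_ack_enabled(unsigned int rtsr)
{
	return (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
}

/*
 * When neither enable bit is set but a status bit is stuck high (the
 * situation the probe()/interrupt() fix guards against), ack both
 * status bits unconditionally instead.
 */
static unsigned int rtsr_ack_all(void)
{
	return RTSR_AL | RTSR_HZ;
}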