Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/cache-fa.S		6
-rw-r--r--	arch/arm/mm/cache-v3.S		29
-rw-r--r--	arch/arm/mm/cache-v4.S		29
-rw-r--r--	arch/arm/mm/cache-v4wb.S	6
-rw-r--r--	arch/arm/mm/cache-v4wt.S	15
-rw-r--r--	arch/arm/mm/cache-v6.S		6
-rw-r--r--	arch/arm/mm/cache-v7.S		6
-rw-r--r--	arch/arm/mm/dma-mapping.c	83
-rw-r--r--	arch/arm/mm/ioremap.c		103
-rw-r--r--	arch/arm/mm/proc-arm1020.S	6
-rw-r--r--	arch/arm/mm/proc-arm1020e.S	6
-rw-r--r--	arch/arm/mm/proc-arm1022.S	6
-rw-r--r--	arch/arm/mm/proc-arm1026.S	6
-rw-r--r--	arch/arm/mm/proc-arm920.S	6
-rw-r--r--	arch/arm/mm/proc-arm922.S	6
-rw-r--r--	arch/arm/mm/proc-arm925.S	6
-rw-r--r--	arch/arm/mm/proc-arm926.S	6
-rw-r--r--	arch/arm/mm/proc-arm940.S	6
-rw-r--r--	arch/arm/mm/proc-arm946.S	6
-rw-r--r--	arch/arm/mm/proc-feroceon.S	12
-rw-r--r--	arch/arm/mm/proc-mohawk.S	6
-rw-r--r--	arch/arm/mm/proc-xsc3.S		6
-rw-r--r--	arch/arm/mm/proc-xscale.S	8
23 files changed, 334 insertions(+), 41 deletions(-)
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53e6078..8ebffdd6fcff 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-fa_dma_inv_range:
+ENTRY(fa_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
@@ -180,7 +180,7 @@ fa_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-fa_dma_clean_range:
+ENTRY(fa_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -241,5 +241,7 @@ ENTRY(fa_cache_fns)
.long fa_flush_kern_dcache_area
.long fa_dma_map_area
.long fa_dma_unmap_area
+ .long fa_dma_inv_range
+ .long fa_dma_clean_range
.long fa_dma_flush_range
.size fa_cache_fns, . - fa_cache_fns
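
The two new .long entries are appended between dma_unmap_area and dma_flush_range in every cache_fns table in this patch, which implies a matching pair of members in struct cpu_cache_fns (arch/arm/include/asm/cacheflush.h), a change this diff does not show. A minimal sketch of the inferred tail of that structure, assuming the member order mirrors the table order:

/*
 * Sketch only: inferred from the .long table ordering above.  The real
 * struct cpu_cache_fns has further members before these; only the
 * DMA-related tail is shown, and the struct name here is invented.
 */
struct cpu_cache_fns_dma_tail {
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);
	void (*dma_inv_range)(const void *, const void *);	/* new slot */
	void (*dma_clean_range)(const void *, const void *);	/* new slot */
	void (*dma_flush_range)(const void *, const void *);
};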
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c599fee..6df52dc014be 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -84,6 +84,20 @@ ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -94,6 +108,17 @@ ENTRY(v3_flush_kern_dcache_area)
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_clean_range)
mov pc, lr
/*
@@ -104,7 +129,7 @@ ENTRY(v3_dma_flush_range)
*/
ENTRY(v3_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
+ bne v3_dma_inv_range
/* FALLTHROUGH */
/*
@@ -130,5 +155,7 @@ ENTRY(v3_cache_fns)
.long v3_flush_kern_dcache_area
.long v3_dma_map_area
.long v3_dma_unmap_area
+ .long v3_dma_inv_range
+ .long v3_dma_clean_range
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns
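
The retargeted branch in v3_dma_unmap_area (bne v3_dma_inv_range instead of bne v3_dma_flush_range) follows the usual DMA direction rules: on unmap, a buffer the device may have written is invalidated so the CPU does not read stale lines, while a DMA_TO_DEVICE buffer needs no maintenance at unmap time. A C restatement of that dispatch, as a sketch only, using the dmac_inv_range helper this patch reintroduces:

/* Sketch of the unmap-time direction dispatch; not code from this patch. */
static void dma_unmap_area_sketch(const void *start, size_t size, int dir)
{
	if (dir != DMA_TO_DEVICE)
		/* device may have written the buffer: discard stale lines */
		dmac_inv_range(start, (const char *)start + size);
	/* DMA_TO_DEVICE: CPU wrote, device only read; nothing to undo */
}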
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e3e813..df3b423713b9 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -94,6 +94,20 @@ ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -106,6 +120,17 @@ ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_clean_range)
mov pc, lr
/*
@@ -116,7 +141,7 @@ ENTRY(v4_dma_flush_range)
*/
ENTRY(v4_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v4_dma_flush_range
+ bne v4_dma_inv_range
/* FALLTHROUGH */
/*
@@ -142,5 +167,7 @@ ENTRY(v4_cache_fns)
.long v4_flush_kern_dcache_area
.long v4_dma_map_area
.long v4_dma_unmap_area
+ .long v4_dma_inv_range
+ .long v4_dma_clean_range
.long v4_dma_flush_range
.size v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368afa102..32e7a7448496 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range)
* - start - virtual start address
* - end - virtual end address
*/
-v4wb_dma_inv_range:
+ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -194,7 +194,7 @@ v4wb_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-v4wb_dma_clean_range:
+ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -252,5 +252,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_dcache_area
.long v4wb_dma_map_area
.long v4wb_dma_unmap_area
+ .long v4wb_dma_inv_range
+ .long v4wb_dma_clean_range
.long v4wb_dma_flush_range
.size v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c70312f43b..3d8dad5b2650 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -142,12 +142,23 @@ ENTRY(v4wt_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-v4wt_dma_inv_range:
+ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
@@ -196,5 +207,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_dcache_area
.long v4wt_dma_map_area
.long v4wt_dma_unmap_area
+ .long v4wt_dma_inv_range
+ .long v4wt_dma_clean_range
.long v4wt_dma_flush_range
.size v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index e46ecd847138..c58392d4ff5a 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v6_dma_inv_range:
+ENTRY(v6_dma_inv_range)
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -231,7 +231,7 @@ v6_dma_inv_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v6_dma_clean_range:
+ENTRY(v6_dma_clean_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_SMP
@@ -310,5 +310,7 @@ ENTRY(v6_cache_fns)
.long v6_flush_kern_dcache_area
.long v6_dma_map_area
.long v6_dma_unmap_area
+ .long v6_dma_inv_range
+ .long v6_dma_clean_range
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157e116e..884ac48a2010 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -224,7 +224,7 @@ ENDPROC(v7_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_inv_range:
+ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
@@ -248,7 +248,7 @@ ENDPROC(v7_dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_clean_range:
+ENTRY(v7_dma_clean_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -317,5 +317,7 @@ ENTRY(v7_cache_fns)
.long v7_flush_kern_dcache_area
.long v7_dma_map_area
.long v7_dma_unmap_area
+ .long v7_dma_inv_range
+ .long v7_dma_clean_range
.long v7_dma_flush_range
.size v7_cache_fns, . - v7_cache_fns
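
All of the dma_inv_range implementations above follow the boundary rule spelled out in their comment blocks: a cache line only partially covered by [start, end) may hold unrelated dirty data, so it must be written back rather than discarded. The v7 version computes the line mask at run time via dcache_line_size; the others use a fixed CACHE_DLINESIZE. A C sketch of the common pattern, with clean_line() and inv_line() as assumed stand-ins for the per-CPU mcr instructions:

/* Sketch of the shared dma_inv_range boundary handling (assumed helpers). */
static void dma_inv_range_sketch(unsigned long start, unsigned long end,
				 unsigned long linesize)
{
	unsigned long mask = linesize - 1;

	if (start & mask)
		clean_line(start & ~mask);	/* write back partial head line */
	if (end & mask)
		clean_line(end & ~mask);	/* write back partial tail line */
	for (start &= ~mask; start < end; start += linesize)
		inv_line(start);		/* discard covered lines */
}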
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bb9b6124a0d5..20a3e7fea2c4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -622,3 +622,86 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#ifdef CONFIG_UNOFFICIAL_USER_DMA_API
+int temp_user_dma_op(unsigned long start, unsigned long end, int op)
+{
+ struct mm_struct *mm = current->active_mm;
+ void (*inner_op)(const void *, const void *);
+ void (*outer_op)(unsigned long, unsigned long);
+
+ if (!test_taint(TAINT_USER)) {
+ printk(KERN_WARNING "%s: using unofficial user DMA API, kernel tainted.\n",
+ current->comm);
+ add_taint(TAINT_USER);
+ }
+
+ switch (op) {
+ case 1:
+ inner_op = dmac_inv_range;
+ outer_op = outer_inv_range;
+ break;
+ case 2:
+ inner_op = dmac_clean_range;
+ outer_op = outer_clean_range;
+ break;
+ case 3:
+ inner_op = dmac_flush_range;
+ outer_op = outer_flush_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (end < start)
+ return -EINVAL;
+
+ down_read(&mm->mmap_sem);
+ do {
+ struct vm_area_struct *vma = find_vma(mm, start);
+
+ if (!vma || start < vma->vm_start ||
+ vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ do {
+ unsigned long e = (start | ~PAGE_MASK) + 1;
+ struct page *page;
+
+ if (e > end)
+ e = end;
+
+ page = follow_page(vma, start, FOLL_GET);
+ if (IS_ERR(page)) {
+ up_read(&mm->mmap_sem);
+ return PTR_ERR(page);
+ }
+
+ if (page) {
+ unsigned long phys;
+
+ /*
+ * This flushes the userspace address - which
+ * is not what this API was intended to do.
+ * Things may go astray as a result.
+ */
+ inner_op((void *)start, (void *)e);
+
+ /*
+ * Now handle the L2 cache.
+ */
+ phys = page_to_phys(page) + (start & ~PAGE_MASK);
+ outer_op(phys, phys + e - start);
+
+ put_page(page);
+ }
+ start = e;
+ } while (start < end && start < vma->vm_end);
+ } while (start < end);
+ up_read(&mm->mmap_sem);
+
+ return 0;
+}
+#endif
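
temp_user_dma_op() takes a user virtual address range and an op selector (1 = invalidate, 2 = clean, 3 = flush, per the switch above). A hypothetical in-kernel caller could look like the sketch below; the wrapper name and any syscall or ioctl plumbing around it are assumptions, not part of this patch:

/* Hypothetical caller; only temp_user_dma_op() comes from this patch. */
static int flush_user_buffer_for_dma(unsigned long uaddr, size_t len)
{
	/* op 3 = clean + invalidate, matching the switch above */
	return temp_user_dma_op(uaddr, uaddr + len, 3);
}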
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 28c8b950ef04..d5a38801cc08 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -349,6 +349,109 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
}
EXPORT_SYMBOL(__arm_ioremap);
+#define MAX_SECTIONS 4
+void __iomem *
+__arm_multi_strided_ioremap(int sections,
+ unsigned long *phys_addr, size_t *phys_size,
+ unsigned long *phys_stride,
+ unsigned long *virt_stride,
+ unsigned int mtype)
+{
+ unsigned long pfns[MAX_SECTIONS];
+ const struct mem_type *type;
+ unsigned long total_size = 0, j;
+ int err = 0, i;
+ unsigned long addr, addr_i, pstride, vstride;
+ struct vm_struct *area;
+
+ if (sections > MAX_SECTIONS)
+ return NULL;
+
+ for (i = 0; i < sections; i++) {
+ /* physical and virtual strides must be either both
+    specified or neither specified */
+ pstride = ((phys_stride && phys_stride[i]) ?
+ phys_stride[i] : phys_size[i]);
+ vstride = ((virt_stride && virt_stride[i]) ?
+ virt_stride[i] : phys_size[i]);
+
+ if (!pstride ^ !vstride)
+ return NULL;
+
+ /*
+ * Don't allow wraparound or zero size. Sections must
+ * begin and end on a page boundary, and strides must be
+ * page aligned.
+ *
+ * For now, the size must be a multiple of the physical
+ * stride. This may later be relaxed so that the region
+ * need only contain full virtual strides (i.e. without
+ * the waste after the last virtual block).
+ */
+ if (((phys_addr[i] | phys_size[i] |
+ vstride | pstride) & ~PAGE_MASK) ||
+ !phys_size[i] ||
+ vstride > pstride ||
+ (pstride && (phys_size[i] % pstride)) ||
+ (phys_addr[i] + phys_size[i] - 1 < phys_addr[i]))
+ return NULL;
+
+ pfns[i] = __phys_to_pfn(phys_addr[i]);
+
+ /*
+ * High mappings must be supersection aligned
+ */
+ if (pfns[i] >= 0x100000 &&
+ (__pfn_to_phys(pfns[i]) & ~SUPERSECTION_MASK))
+ return NULL;
+
+ total_size += phys_size[i] / pstride * vstride;
+ }
+
+ type = get_mem_type(mtype);
+ if (!type)
+ return NULL;
+
+ area = get_vm_area(total_size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = addr_i = (unsigned long)area->addr;
+
+ for (i = 0; i < sections && !err; i++) {
+ printk(KERN_ERR "mapping %lx to %lx (%x)\n", __pfn_to_phys(pfns[i]), addr_i, phys_size[i]);
+ pstride = ((phys_stride && phys_stride[i]) ?
+ phys_stride[i] : phys_size[i]);
+ vstride = ((virt_stride && virt_stride[i]) ?
+ virt_stride[i] : phys_size[i]);
+ for (j = 0; j < phys_size[i]; j += pstride) {
+ #ifndef CONFIG_SMP
+ if (DOMAIN_IO == 0 &&
+ (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
+ cpu_is_xsc3()) && pfns[i] >= 0x100000 &&
+ !((__pfn_to_phys(pfns[i]) | vstride | addr_i) & ~SUPERSECTION_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_supersections(addr_i, pfns[i], vstride, type);
+ } else if (!((__pfn_to_phys(pfns[i]) | vstride | addr_i) & ~PMD_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_sections(addr_i, pfns[i], vstride, type);
+ } else
+ #endif
+ err = remap_area_pages(addr_i, pfns[i], vstride, type);
+ pfns[i] += __phys_to_pfn(pstride);
+ addr_i += vstride;
+ }
+ }
+
+ if (err) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ flush_cache_vmap(addr, addr + total_size);
+ return (void __iomem *) addr;
+}
+EXPORT_SYMBOL(__arm_multi_strided_ioremap);
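
As a usage illustration (all addresses, sizes and strides below are invented), the call sketched here would stitch two banks into one contiguous virtual window, taking one page of virtual space (vstride) from every two pages of physical space (pstride) in each bank:

/* Illustrative values only; nothing here comes from a real board. */
unsigned long phys[2]    = { 0x80000000, 0x90000000 };
size_t sizes[2]          = { 8 * PAGE_SIZE, 8 * PAGE_SIZE };
unsigned long pstride[2] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };
unsigned long vstride[2] = { PAGE_SIZE, PAGE_SIZE };

void __iomem *base = __arm_multi_strided_ioremap(2, phys, sizes,
						 pstride, vstride,
						 MT_DEVICE);
/* base now spans 2 * (8 / 2) pages of packed device registers. */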
+
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 72507c630ceb..c85f5eb42634 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1020_dma_inv_range:
+ENTRY(arm1020_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -295,7 +295,7 @@ arm1020_dma_inv_range:
*
* (same as v4wb)
*/
-arm1020_dma_clean_range:
+ENTRY(arm1020_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -363,6 +363,8 @@ ENTRY(arm1020_cache_fns)
.long arm1020_flush_kern_dcache_area
.long arm1020_dma_map_area
.long arm1020_dma_unmap_area
+ .long arm1020_dma_inv_range
+ .long arm1020_dma_clean_range
.long arm1020_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index d27829805609..5a3cf7620a2c 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1020e_dma_inv_range:
+ENTRY(arm1020e_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -284,7 +284,7 @@ arm1020e_dma_inv_range:
*
* (same as v4wb)
*/
-arm1020e_dma_clean_range:
+ENTRY(arm1020e_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -349,6 +349,8 @@ ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_kern_dcache_area
.long arm1020e_dma_map_area
.long arm1020e_dma_unmap_area
+ .long arm1020e_dma_inv_range
+ .long arm1020e_dma_clean_range
.long arm1020e_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index ce13e4a827de..fec8f5878438 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1022_dma_inv_range:
+ENTRY(arm1022_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -273,7 +273,7 @@ arm1022_dma_inv_range:
*
* (same as v4wb)
*/
-arm1022_dma_clean_range:
+ENTRY(arm1022_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -338,6 +338,8 @@ ENTRY(arm1022_cache_fns)
.long arm1022_flush_kern_dcache_area
.long arm1022_dma_map_area
.long arm1022_dma_unmap_area
+ .long arm1022_dma_inv_range
+ .long arm1022_dma_clean_range
.long arm1022_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 636672a29c6d..9ece6f666497 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm1026_dma_inv_range:
+ENTRY(arm1026_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -267,7 +267,7 @@ arm1026_dma_inv_range:
*
* (same as v4wb)
*/
-arm1026_dma_clean_range:
+ENTRY(arm1026_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -332,6 +332,8 @@ ENTRY(arm1026_cache_fns)
.long arm1026_flush_kern_dcache_area
.long arm1026_dma_map_area
.long arm1026_dma_unmap_area
+ .long arm1026_dma_inv_range
+ .long arm1026_dma_clean_range
.long arm1026_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 8be81992645d..6f6ab2747da6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -239,7 +239,7 @@ ENTRY(arm920_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm920_dma_inv_range:
+ENTRY(arm920_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -262,7 +262,7 @@ arm920_dma_inv_range:
*
* (same as v4wb)
*/
-arm920_dma_clean_range:
+ENTRY(arm920_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -321,6 +321,8 @@ ENTRY(arm920_cache_fns)
.long arm920_flush_kern_dcache_area
.long arm920_dma_map_area
.long arm920_dma_unmap_area
+ .long arm920_dma_inv_range
+ .long arm920_dma_clean_range
.long arm920_dma_flush_range
#endif
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index c0ff8e4b1074..4e4396b121ca 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm922_dma_inv_range:
+ENTRY(arm922_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -264,7 +264,7 @@ arm922_dma_inv_range:
*
* (same as v4wb)
*/
-arm922_dma_clean_range:
+ENTRY(arm922_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -323,6 +323,8 @@ ENTRY(arm922_cache_fns)
.long arm922_flush_kern_dcache_area
.long arm922_dma_map_area
.long arm922_dma_unmap_area
+ .long arm922_dma_inv_range
+ .long arm922_dma_clean_range
.long arm922_dma_flush_range
#endif
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 3c6cffe400f6..7c01c5d1108c 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm925_dma_inv_range:
+ENTRY(arm925_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -308,7 +308,7 @@ arm925_dma_inv_range:
*
* (same as v4wb)
*/
-arm925_dma_clean_range:
+ENTRY(arm925_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -374,6 +374,8 @@ ENTRY(arm925_cache_fns)
.long arm925_flush_kern_dcache_area
.long arm925_dma_map_area
.long arm925_dma_unmap_area
+ .long arm925_dma_inv_range
+ .long arm925_dma_clean_range
.long arm925_dma_flush_range
ENTRY(cpu_arm925_dcache_clean_area)
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 75b707c9cce1..72a01a4b80ab 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-arm926_dma_inv_range:
+ENTRY(arm926_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -271,7 +271,7 @@ arm926_dma_inv_range:
*
* (same as v4wb)
*/
-arm926_dma_clean_range:
+ENTRY(arm926_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -337,6 +337,8 @@ ENTRY(arm926_cache_fns)
.long arm926_flush_kern_dcache_area
.long arm926_dma_map_area
.long arm926_dma_unmap_area
+ .long arm926_dma_inv_range
+ .long arm926_dma_clean_range
.long arm926_dma_flush_range
ENTRY(cpu_arm926_dcache_clean_area)
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1af1657819eb..6bb58fca7270 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-arm940_dma_inv_range:
+ENTRY(arm940_dma_inv_range)
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -192,7 +192,7 @@ arm940_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-arm940_dma_clean_range:
+ENTRY(arm940_dma_clean_range)
ENTRY(cpu_arm940_dcache_clean_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -266,6 +266,8 @@ ENTRY(arm940_cache_fns)
.long arm940_flush_kern_dcache_area
.long arm940_dma_map_area
.long arm940_dma_unmap_area
+ .long arm940_dma_inv_range
+ .long arm940_dma_clean_range
.long arm940_dma_flush_range
__INIT
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 1664b6aaff79..ac0f9ba719d7 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area)
* - end - virtual end address
* (same as arm926)
*/
-arm946_dma_inv_range:
+ENTRY(arm946_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -240,7 +240,7 @@ arm946_dma_inv_range:
*
* (same as arm926)
*/
-arm946_dma_clean_range:
+ENTRY(arm946_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -308,6 +308,8 @@ ENTRY(arm946_cache_fns)
.long arm946_flush_kern_dcache_area
.long arm946_dma_map_area
.long arm946_dma_unmap_area
+ .long arm946_dma_inv_range
+ .long arm946_dma_clean_range
.long arm946_dma_flush_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 53e632343849..97e1d784f152 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
* (same as v4wb)
*/
.align 5
-feroceon_dma_inv_range:
+ENTRY(feroceon_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -288,7 +288,7 @@ feroceon_dma_inv_range:
mov pc, lr
.align 5
-feroceon_range_dma_inv_range:
+ENTRY(feroceon_range_dma_inv_range)
mrs r2, cpsr
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -314,7 +314,7 @@ feroceon_range_dma_inv_range:
* (same as v4wb)
*/
.align 5
-feroceon_dma_clean_range:
+ENTRY(feroceon_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -324,7 +324,7 @@ feroceon_dma_clean_range:
mov pc, lr
.align 5
-feroceon_range_dma_clean_range:
+ENTRY(feroceon_range_dma_clean_range)
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
@@ -414,6 +414,8 @@ ENTRY(feroceon_cache_fns)
.long feroceon_flush_kern_dcache_area
.long feroceon_dma_map_area
.long feroceon_dma_unmap_area
+ .long feroceon_dma_inv_range
+ .long feroceon_dma_clean_range
.long feroceon_dma_flush_range
ENTRY(feroceon_range_cache_fns)
@@ -425,6 +427,8 @@ ENTRY(feroceon_range_cache_fns)
.long feroceon_range_flush_kern_dcache_area
.long feroceon_range_dma_map_area
.long feroceon_dma_unmap_area
+ .long feroceon_range_dma_inv_range
+ .long feroceon_range_dma_clean_range
.long feroceon_range_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index caa31154e7db..55b7fbec6548 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -218,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-mohawk_dma_inv_range:
+ENTRY(mohawk_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
@@ -241,7 +241,7 @@ mohawk_dma_inv_range:
*
* (same as v4wb)
*/
-mohawk_dma_clean_range:
+ENTRY(mohawk_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -301,6 +301,8 @@ ENTRY(mohawk_cache_fns)
.long mohawk_flush_kern_dcache_area
.long mohawk_dma_map_area
.long mohawk_dma_unmap_area
+ .long mohawk_dma_inv_range
+ .long mohawk_dma_clean_range
.long mohawk_dma_flush_range
ENTRY(cpu_mohawk_dcache_clean_area)
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index e5797f1c1db7..32231a0e917e 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-xsc3_dma_inv_range:
+ENTRY(xsc3_dma_inv_range)
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
@@ -278,7 +278,7 @@ xsc3_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-xsc3_dma_clean_range:
+ENTRY(xsc3_dma_clean_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
@@ -337,6 +337,8 @@ ENTRY(xsc3_cache_fns)
.long xsc3_flush_kern_dcache_area
.long xsc3_dma_map_area
.long xsc3_dma_unmap_area
+ .long xsc3_dma_inv_range
+ .long xsc3_dma_clean_range
.long xsc3_dma_flush_range
ENTRY(cpu_xsc3_dcache_clean_area)
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 63037e2162f2..a7999f94bf27 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-xscale_dma_inv_range:
+ENTRY(xscale_dma_inv_range)
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -336,7 +336,7 @@ xscale_dma_inv_range:
* - start - virtual start address
* - end - virtual end address
*/
-xscale_dma_clean_range:
+ENTRY(xscale_dma_clean_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
@@ -409,6 +409,8 @@ ENTRY(xscale_cache_fns)
.long xscale_flush_kern_dcache_area
.long xscale_dma_map_area
.long xscale_dma_unmap_area
+ .long xscale_dma_inv_range
+ .long xscale_dma_clean_range
.long xscale_dma_flush_range
/*
@@ -434,6 +436,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
.long xscale_dma_a0_map_area
.long xscale_dma_unmap_area
.long xscale_dma_flush_range
+ .long xscale_dma_clean_range
+ .long xscale_dma_flush_range
ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
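
A note on the xscale_80200_A0_A1_cache_fns hunk above: with the new slot order (map, unmap, inv, clean, flush), the pre-existing .long xscale_dma_flush_range line now lands in the dma_inv_range slot. That reading is inferred rather than stated by the patch, but it fits the A0/A1 erratum handling, where a plain invalidate is unsafe and a full flush is used instead:

/*
 * Inferred slot assignment for xscale_80200_A0_A1_cache_fns:
 *   dma_inv_range   -> xscale_dma_flush_range  (erratum: flush, not inv)
 *   dma_clean_range -> xscale_dma_clean_range
 *   dma_flush_range -> xscale_dma_flush_range
 */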