-rw-r--r--   arch/csky/abiv1/cacheflush.c            20
-rw-r--r--   arch/csky/abiv1/inc/abi/cacheflush.h    41
2 files changed, 49 insertions(+), 12 deletions(-)
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index fee99fc6612f..9f1fe80cc847 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -54,3 +54,23 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
icache_inv_all();
}
}
+
+void flush_kernel_dcache_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ mapping = page_mapping_file(page);
+
+ if (!mapping || mapping_mapped(mapping))
+ dcache_wbinv_all();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ dcache_wbinv_all();
+
+ if (vma->vm_flags & VM_EXEC)
+ icache_inv_all();
+}
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index fce5604cef40..79ef9e8c1afd 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -4,26 +4,49 @@
#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H
-#include <linux/compiler.h>
+#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-#define flush_cache_mm(mm) cache_wbinv_all()
+#define flush_cache_mm(mm) dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn) cache_wbinv_all()
#define flush_cache_dup_mm(mm) cache_wbinv_all()
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+extern void flush_kernel_dcache_page(struct page *);
+
+#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
+
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+ dcache_wbinv_all();
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+ dcache_wbinv_all();
+}
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vmaddr)
+{
+ if (PageAnon(page))
+ cache_wbinv_all();
+}
+
/*
* if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
* Use cache_wbinv_all() here and need to be improved in future.
*/
-#define flush_cache_range(vma, start, end) cache_wbinv_all()
-#define flush_cache_vmap(start, end) cache_wbinv_range(start, end)
-#define flush_cache_vunmap(start, end) cache_wbinv_range(start, end)
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#define flush_cache_vmap(start, end) cache_wbinv_all()
+#define flush_cache_vunmap(start, end) cache_wbinv_all()
-#define flush_icache_page(vma, page) cache_wbinv_all()
+#define flush_icache_page(vma, page) do {} while (0);
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
#define flush_icache_user_range(vma,page,addr,len) \
@@ -31,19 +54,13 @@ extern void flush_dcache_page(struct page *);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
- cache_wbinv_all(); \
memcpy(dst, src, len); \
- cache_wbinv_all(); \
} while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
- cache_wbinv_all(); \
memcpy(dst, src, len); \
cache_wbinv_all(); \
} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
#endif /* __ABI_CSKY_CACHEFLUSH_H */