Diffstat (limited to 'include/linux/highmem.h'):
 include/linux/highmem.h | 81 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 75 insertions(+), 6 deletions(-)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ea5cdbd8c2c3..d6e82e3de027 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -32,8 +32,65 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
#include <asm/kmap_types.h>
#ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+void kunmap_high(struct page *page);
+
+static inline void kunmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the non-atomic kmap code must perform
+ * a global TLB invalidation when its kmap pool wraps.
+ *
+ * However, while holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ preempt_disable();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_atomic_high_prot(page, prot);
+}
+#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
+
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
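
For context, a minimal usage sketch of the consolidated sleepable API
(illustrative only, not part of this diff; the helper name is made up):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Zero a possibly-highmem page; may sleep, so no atomic context. */
    static void zero_page_example(struct page *page)
    {
            void *vaddr = kmap(page);       /* lowmem pages map to page_address() */

            memset(vaddr, 0, PAGE_SIZE);
            kunmap(page);                   /* no-op for lowmem pages */
    }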
@@ -77,15 +134,21 @@ static inline struct page *kmap_to_page(void *addr)
static inline unsigned long totalhigh_pages(void) { return 0UL; }
-#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
might_sleep();
return page_address(page);
}
+static inline void kunmap_high(struct page *page)
+{
+}
+
static inline void kunmap(struct page *page)
{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
}
static inline void *kmap_atomic(struct page *page)
@@ -96,16 +159,20 @@ static inline void *kmap_atomic(struct page *page)
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-static inline void __kunmap_atomic(void *addr)
+static inline void kunmap_atomic_high(void *addr)
{
- pagefault_enable();
- preempt_enable();
+ /*
+ * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+ * handles re-enabling faults + preemption
+ */
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_flush_unused() do {} while(0)
-#endif
#endif /* CONFIG_HIGHMEM */
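
An architecture with aliasing data caches opts into the flush-on-unmap path
by defining ARCH_HAS_FLUSH_ON_KUNMAP and providing kunmap_flush_on_unmap() in
its headers. A sketch of the opt-in, modeled on parisc (the dcache-flush
routine named here is parisc's; other architectures would supply their own):

    /* In the architecture's asm/cacheflush.h (sketch): */
    #define ARCH_HAS_FLUSH_ON_KUNMAP
    static inline void kunmap_flush_on_unmap(void *addr)
    {
            flush_kernel_dcache_page_addr(addr);    /* arch dcache flush */
    }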
@@ -149,7 +216,9 @@ static inline void kmap_atomic_idx_pop(void)
#define kunmap_atomic(addr) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic(addr); \
+ kunmap_atomic_high(addr); \
+ pagefault_enable(); \
+ preempt_enable(); \
} while (0)
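
And a usage sketch for the atomic pair (illustrative only, not part of this
diff; the helper name is made up): kunmap_atomic() takes the kernel virtual
address returned by kmap_atomic(), not the struct page; the BUILD_BUG_ON()
above rejects callers that pass the page by mistake.

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy from a possibly-highmem page; safe in atomic/IRQ context. */
    static void copy_from_page_example(void *dst, struct page *page,
                                       size_t offset, size_t len)
    {
            char *vaddr = kmap_atomic(page);  /* disables pagefaults + preemption */

            memcpy(dst, vaddr + offset, len);
            kunmap_atomic(vaddr);             /* pass the address, not the page */
    }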