author     James Morris <james.l.morris@oracle.com>  2017-09-24 22:41:55 -0700
committer  James Morris <james.l.morris@oracle.com>  2017-09-24 22:41:55 -0700
commit     25eabb13c7d67ae32298015c5e28d00f604f412c (patch)
tree       5bd75c2c1e385c79425bb099f0d19db7fb9c391f /mm/internal.h
parent     ab5348c9c23cd253f5902980d2d8fe067dc24c82 (diff)
parent     e19b205be43d11bff638cad4487008c48d21c103 (diff)
Merge tag 'v4.14-rc2' into next-general
Linux 4.14-rc2

Sync to v4.14-rc2 for security subsystem developers to track.
Diffstat (limited to 'mm/internal.h')
-rw-r--r--  mm/internal.h  17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/internal.h b/mm/internal.h
index 24d88f084705..1df011f62480 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -480,6 +480,17 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 /* Mask to get the watermark bits */
 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
 
+/*
+ * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
+ * cannot assume a reduced access to memory reserves is sufficient for
+ * !MMU
+ */
+#ifdef CONFIG_MMU
+#define ALLOC_OOM 0x08
+#else
+#define ALLOC_OOM ALLOC_NO_WATERMARKS
+#endif
+
 #define ALLOC_HARDER 0x10 /* try to alloc harder */
 #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
@@ -498,6 +509,7 @@ extern struct workqueue_struct *mm_percpu_wq;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
+void flush_tlb_batched_pending(struct mm_struct *mm);
 #else
 static inline void try_to_unmap_flush(void)
 {
@@ -505,7 +517,9 @@ static inline void try_to_unmap_flush(void)
 static inline void try_to_unmap_flush_dirty(void)
 {
 }
-
+static inline void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+}
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 
 extern const struct trace_print_flags pageflag_names[];
@@ -522,4 +536,5 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
 }
 
+void setup_zone_pageset(struct zone *zone);
 #endif /* __MM_INTERNAL_H */
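
The first hunk adds ALLOC_OOM, whose meaning the new comment explains: on MMU configurations the oom_reaper reclaims a victim's memory asynchronously, so a partial boost to the reserves is enough, while on !MMU the flag aliases ALLOC_NO_WATERMARKS and bypasses watermark checks entirely. A minimal, self-contained userspace sketch of that distinction follows; it is not kernel code, and the ALLOC_NO_WATERMARKS value and the bypasses_watermarks() helper are placeholders invented for illustration.

/* alloc_oom_sketch.c - illustrative only; flag values are placeholders,
 * not the kernel's.  Build with and without -DCONFIG_MMU to compare. */
#include <stdio.h>

#define ALLOC_NO_WATERMARKS 0x04	/* placeholder value for this sketch */

#ifdef CONFIG_MMU
#define ALLOC_OOM 0x08			/* partial reserve access; oom_reaper runs */
#else
#define ALLOC_OOM ALLOC_NO_WATERMARKS	/* no async reclaim: open the reserves */
#endif

/* Hypothetical helper: does this flag combination skip watermark checks? */
static int bypasses_watermarks(unsigned int alloc_flags)
{
	return !!(alloc_flags & ALLOC_NO_WATERMARKS);
}

int main(void)
{
	printf("OOM victim bypasses watermarks: %s\n",
	       bypasses_watermarks(ALLOC_OOM) ? "yes" : "no");
	return 0;
}

Built with -DCONFIG_MMU the sketch prints "no"; without it, "yes", which is the behavioural difference the hunk's comment describes.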
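The flush_tlb_batched_pending() hunks show a common header idiom: declare the real function when CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH is set and provide a static inline no-op otherwise, so call sites never need their own #ifdef. Below is a standalone sketch of that idiom; the mm_struct stand-in and the printf body are inventions for the demo, and the "real" function is defined locally only so the example compiles as one file (in the kernel the header merely declares it).

/* stub_pattern.c - standalone demo of the #ifdef / static-inline-stub idiom. */
#include <stdio.h>

struct mm_struct { int id; };	/* stand-in for the demo, not the kernel's */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* "Real" implementation: only built when the arch supports batched flushes. */
static void flush_tlb_batched_pending(struct mm_struct *mm)
{
	printf("flushing batched TLB entries pending for mm %d\n", mm->id);
}
#else
/* No-op stub: compiles away, so callers need no #ifdef of their own. */
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
	(void)mm;
}
#endif

int main(void)
{
	struct mm_struct mm = { .id = 1 };

	/* Unconditional call site - works on both configurations. */
	flush_tlb_batched_pending(&mm);
	return 0;
}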