author     Linus Torvalds <torvalds@linux-foundation.org>  2019-09-26 10:29:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-26 10:29:42 -0700
commit     cbafe18c71028d5e0ee1626b4776fea5d5824a78 (patch)
tree       2bb7db7db4ed8df2801f7c16553c69fb27379f7f /include
parent     f41def397161053eb0d3ed6861ef65985efbf293 (diff)
parent     a22fea94992a2bc5328005e62f368413ede49c14 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 - almost all of the rest of -mm
 - various other subsystems

Subsystems affected by this patch series: memcg, misc, core-kernel, lib,
checkpatch, reiserfs, fat, fork, cpumask, kexec, uaccess, kconfig, kgdb,
bug, ipc, lzo, kasan, madvise, cleanups, pagemap

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (77 commits)
  arch/sparc/include/asm/pgtable_64.h: fix build
  mm: treewide: clarify pgtable_page_{ctor,dtor}() naming
  ntfs: remove (un)?likely() from IS_ERR() conditions
  IB/hfi1: remove unlikely() from IS_ERR*() condition
  xfs: remove unlikely() from WARN_ON() condition
  wimax/i2400m: remove unlikely() from WARN*() condition
  fs: remove unlikely() from WARN_ON() condition
  xen/events: remove unlikely() from WARN() condition
  checkpatch: check for nested (un)?likely() calls
  hexagon: drop empty and unused free_initrd_mem
  mm: factor out common parts between MADV_COLD and MADV_PAGEOUT
  mm: introduce MADV_PAGEOUT
  mm: change PAGEREF_RECLAIM_CLEAN with PAGE_REFRECLAIM
  mm: introduce MADV_COLD
  mm: untag user pointers in mmap/munmap/mremap/brk
  vfio/type1: untag user pointers in vaddr_get_pfn
  tee/shm: untag user pointers in tee_shm_register
  media/v4l2-core: untag user pointers in videobuf_dma_contig_user_get
  drm/radeon: untag user pointers in radeon_gem_userptr_ioctl
  drm/amdgpu: untag user pointers
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bug.h               |  53
-rw-r--r--  include/asm-generic/pgalloc.h           |   8
-rw-r--r--  include/linux/cpumask.h                 |  14
-rw-r--r--  include/linux/interval_tree_generic.h   |  22
-rw-r--r--  include/linux/kexec.h                   |   2
-rw-r--r--  include/linux/kgdb.h                    |   2
-rw-r--r--  include/linux/mm.h                      |   4
-rw-r--r--  include/linux/mm_types_task.h           |   4
-rw-r--r--  include/linux/printk.h                  |  22
-rw-r--r--  include/linux/rbtree_augmented.h        |  88
-rw-r--r--  include/linux/string.h                  |   5
-rw-r--r--  include/linux/swap.h                    |   2
-rw-r--r--  include/linux/thread_info.h             |   2
-rw-r--r--  include/linux/uaccess.h                 |  21
-rw-r--r--  include/trace/events/writeback.h        |  38
-rw-r--r--  include/uapi/asm-generic/mman-common.h  |   3
-rw-r--r--  include/uapi/linux/coff.h               |   5
17 files changed, 171 insertions, 124 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7357a3c942a0..384b5c835ced 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
#define BUGFLAG_WARNING (1 << 0)
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
@@ -61,18 +62,6 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
-#ifdef __WARN_FLAGS
-#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
-#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
-
-#define WARN_ON_ONCE(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_ONCE_TAINT(TAINT_WARN); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant kernel issues that need prompt attention if they should ever
@@ -89,27 +78,27 @@ struct bug_entry {
*
* Use the versions with printk format strings to provide better diagnostics.
*/
-#ifndef __WARN_TAINT
-extern __printf(3, 4)
-void warn_slowpath_fmt(const char *file, const int line,
- const char *fmt, ...);
+#ifndef __WARN_FLAGS
extern __printf(4, 5)
-void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
- const char *fmt, ...);
-extern void warn_slowpath_null(const char *file, const int line);
-#define WANT_WARN_ON_SLOWPATH
-#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
-#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
-#define __WARN_printf_taint(taint, arg...) \
- warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
+ const char *fmt, ...);
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#define __WARN_printf(taint, arg...) \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() do { \
- printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
-} while (0)
-#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
-#define __WARN_printf_taint(taint, arg...) \
- do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#define __WARN_printf(taint, arg...) do { \
+ __warn_printk(arg); \
+ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ } while (0)
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
#endif
/* used internally by panic.c */
@@ -132,7 +121,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
+ __WARN_printf(TAINT_WARN, format); \
unlikely(__ret_warn_on); \
})
#endif
@@ -140,7 +129,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN_TAINT(condition, taint, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf_taint(taint, format); \
+ __WARN_printf(taint, format); \
unlikely(__ret_warn_on); \
})
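With this change, architectures that provide __WARN_FLAGS get WARN_ON_ONCE() built directly on top of it, and the old warn_slowpath_fmt_taint()/warn_slowpath_null() entry points collapse into a single warn_slowpath_fmt() that always carries a taint argument. Call sites are unaffected; a minimal sketch of the usual usage (the surrounding function and struct are made up for illustration):

struct example_req { const char *name; unsigned int len; };

static int example_submit(struct example_req *req)
{
        /* Warns at most once per boot and taints the kernel with TAINT_WARN. */
        if (WARN_ON_ONCE(!req))
                return -EINVAL;

        /* WARN() now expands to __WARN_printf(TAINT_WARN, format...). */
        if (WARN(req->len == 0, "zero-length request from %s\n", req->name))
                return -EINVAL;

        return 0;
}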
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 6f8cc06ee44e..73f7421413cb 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
@@ -63,7 +63,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
pte = alloc_page(gfp);
if (!pte)
return NULL;
- if (!pgtable_page_ctor(pte)) {
+ if (!pgtable_pte_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
@@ -76,7 +76,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* Return: `struct page` initialized as page table or %NULL on error
*/
@@ -98,7 +98,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_page_dtor(pte_page);
+ pgtable_pte_page_dtor(pte_page);
__free_page(pte_page);
}
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b5a5a1ed9efd..78a73eba64dd 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -200,8 +200,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_wrap(cpu, mask, start) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#define for_each_cpu_and(cpu, mask1, mask2) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
#else
/**
* cpumask_first - get the first cpu in a cpumask
@@ -290,20 +290,20 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the first cpumask pointer
- * @and: the second cpumask pointer
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
- * cpumask_and(&tmp, &mask, &and);
+ * cpumask_and(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_and(cpu, mask, and) \
+#define for_each_cpu_and(cpu, mask1, mask2) \
for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask), (and)), \
+ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
(cpu) < nr_cpu_ids;)
#endif /* SMP */
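The rename only touches the macro's parameter names ('and' was easy to misread as an operator); behaviour and existing call sites are unchanged. A small usage sketch:

/* Count the CPUs present in both masks without allocating a temporary cpumask. */
static unsigned int count_common_cpus(const struct cpumask *mask1,
                                      const struct cpumask *mask2)
{
        unsigned int cpu, n = 0;

        for_each_cpu_and(cpu, mask1, mask2)
                n++;
        return n;
}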
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 855476145fe1..aaa8a0767aa3 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -30,26 +30,8 @@
\
/* Callbacks for augmented rbtree insert and remove */ \
\
-static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
-{ \
- ITTYPE max = ITLAST(node), subtree_last; \
- if (node->ITRB.rb_left) { \
- subtree_last = rb_entry(node->ITRB.rb_left, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- if (node->ITRB.rb_right) { \
- subtree_last = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- return max; \
-} \
- \
-RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
- ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
+RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
+ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
\
/* Insert / remove interval nodes from the tree */ \
\
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f0b809258ed3..cc162f3e6461 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -183,6 +183,8 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
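arch_kexec_kernel_image_probe() gains a prototype here because it is a __weak symbol: the generic default in kernel/kexec_file.c applies unless an architecture supplies its own definition. A hedged sketch of what an override might look like (the length check is invented for illustration):

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
                                  unsigned long buf_len)
{
        /* Hypothetical sanity check before falling back to the generic probe,
         * which walks the kexec_file_ops registered for this architecture. */
        if (buf_len < 64)
                return -ENOEXEC;

        return kexec_image_probe_default(image, buf, buf_len);
}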
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index fbf144aaa749..b072aeb1fd78 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -326,8 +326,10 @@ extern atomic_t kgdb_active;
(raw_smp_processor_id() == atomic_read(&kgdb_active))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
+extern void kgdb_panic(const char *msg);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
+static inline void kgdb_panic(const char *msg) {}
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 294a67b94147..cc292273e6ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1949,7 +1949,7 @@ static inline void pgtable_init(void)
pgtable_cache_init();
}
-static inline bool pgtable_page_ctor(struct page *page)
+static inline bool pgtable_pte_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
@@ -1958,7 +1958,7 @@ static inline bool pgtable_page_ctor(struct page *page)
return true;
}
-static inline void pgtable_page_dtor(struct page *page)
+static inline void pgtable_pte_page_dtor(struct page *page)
{
ptlock_free(page);
__ClearPageTable(page);
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index d7016dcb245e..c1bc6731125c 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -36,6 +36,10 @@ struct vmacache {
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
+/*
+ * When updating this, please also update struct resident_page_types[] in
+ * kernel/fork.c
+ */
enum {
MM_FILEPAGES, /* Resident file mapping pages */
MM_ANONPAGES, /* Resident anonymous pages */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cefd374c47b1..c09d67edda3a 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -488,13 +488,6 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
- dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
-#else
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len);
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
#else
static inline void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
@@ -526,4 +519,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
}
#endif
+/**
+ * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ *
+ * Calls print_hex_dump(), with log level of KERN_DEBUG,
+ * rowsize of 16, groupsize of 1, and ASCII output included.
+ */
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+
#endif
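Instead of two separate definitions (a dynamic-debug macro and an out-of-line function), print_hex_dump_bytes() is now always a thin wrapper around print_hex_dump_debug(), which already handles the CONFIG_DYNAMIC_DEBUG split. Typical usage is unchanged; a short sketch:

/* Dump a buffer at debug level: 16 bytes per row, 1-byte groups, ASCII column,
 * each row prefixed with "rx: " and the offset into the buffer. */
static void dump_rx(const void *buf, size_t len)
{
        print_hex_dump_bytes("rx: ", DUMP_PREFIX_OFFSET, buf, len);
}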
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 179faab29f52..fdd421b8d9ae 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,41 +60,87 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
- rbtype, rbaugmented, rbcompute) \
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
static inline void \
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
- rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
- rbtype augmented = rbcompute(node); \
- if (node->rbaugmented == augmented) \
+ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
+ if (RBCOMPUTE(node, true)) \
break; \
- node->rbaugmented = augmented; \
- rb = rb_parent(&node->rbfield); \
+ rb = rb_parent(&node->RBFIELD); \
} \
} \
static inline void \
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
} \
static void \
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
- old->rbaugmented = rbcompute(old); \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
+ RBCOMPUTE(old, false); \
} \
-rbstatic const struct rb_augment_callbacks rbname = { \
- .propagate = rbname ## _propagate, \
- .copy = rbname ## _copy, \
- .rotate = rbname ## _rotate \
+RBSTATIC const struct rb_augment_callbacks RBNAME = { \
+ .propagate = RBNAME ## _propagate, \
+ .copy = RBNAME ## _copy, \
+ .rotate = RBNAME ## _rotate \
};
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
+ RBTYPE, RBAUGMENTED, RBCOMPUTE) \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
+{ \
+ RBSTRUCT *child; \
+ RBTYPE max = RBCOMPUTE(node); \
+ if (node->RBFIELD.rb_left) { \
+ child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (node->RBFIELD.rb_right) { \
+ child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (exit && node->RBAUGMENTED == max) \
+ return true; \
+ node->RBAUGMENTED = max; \
+ return false; \
+} \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
#define RB_RED 0
#define RB_BLACK 1
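RB_DECLARE_CALLBACKS_MAX() generates the subtree-max computation that each user (such as the interval tree template above) previously open-coded. A hedged sketch of declaring callbacks for a node type that tracks the maximum of a per-node value over its subtree (names are illustrative):

struct my_node {
        struct rb_node rb;
        unsigned long last;             /* per-node scalar */
        unsigned long subtree_last;     /* max of 'last' across this subtree */
};

static inline unsigned long my_node_last(struct my_node *node)
{
        return node->last;
}

RB_DECLARE_CALLBACKS_MAX(static, my_augment, struct my_node, rb,
                         unsigned long, subtree_last, my_node_last)

/* Insert/erase with the generated callbacks, e.g.:
 *      rb_insert_augmented(&node->rb, &root, &my_augment);
 *      rb_erase_augmented(&node->rb, &root, &my_augment);
 */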
diff --git a/include/linux/string.h b/include/linux/string.h
index 4deb11f7976b..b2f9df7f0761 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -474,8 +474,9 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len,
* But this can lead to bugs due to typos, or if prefix is a pointer
* and not a constant. Instead use str_has_prefix().
*
- * Returns: 0 if @str does not start with @prefix
- strlen(@prefix) if @str does start with @prefix
+ * Returns:
+ * * strlen(@prefix) if @str starts with @prefix
+ * * 0 if @str does not start with @prefix
*/
static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
{
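The kernel-doc return description is rewritten into the usual "Returns:" list form. Typical usage of str_has_prefix() relies on the return value being the prefix length, so the match test and the skip-past-prefix step share one call; a short sketch (the parsed option string is made up):

static void parse_opt(const char *arg)
{
        size_t len = str_has_prefix(arg, "mode=");

        if (len)                        /* len == strlen("mode=") on a match */
                pr_debug("mode is %s\n", arg + len);
}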
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de2c67a33b7e..063c0c1e112b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -340,6 +340,7 @@ extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
+extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
@@ -364,6 +365,7 @@ extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
+extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 8d8821b3689a..659a4400517b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count)
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-static __always_inline bool
+static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
int sz = __compiletime_object_size(addr);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 34a038563d97..70bbdc38dc37 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -55,7 +55,7 @@
* as usual) and both source and destination can trigger faults.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
@@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return raw_copy_from_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
@@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
* The caller should also make sure he pins the user space address
* so that we don't result in page fault and sleep.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
@@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
return raw_copy_to_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
@@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
return res;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
return n;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
#ifndef ARCH_HAS_NOCACHE_UACCESS
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
- const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
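All of these helpers return the number of bytes that could not be copied; __must_check makes the compiler warn whenever a caller silently drops that value. A minimal sketch of the calling pattern the annotation enforces:

static int read_user_blob(void *dst, const void __user *src, unsigned long len)
{
        /* copy_from_user() is built on the helpers above; a nonzero return
         * means that many bytes were left uncopied. */
        if (copy_from_user(dst, src, len))
                return -EFAULT;
        return 0;
}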
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 3a27335fce2c..c2ce6480b4b1 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -66,8 +66,9 @@ DECLARE_EVENT_CLASS(writeback_page_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
+ 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -110,8 +111,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
- strncpy(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->flags = flags;
@@ -316,8 +317,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -360,8 +361,9 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name,
- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ wb->bdi->dev ? dev_name(wb->bdi->dev) :
+ "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -414,7 +416,7 @@ DECLARE_EVENT_CLASS(writeback_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: cgroup_ino=%u",
@@ -436,7 +438,7 @@ TRACE_EVENT(writeback_bdi_register,
__array(char, name, 32)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
),
TP_printk("bdi %s",
__entry->name
@@ -461,7 +463,7 @@ DECLARE_EVENT_CLASS(wbc_class,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
@@ -512,7 +514,7 @@ TRACE_EVENT(writeback_queue_io,
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -598,7 +600,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
),
TP_fast_assign(
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->write_bw = KBps(wb->write_bandwidth);
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
@@ -663,7 +665,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -723,8 +725,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
@@ -797,8 +799,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
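The strncpy()-to-strscpy_pad() conversion matters for these fixed 32-byte trace fields: strncpy() does not NUL-terminate when the source is 32 characters or longer, and plain strscpy() would leave stale bytes after the terminator in the ring-buffer entry. A sketch of the pattern these hunks converge on (the struct device argument stands in for the bdi/inode device-name sources above):

static void fill_trace_name(char name[32], const struct device *dev)
{
        /* Old: strncpy() leaves 'name' unterminated when dev_name() >= 32 chars.
         * New: always NUL-terminated, remainder zero-padded, so no stale bytes
         * end up in the fixed-size field recorded in the trace ring buffer. */
        strscpy_pad(name, dev_name(dev), 32);
}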
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 63b1f506ea67..c160a5354eb6 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -67,6 +67,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
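From userspace, both new hints go through madvise(): MADV_COLD moves the pages onto the inactive list so they are reclaimed first under memory pressure, while MADV_PAGEOUT reclaims them immediately; in either case the contents stay valid and are faulted back in on the next access. A minimal sketch (older kernels return EINVAL for the new advice values, and older C library headers may not define the constants yet):

#include <stdio.h>
#include <sys/mman.h>

/* Hint that 'buf' will not be needed soon and can be reclaimed now. */
static void drop_cache_hint(void *buf, size_t len)
{
        if (madvise(buf, len, MADV_PAGEOUT) != 0)
                perror("madvise(MADV_PAGEOUT)");
}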
diff --git a/include/uapi/linux/coff.h b/include/uapi/linux/coff.h
index e4a79f80b9a0..ab5c7e847eed 100644
--- a/include/uapi/linux/coff.h
+++ b/include/uapi/linux/coff.h
@@ -11,6 +11,9 @@
more information about COFF, then O'Reilly has a very excellent book.
*/
+#ifndef _UAPI_LINUX_COFF_H
+#define _UAPI_LINUX_COFF_H
+
#define E_SYMNMLEN 8 /* Number of characters in a symbol name */
#define E_FILNMLEN 14 /* Number of characters in a file name */
#define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */
@@ -350,3 +353,5 @@ struct COFF_reloc {
/* For new sections we haven't heard of before */
#define COFF_DEF_SECTION_ALIGNMENT 4
+
+#endif /* _UAPI_LINUX_COFF_H */