path: root/kernel/fork.c
diff options
authorGlauber Costa <>2012-12-18 14:22:18 -0800
committerLinus Torvalds <>2012-12-18 15:02:13 -0800
commit2ad306b17c0ac5a1b1f250d5f772aeb87fdf1eba (patch)
tree743e6092019959dd455091d75b3fab2aa7f0a2aa /kernel/fork.c
parentc8b2a36fb1597e9390cf4c1a7f2dd394dc7d7b17 (diff)
fork: protect architectures where THREAD_SIZE >= PAGE_SIZE against fork bombs
Because those architectures will draw their stacks directly from the page allocator, rather than the slab cache, we can directly pass the __GFP_KMEMCG flag, and issue the corresponding free_pages. This code path is taken when the architecture doesn't define CONFIG_ARCH_THREAD_INFO_ALLOCATOR (only ia64 seems to), and has THREAD_SIZE >= PAGE_SIZE. Luckily, most - if not all - of the remaining architectures fall in this category. This will guarantee that every stack page is accounted to the memcg the process currently lives on, and will cause the allocations to fail if they go over the limit. For the time being, I am defining a new variant of THREADINFO_GFP, not to mess with the other path. Once the slab is also tracked by memcg, we can get rid of that flag. Tested to successfully protect against :(){ :|:& };: Signed-off-by: Glauber Costa <> Acked-by: Frederic Weisbecker <> Acked-by: Kamezawa Hiroyuki <> Reviewed-by: Michal Hocko <> Cc: Christoph Lameter <> Cc: David Rientjes <> Cc: Greg Thelen <> Cc: Johannes Weiner <> Cc: JoonSoo Kim <> Cc: Mel Gorman <> Cc: Pekka Enberg <> Cc: Rik van Riel <> Cc: Suleiman Souhlal <> Cc: Tejun Heo <> Signed-off-by: Andrew Morton <> Signed-off-by: Linus Torvalds <>
Diffstat (limited to 'kernel/fork.c')
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index c36c4e301efe..85f6d536608d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -146,7 +146,7 @@ void __weak arch_release_thread_info(struct thread_info *ti)
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
int node)
- struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+ struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
return page ? page_address(page) : NULL;
@@ -154,7 +154,7 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
static inline void free_thread_info(struct thread_info *ti)
- free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+ free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
# else
static struct kmem_cache *thread_info_cache;