author	Stephen Rothwell <sfr@canb.auug.org.au>	2009-03-20 14:29:40 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2009-03-20 14:29:40 +1100
commit	de96a7e12cd59df6609063c396283e92fa2ef393 (patch)
tree	ffb403dab6350756522a7bfa08a0fc615d8367af /mm/slab.c
parent	70d453edddd16d44f67405cca5e4ad5917f86333 (diff)
parent	e601725c07e6a4855cea78d1f65219ba6be63a06 (diff)
Merge commit 'kmemleak/kmemleak'
Conflicts:
	Documentation/kernel-parameters.txt
	MAINTAINERS
	include/linux/percpu.h
	init/main.c
	kernel/module.c
	lib/Kconfig.debug
	mm/slob.c
	mm/vmalloc.c
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index dd66136b7dc0..3a8162ab81f9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -107,6 +107,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
+#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
@@ -178,13 +179,13 @@
SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS)
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#else
# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS)
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#endif
/*
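SLAB_NOLEAKTRACE has to appear in CREATE_MASK because kmem_cache_create() rejects any flag bits outside that mask (mm/slab.c of this era does BUG_ON(flags & ~CREATE_MASK)). A minimal sketch of that validation; check_cache_flags() is a hypothetical stand-in for the real check:

    /* any flag outside CREATE_MASK is treated as a caller bug */
    static int check_cache_flags(unsigned long flags)
    {
            if (flags & ~CREATE_MASK)
                    return -EINVAL;
            return 0;
    }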
@@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
struct array_cache *nc = NULL;
nc = kmalloc_node(memsize, GFP_KERNEL, node);
+ /*
+ * The array_cache structures contain pointers to free objects.
+ * However, when such objects are allocated or transferred to another
+ * cache the pointers are not cleared and they could be counted as
+ * valid references during a kmemleak scan. Therefore, kmemleak must
+ * not scan such objects.
+ */
+ kmemleak_no_scan(nc);
if (nc) {
nc->avail = 0;
nc->limit = entries;
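A note on ordering: kmemleak_no_scan() tolerates a NULL or ERR_PTR argument and becomes a no-op, which is why the call above may safely precede the if (nc) check. A minimal usage sketch of the same pattern, assuming only that behaviour; the struct and function names here are hypothetical:

    #include <linux/slab.h>
    #include <linux/kmemleak.h>

    /* hypothetical holder of recycled pointers, shaped like array_cache */
    struct ptr_stash {
            unsigned int avail;
            void *slots[16];        /* stale object pointers linger here */
    };

    static struct ptr_stash *stash_alloc(int node)
    {
            struct ptr_stash *s;

            s = kmalloc_node(sizeof(*s), GFP_KERNEL, node);
            /*
             * Stale slot contents would look like live references during
             * a scan and mask real leaks, so exclude the whole object.
             * A NULL result is tolerated by kmemleak_no_scan().
             */
            kmemleak_no_scan(s);
            if (s)
                    s->avail = 0;
            return s;
    }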
@@ -2621,6 +2630,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
/* Slab management obj is off-slab. */
slabp = kmem_cache_alloc_node(cachep->slabp_cache,
local_flags, nodeid);
+ /*
+ * If the first object in the slab is leaked (it's allocated
+ * but no one has a reference to it), we want to make sure
+ * kmemleak does not treat the ->s_mem pointer as a reference
+ * to the object. Otherwise we will not report the leak.
+ */
+ kmemleak_scan_area(slabp, offsetof(struct slab, list),
+ sizeof(struct list_head), local_flags);
if (!slabp)
return NULL;
} else {
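The call above uses the offset/length form of kmemleak_scan_area() from this kmemleak tree to narrow scanning to the list linkage of struct slab: the ->s_mem pointer to the first object must stay invisible to the scanner, or a leaked first object would always appear referenced. A sketch of the same idea, assuming that signature; names are hypothetical:

    #include <linux/kmemleak.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    /* hypothetical off-slab management header, shaped like struct slab */
    struct mgmt_hdr {
            struct list_head list;  /* genuine references live here */
            void *first_obj;        /* must NOT count as a reference */
    };

    static struct mgmt_hdr *mgmt_alloc(gfp_t gfp)
    {
            struct mgmt_hdr *h = kmalloc(sizeof(*h), gfp);

            /* scan only ->list; the rest of *h is ignored by kmemleak */
            kmemleak_scan_area(h, offsetof(struct mgmt_hdr, list),
                               sizeof(struct list_head), gfp);
            return h;
    }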
@@ -3141,6 +3158,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
STATS_INC_ALLOCMISS(cachep);
objp = cache_alloc_refill(cachep, flags);
}
+ /*
+ * To avoid a false negative, if an object that is in one of the
+ * per-CPU caches is leaked, we need to make sure kmemleak doesn't
+ * treat the array pointers as a reference to the object.
+ */
+ kmemleak_erase(&ac->entry[ac->avail]);
return objp;
}
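kmemleak_erase() wipes the array slot that held the object just handed out: ac->avail is decremented but entry[ac->avail] still contains the old pointer, and that stale copy would keep a leaked object looking referenced. A sketch of the pattern with hypothetical names:

    #include <linux/kmemleak.h>

    /* hypothetical per-CPU stack of free objects, like array_cache */
    struct obj_stack {
            unsigned int avail;
            void *entry[32];
    };

    static void *obj_stack_pop(struct obj_stack *st)
    {
            void *obj = NULL;

            if (st->avail) {
                    obj = st->entry[--st->avail];
                    /*
                     * The popped slot still holds the pointer; clear it so
                     * a scan cannot mistake it for a live reference.
                     */
                    kmemleak_erase(&st->entry[st->avail]);
            }
            return obj;
    }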
@@ -3360,6 +3383,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+ flags);
if (unlikely((flags & __GFP_ZERO) && ptr))
memset(ptr, 0, obj_size(cachep));
@@ -3415,6 +3440,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
objp = __do_cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+ flags);
prefetchw(objp);
if (unlikely((flags & __GFP_ZERO) && objp))
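Both allocation hunks above call kmemleak_alloc_recursive() rather than kmemleak_alloc() directly. The _recursive variant exists so that caches created with SLAB_NOLEAKTRACE, notably kmemleak's own metadata caches, never feed their allocations back into the tracker. Roughly, assuming the inline helper as defined in this kmemleak tree:

    /* sketch of the guard; the real helper is an inline in the slab headers */
    static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                                int min_count,
                                                unsigned long flags, gfp_t gfp)
    {
            /* caches flagged SLAB_NOLEAKTRACE stay invisible to kmemleak */
            if (!(flags & SLAB_NOLEAKTRACE))
                    kmemleak_alloc(ptr, size, min_count, gfp);
    }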
@@ -3530,6 +3557,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+ kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
/*
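On the free side, kmemleak_free_recursive() applies the same SLAB_NOLEAKTRACE guard so objects from untracked caches are not reported as freed either. Roughly, under the same assumption as the allocation-side sketch:

    static inline void kmemleak_free_recursive(const void *ptr,
                                               unsigned long flags)
    {
            if (!(flags & SLAB_NOLEAKTRACE))
                    kmemleak_free(ptr);
    }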