Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  91
1 files changed, 63 insertions, 28 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 1e3d0ec4e200..54ea651bb6a4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -303,11 +303,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
__p < (__addr) + (__objects) * (__s)->size; \
__p += (__s)->size)
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
- for (__p = fixup_red_left(__s, __addr), __idx = 1; \
- __idx <= __objects; \
- __p += (__s)->size, __idx++)
-
/* Determine object index from a given position */
static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
{
@@ -507,6 +502,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
base = page_address(page);
+ object = kasan_reset_tag(object);
object = restore_red_left(s, object);
if (object < base || object >= base + page->objects * s->size ||
(object - base) % s->size) {
@@ -684,7 +680,10 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
print_section(KERN_ERR, "Padding ", p + off,
size_from_object(s) - off);
- dump_stack();
+ if (unlikely(s->flags & SLAB_WARN_ON_ERROR))
+ WARN_ON(1);
+ else
+ dump_stack();
}
void object_err(struct kmem_cache *s, struct page *page,
@@ -705,7 +704,11 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
va_end(args);
slab_bug(s, "%s", buf);
print_page_info(page);
- dump_stack();
+
+ if (unlikely(s->flags & SLAB_WARN_ON_ERROR))
+ WARN_ON(1);
+ else
+ dump_stack();
}
static void init_object(struct kmem_cache *s, void *object, u8 val)
@@ -1076,8 +1079,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
}
static inline int alloc_consistency_checks(struct kmem_cache *s,
- struct page *page,
- void *object, unsigned long addr)
+ struct page *page, void *object)
{
if (!check_slab(s, page))
return 0;
@@ -1098,7 +1100,7 @@ static noinline int alloc_debug_processing(struct kmem_cache *s,
void *object, unsigned long addr)
{
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- if (!alloc_consistency_checks(s, page, object, addr))
+ if (!alloc_consistency_checks(s, page, object))
goto bad;
}
@@ -1254,6 +1256,9 @@ static int __init setup_slub_debug(char *str)
case 'a':
slub_debug |= SLAB_FAILSLAB;
break;
+ case 'w':
+ slub_debug |= SLAB_WARN_ON_ERROR;
+ break;
case 'o':
/*
* Avoid enabling debugging on caches if its minimum
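The new 'w' letter extends the existing slub_debug= boot parameter. Assuming the usual <flags>[,<cache>] syntax, something like slub_debug=W,kmalloc-128 would presumably enable only the warn-on-error behaviour for that one cache at boot, while the sysfs attribute added further down in this diff allows toggling it per cache at run time.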
@@ -1357,6 +1362,14 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#define disable_higher_order_debug 0
+static inline void metadata_access_enable(void)
+{
+}
+
+static inline void metadata_access_disable(void)
+{
+}
+
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
@@ -1374,8 +1387,9 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
*/
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
+ ptr = kasan_kmalloc_large(ptr, size, flags);
kmemleak_alloc(ptr, size, 1, flags);
- return kasan_kmalloc_large(ptr, size, flags);
+ return ptr;
}
static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1655,29 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
+ kasan_poison_slab(page);
+
start = page_address(page);
- if (unlikely(s->flags & SLAB_POISON))
+ if (unlikely(s->flags & SLAB_POISON)) {
+ metadata_access_enable();
memset(start, POISON_INUSE, PAGE_SIZE << order);
-
- kasan_poison_slab(page);
+ metadata_access_disable();
+ }
shuffle = shuffle_freelist(s, page);
if (!shuffle) {
- for_each_object_idx(p, idx, s, start, page->objects) {
- if (likely(idx < page->objects)) {
- next = p + s->size;
- next = setup_object(s, page, next);
- set_freepointer(s, p, next);
- } else
- set_freepointer(s, p, NULL);
- }
start = fixup_red_left(s, start);
start = setup_object(s, page, start);
page->freelist = start;
+ for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+ next = p + s->size;
+ next = setup_object(s, page, next);
+ set_freepointer(s, p, next);
+ p = next;
+ }
+ set_freepointer(s, p, NULL);
}
page->inuse = page->objects;
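For illustration, here is a minimal userspace sketch of the freelist-chaining pattern the hunk above adopts in place of for_each_object_idx(): every object stores a pointer to its successor and the last one terminates the chain with NULL. SLOT_SIZE, NR_OBJECTS and set_free_ptr() are illustrative stand-ins for s->size, page->objects and set_freepointer(); the real helper also honours the cache's free-pointer offset and, depending on config, obfuscates the stored pointer.

/* Standalone sketch, not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLOT_SIZE	64	/* stand-in for s->size */
#define NR_OBJECTS	8	/* stand-in for page->objects */

static void set_free_ptr(void *object, void *next)
{
	/* Freelist pointer lives at offset 0 in this toy version. */
	memcpy(object, &next, sizeof(next));
}

int main(void)
{
	char *start = calloc(NR_OBJECTS, SLOT_SIZE);
	char *p, *next;
	int idx;

	if (!start)
		return 1;

	/* Chain objects 0..N-2 to their successors, as in the new loop. */
	for (idx = 0, p = start; idx < NR_OBJECTS - 1; idx++) {
		next = p + SLOT_SIZE;
		set_free_ptr(p, next);
		p = next;
	}
	/* The last object terminates the freelist. */
	set_free_ptr(p, NULL);

	/* Walk the chain to show it is intact. */
	for (p = start; p; memcpy(&p, p, sizeof(p)))
		printf("object at %p\n", (void *)p);

	free(start);
	return 0;
}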
@@ -2111,7 +2127,7 @@ redo:
if (!lock) {
lock = 1;
/*
- * Taking the spinlock removes the possiblity
+ * Taking the spinlock removes the possibility
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -2235,8 +2251,8 @@ static void unfreeze_partials(struct kmem_cache *s,
}
/*
- * Put a page that was just frozen (in __slab_free) into a partial page
- * slot if available.
+ * Put a page that was just frozen (in __slab_free|get_partial_node) into a
+ * partial page slot if available.
*
* If we did not find a slot then simply move all the partials to the
* per node partial list.
@@ -2463,8 +2479,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
stat(s, ALLOC_SLAB);
c->page = page;
*pc = c;
- } else
- freelist = NULL;
+ }
return freelist;
}
@@ -4245,7 +4260,7 @@ void __init kmem_cache_init(void)
cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
slub_cpu_dead);
- pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
+ pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
cache_line_size(),
slub_min_order, slub_max_order, slub_min_objects,
nr_cpu_ids, nr_node_ids);
@@ -5220,6 +5235,25 @@ static ssize_t store_user_store(struct kmem_cache *s,
}
SLAB_ATTR(store_user);
+static ssize_t warn_on_error_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_WARN_ON_ERROR));
+}
+
+static ssize_t warn_on_error_store(struct kmem_cache *s,
+ const char *buf, size_t length)
+{
+ if (any_slab_objects(s))
+ return -EBUSY;
+
+ s->flags &= ~SLAB_WARN_ON_ERROR;
+ if (buf[0] == '1')
+ s->flags |= SLAB_WARN_ON_ERROR;
+
+ return length;
+}
+SLAB_ATTR(warn_on_error);
+
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
return 0;
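A sketch of flipping the new attribute from user space, assuming the usual /sys/kernel/slab/<cache>/ layout; "kmalloc-64" is only an example cache name. Per warn_on_error_store() above, the write is rejected with EBUSY once the cache already holds allocated objects.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical target cache; adjust to the cache under test. */
	const char *path = "/sys/kernel/slab/kmalloc-64/warn_on_error";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* warn_on_error_store() only looks at the first character. */
	if (write(fd, "1", 1) != 1)
		fprintf(stderr, "write: %s\n", strerror(errno));
	close(fd);
	return 0;
}

Boot-time enabling goes through the new 'w' character in the slub_debug= parameter, per the setup_slub_debug() hunk earlier in this diff.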
@@ -5428,6 +5462,7 @@ static struct attribute *slab_attrs[] = {
&validate_attr.attr,
&alloc_calls_attr.attr,
&free_calls_attr.attr,
+ &warn_on_error_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,