author     Stephen Rothwell <sfr@canb.auug.org.au>  2014-07-11 13:18:18 +1000
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2014-07-11 13:18:18 +1000
commit     ce3eea761281eccb9d8c15dee7adf77ead763500 (patch)
tree       192773f0472344ee88cf8a75a987ac5242571ceb /mm/slub.c
parent     1ba91690f792ac5b75ccbdbbfc0dcd0fb7b21283 (diff)
parent     201ed0edb7123017f84d5e9e07a434a3a904d474 (diff)
Merge remote-tracking branch 'slab/for-next'
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 73004808537e..8c24a23fdafa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -382,9 +382,9 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
-			freelist_old, counters_old,
-			freelist_new, counters_new))
-		return 1;
+				   freelist_old, counters_old,
+				   freelist_new, counters_new))
+			return 1;
 	} else
 #endif
 	{
@@ -418,9 +418,9 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
-			freelist_old, counters_old,
-			freelist_new, counters_new))
-		return 1;
+				   freelist_old, counters_old,
+				   freelist_new, counters_new))
+			return 1;
 	} else
 #endif
 	{
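
The two hunks above are whitespace-only cleanups: the continuation arguments are re-aligned under the opening parenthesis and the mis-indented return 1; is moved one level into the body of the if. The call being reindented, cmpxchg_double(), atomically compares and exchanges two adjacent words (here page->freelist and page->counters), which is what lets SLUB update a slab's freelist and its counters as a single unit without taking a lock. Below is a minimal userspace sketch of that double-word compare-and-swap idea using the GCC/Clang __atomic builtins on a 16-byte pair; struct freelist_pair and try_update_pair are illustrative names, not kernel API.

/* Userspace sketch of the double-word compare-and-swap pattern that
 * SLUB's cmpxchg_double() relies on.  Illustrative only, not kernel API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Two adjacent words updated as one atomic unit, analogous to
 * page->freelist and page->counters in the hunks above. */
struct freelist_pair {
	void *freelist;
	uintptr_t counters;
} __attribute__((aligned(16)));		/* cmpxchg16b requires 16-byte alignment */

static bool try_update_pair(struct freelist_pair *p,
			    struct freelist_pair old,
			    struct freelist_pair new)
{
	/* Succeeds only if *both* words still hold their expected values. */
	return __atomic_compare_exchange(p, &old, &new, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct freelist_pair p = { NULL, 0 };
	struct freelist_pair old = p;
	struct freelist_pair new = { &p, 1 };

	printf("first update: %d\n", try_update_pair(&p, old, new)); /* 1 */
	printf("stale update: %d\n", try_update_pair(&p, old, new)); /* 0 */
	return 0;
}

Build with something like cc -mcx16 pair.c so the exchange can use the x86-64 cmpxchg16b instruction; depending on the toolchain it may instead go through libatomic (add -latomic).
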
@@ -3199,12 +3199,13 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 /*
  * Attempt to free all partial slabs on a node.
  * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * using the cache, but we still have to lock for lockdep's sake.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			__remove_partial(n, page);
@@ -3214,6 +3215,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			"Objects remaining in %s on kmem_cache_close()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
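
The last two hunks take n->list_lock around the partial-list walk in free_partial(). Even though kmem_cache_close() runs when no other user of the cache can remain, taking the lock keeps the lock-usage annotations that lockdep tracks consistent with every other place the partial list is walked; note also that list_for_each_entry_safe() is used because entries are unlinked mid-walk. Below is a self-contained userspace sketch of the same hold-the-lock-across-a-deletion-safe-walk shape. All names are illustrative; the sketch uses a pointer-to-pointer walk where the kernel uses list_for_each_entry_safe(), but the effect, safe unlinking during iteration, is the same, and the comments map each step to its rough SLUB counterpart.

/* Userspace sketch of the locking pattern free_partial() has after this
 * patch: lock, walk the partial list with deletion-safe iteration, free
 * what is unused, unlock.  Illustrative only, not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct partial_slab {
	int inuse;			/* objects still allocated from this slab */
	struct partial_slab *next;
};

static struct partial_slab *partial_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_partial_sketch(void)
{
	struct partial_slab **pp, *slab;

	pthread_mutex_lock(&list_lock);		/* kernel: spin_lock_irq(&n->list_lock) */
	pp = &partial_head;
	while ((slab = *pp) != NULL) {
		if (!slab->inuse) {
			*pp = slab->next;	/* unlink, like __remove_partial() */
			free(slab);		/* release the slab's memory */
		} else {
			fprintf(stderr, "objects remaining in slab %p\n",
				(void *)slab);
			pp = &slab->next;
		}
	}
	pthread_mutex_unlock(&list_lock);	/* kernel: spin_unlock_irq(&n->list_lock) */
}

int main(void)
{
	/* Build a three-entry partial list: two empty slabs, one still in use. */
	static const int inuse[] = { 0, 2, 0 };

	for (unsigned int i = 0; i < 3; i++) {
		struct partial_slab *slab = malloc(sizeof(*slab));

		if (!slab)
			abort();
		slab->inuse = inuse[i];
		slab->next = partial_head;
		partial_head = slab;
	}
	free_partial_sketch();
	return 0;
}
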