author		Christoph Lameter <cl@linux.com>	2013-01-10 19:14:19 +0000
committer	Pekka Enberg <penberg@kernel.org>	2013-02-01 12:32:09 +0200
commit		ca34956b804b7554fc4e88826773380d9d5122a8 (patch)
tree		5fbcbd881ebe6e0229f59ff97f7d7a36ccd6e004
parent		ce8eb6c424c794d7fb4d1a6667d267990ca28072 (diff)
slab: Common definition for kmem_cache_node
Put the definitions for the kmem_cache_node structures together so that
we have one structure. That will allow us to create more common fields in
the future which could yield more opportunities to share code.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
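To see the kind of sharing a common definition enables: once both allocators
use the same structure with list_lock as a shared field, allocator-independent
code can be written against it. A minimal sketch of such a helper follows;
lock_slab_node is hypothetical, for illustration only, and is not introduced
by this patch:

	/*
	 * Hypothetical helper, not part of this patch: it works for
	 * both SLAB and SLUB because list_lock is now a common field
	 * of the shared kmem_cache_node definition.
	 */
	static inline void lock_slab_node(struct kmem_cache_node *n)
	{
		spin_lock(&n->list_lock);
	}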
-rw-r--r--	include/linux/slub_def.h	11
-rw-r--r--	mm/slab.c			17
-rw-r--r--	mm/slab.h			32
3 files changed, 32 insertions(+), 28 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 16341e5316de..027276fa8713 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -53,17 +53,6 @@ struct kmem_cache_cpu {
#endif
};

-struct kmem_cache_node {
- spinlock_t list_lock; /* Protect partial list and nr_partial */
- unsigned long nr_partial;
- struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
- atomic_long_t nr_slabs;
- atomic_long_t total_objects;
- struct list_head full;
-#endif
-};
-
/*
* Word size structure that can be atomically updated or read and that
* contains both the order and the number of objects that a slab of the
diff --git a/mm/slab.c b/mm/slab.c
index c162b2eb493a..17f859614546 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -286,23 +286,6 @@ struct arraycache_init {
};

/*
- * The slab lists for all objects.
- */
-struct kmem_cache_node {
- struct list_head slabs_partial; /* partial list first, better asm code */
- struct list_head slabs_full;
- struct list_head slabs_free;
- unsigned long free_objects;
- unsigned int free_limit;
- unsigned int colour_next; /* Per-node cache coloring */
- spinlock_t list_lock;
- struct array_cache *shared; /* shared per node */
- struct array_cache **alien; /* on other nodes */
- unsigned long next_reap; /* updated without locking */
- int free_touched; /* updated without locking */
-};
-
-/*
* Need this for bootstrapping a per node allocator.
*/
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
diff --git a/mm/slab.h b/mm/slab.h
index f0a552ff7b9b..f96b49e4704e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -239,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return s;
}
#endif
+
+
+/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+ spinlock_t list_lock;
+
+#ifdef CONFIG_SLAB
+ struct list_head slabs_partial; /* partial list first, better asm code */
+ struct list_head slabs_full;
+ struct list_head slabs_free;
+ unsigned long free_objects;
+ unsigned int free_limit;
+ unsigned int colour_next; /* Per-node cache coloring */
+ struct array_cache *shared; /* shared per node */
+ struct array_cache **alien; /* on other nodes */
+ unsigned long next_reap; /* updated without locking */
+ int free_touched; /* updated without locking */
+#endif
+
+#ifdef CONFIG_SLUB
+ unsigned long nr_partial;
+ struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+ atomic_long_t nr_slabs;
+ atomic_long_t total_objects;
+ struct list_head full;
+#endif
+#endif
+
+};
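For reference, with CONFIG_SLUB=y and CONFIG_SLUB_DEBUG=y the shared
definition above preprocesses down to the same fields that this patch
removes from slub_def.h (an illustrative expansion, not code from the
patch itself):

	struct kmem_cache_node {
		spinlock_t list_lock;	/* Protect partial list and nr_partial */
		unsigned long nr_partial;
		struct list_head partial;
		atomic_long_t nr_slabs;	/* CONFIG_SLUB_DEBUG only */
		atomic_long_t total_objects;
		struct list_head full;
	};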