author    Stephen Rothwell <sfr@canb.auug.org.au>    2009-12-04 15:15:54 +1100
committer Stephen Rothwell <sfr@canb.auug.org.au>    2009-12-04 15:15:54 +1100
commit    c66186407abf197de2e8c16107f34b70bf009090 (patch)
tree      01903529d52e21c4efd1989693a5aa39efee639e /include
parent    8f02afb667daa37048eb2800e0ed7ac676308c52 (diff)
parent    3ca05e47275843eacefb421f08fd085e67cf302a (diff)
Merge remote branch 'slab/for-next'
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rcu_types.h |  18
-rw-r--r--  include/linux/rcupdate.h  |  11
-rw-r--r--  include/linux/slab.h      |  10
-rw-r--r--  include/linux/slqb_def.h  | 300
4 files changed, 327 insertions(+), 12 deletions(-)
diff --git a/include/linux/rcu_types.h b/include/linux/rcu_types.h
new file mode 100644
index 000000000000..fd3570d0e6c1
--- /dev/null
+++ b/include/linux/rcu_types.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_RCU_TYPES_H
+#define __LINUX_RCU_TYPES_H
+
+#ifdef __KERNEL__
+
+/**
+ * struct rcu_head - callback structure for use with RCU
+ * @next: next update request in a list
+ * @func: actual update function to call after the grace period.
+ */
+struct rcu_head {
+ struct rcu_head *next;
+ void (*func)(struct rcu_head *head);
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __LINUX_RCU_TYPES_H */
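
The new header isolates struct rcu_head so that other headers (slqb_def.h below includes it directly) can embed an rcu_head without pulling in all of rcupdate.h. As a reminder of the pattern this enables, here is a minimal sketch using the call_rcu() API declared in rcupdate.h; struct foo and the two helpers are illustrative names, not part of this patch:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct foo {
		int data;
		struct rcu_head rcu;	/* reclaimed after a grace period */
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		/* Recover the enclosing object from its embedded rcu_head. */
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release(struct foo *f)
	{
		/* Defer the kfree() until all current RCU readers are done. */
		call_rcu(&f->rcu, foo_free_rcu);
	}
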
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 3ebd0b7bcb08..af5b8af505bf 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -33,6 +33,7 @@
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
+#include <linux/rcu_types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
@@ -41,16 +42,6 @@
#include <linux/lockdep.h>
#include <linux/completion.h>
-/**
- * struct rcu_head - callback structure for use with RCU
- * @next: next update requests in a list
- * @func: actual update function to call after the grace period.
- */
-struct rcu_head {
- struct rcu_head *next;
- void (*func)(struct rcu_head *head);
-};
-
/* Exported common interfaces */
#ifdef CONFIG_TREE_PREEMPT_RCU
extern void synchronize_rcu(void);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2da8372519f5..62d2b16d9ff2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -74,6 +74,10 @@
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
+
+/* The following bits are reserved for allocator-specific flags */
+#define SLAB_ALLOC_PRIVATE 0x000000ffUL
+
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -160,6 +164,8 @@ size_t ksize(const void *);
*/
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
+#elif defined(CONFIG_SLQB)
+#include <linux/slqb_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
@@ -262,7 +268,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || defined(CONFIG_SLQB_DEBUG)
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -280,7 +286,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
* standard allocator where we care about the real place the memory
* allocation request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || defined(CONFIG_SLQB_DEBUG)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
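
With CONFIG_SLQB_DEBUG added to these conditionals, SLQB's debug build gains the same caller tracking as SLAB and SLUB: kmalloc_track_caller() records the address of the wrapper's caller rather than the wrapper itself. A minimal sketch of the situation the comment describes, using a hypothetical kstrdup()-style helper:

	#include <linux/slab.h>
	#include <linux/string.h>

	static char *my_strdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		/*
		 * The macro passes _RET_IP_, so the debug allocator
		 * attributes this allocation to my_strdup()'s caller
		 * instead of to my_strdup() itself.
		 */
		char *buf = kmalloc_track_caller(len, gfp);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}
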
diff --git a/include/linux/slqb_def.h b/include/linux/slqb_def.h
new file mode 100644
index 000000000000..1cb68cc96630
--- /dev/null
+++ b/include/linux/slqb_def.h
@@ -0,0 +1,300 @@
+#ifndef _LINUX_SLQB_DEF_H
+#define _LINUX_SLQB_DEF_H
+
+/*
+ * SLQB: A slab allocator with object queues.
+ *
+ * (C) 2008 Nick Piggin <npiggin@suse.de>
+ */
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/rcu_types.h>
+#include <linux/mm_types.h>
+#include <linux/kernel.h>
+
+#define SLAB_NUMA 0x00000001UL /* shortcut */
+
+enum stat_item {
+ ALLOC, /* Allocation count */
+ ALLOC_SLAB_FILL, /* Fill freelist from page list */
+ ALLOC_SLAB_NEW, /* New slab acquired from page allocator */
+ FREE, /* Free count */
+ FREE_REMOTE, /* NUMA: freeing to remote list */
+ FLUSH_FREE_LIST, /* Freelist flushed */
+ FLUSH_FREE_LIST_OBJECTS, /* Objects flushed from freelist */
+ FLUSH_FREE_LIST_REMOTE, /* Objects flushed from freelist to remote */
+ FLUSH_SLAB_PARTIAL, /* Freeing moves slab to partial list */
+ FLUSH_SLAB_FREE, /* Slab freed to the page allocator */
+ FLUSH_RFREE_LIST, /* Rfree list flushed */
+ FLUSH_RFREE_LIST_OBJECTS, /* Rfree objects flushed */
+ CLAIM_REMOTE_LIST, /* Remote freed list claimed */
+ CLAIM_REMOTE_LIST_OBJECTS, /* Remote freed objects claimed */
+ NR_SLQB_STAT_ITEMS
+};
+
+/*
+ * Singly-linked list with head, tail, and nr
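+ *
+ * head points at the first free object and tail at the last; nr counts
+ * the objects on the list. Keeping a tail pointer lets a whole batch of
+ * objects be spliced onto another list in O(1).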
+ */
+struct kmlist {
+ unsigned long nr;
+ void **head;
+ void **tail;
+};
+
+/*
+ * Every kmem_cache_list has a kmem_cache_remote_free structure, by which
+ * objects can be returned to the kmem_cache_list from remote CPUs.
+ */
+struct kmem_cache_remote_free {
+ spinlock_t lock;
+ struct kmlist list;
+} ____cacheline_aligned;
+
+/*
+ * A kmem_cache_list manages all the slabs and objects allocated from a given
+ * source. Per-cpu kmem_cache_lists allow node-local allocations. Per-node
+ * kmem_cache_lists allow off-node allocations (but require locking).
+ */
+struct kmem_cache_list {
+ /* Fastpath LIFO freelist of objects */
+ struct kmlist freelist;
+#ifdef CONFIG_SMP
+ /* remote_free has reached a watermark */
+ int remote_free_check;
+#endif
+ /* kmem_cache corresponding to this list */
+ struct kmem_cache *cache;
+
+ /* Number of partial slabs (pages) */
+ unsigned long nr_partial;
+
+ /* Slabs which have some free objects */
+ struct list_head partial;
+
+ /* Total number of slabs allocated */
+ unsigned long nr_slabs;
+
+ /* Protects nr_partial, nr_slabs, and partial */
+ spinlock_t page_lock;
+
+#ifdef CONFIG_SMP
+ /*
+ * In the case of per-cpu lists, remote_free is for objects freed by a
+ * non-owner CPU back to their home list. For per-node lists, remote_free
+ * is always used to free objects.
+ */
+ struct kmem_cache_remote_free remote_free;
+#endif
+
+#ifdef CONFIG_SLQB_STATS
+ unsigned long stats[NR_SLQB_STAT_ITEMS];
+#endif
+} ____cacheline_aligned;
+
+/*
+ * Primary per-cpu, per-kmem_cache structure.
+ */
+struct kmem_cache_cpu {
+ struct kmem_cache_list list; /* List for node-local slabs */
+ unsigned int colour_next; /* Next colour offset to use */
+
+#ifdef CONFIG_SMP
+ /*
+ * rlist is a list of objects that don't fit on list.freelist (i.e.
+ * wrong node). The objects all correspond to a given kmem_cache_list,
+ * remote_cache_list. To free objects to another list, we must first
+ * flush the existing objects, then switch remote_cache_list.
+ *
+ * An NR_CPUS or MAX_NUMNODES array would be nice here, but then we
+ * would get into an O(NR_CPUS^2) memory consumption situation.
+ */
+ struct kmlist rlist;
+ struct kmem_cache_list *remote_cache_list;
+#endif
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Per-node, per-kmem_cache structure. Used for node-specific allocations.
+ */
+struct kmem_cache_node {
+ struct kmem_cache_list list;
+ spinlock_t list_lock; /* protects access to list */
+} ____cacheline_aligned;
+
+/*
+ * Management object for a slab cache.
+ */
+struct kmem_cache {
+ unsigned long flags;
+ int hiwater; /* LIFO list high watermark */
+ int freebatch; /* LIFO freelist batch flush size */
+#ifdef CONFIG_SMP
+ struct kmem_cache_cpu **cpu_slab; /* dynamic per-cpu structures */
+#else
+ struct kmem_cache_cpu cpu_slab;
+#endif
+ int objsize; /* Size of object without metadata */
+ int offset; /* Free pointer offset. */
+ int objects; /* Number of objects in slab */
+
+#ifdef CONFIG_NUMA
+ struct kmem_cache_node **node_slab; /* dynamic per-node structures */
+#endif
+
+ int size; /* Size of object including metadata */
+ int order; /* Allocation order */
+ gfp_t allocflags; /* gfp flags to use on allocation */
+ unsigned int colour_range; /* range of colour counter */
+ unsigned int colour_off; /* offset per colour */
+ void (*ctor)(void *);
+
+ const char *name; /* Name (only for display!) */
+ struct list_head list; /* List of slab caches */
+
+ int align; /* Alignment */
+ int inuse; /* Offset to metadata */
+
+#ifdef CONFIG_SLQB_SYSFS
+ struct kobject kobj; /* For sysfs */
+#endif
+} ____cacheline_aligned;
+
+/*
+ * Kmalloc subsystem.
+ */
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
+#define KMALLOC_SHIFT_SLQB_HIGH (PAGE_SHIFT + \
+ ((9 <= (MAX_ORDER - 1)) ? 9 : (MAX_ORDER - 1)))
+
+extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_SLQB_HIGH + 1];
+extern struct kmem_cache kmalloc_caches_dma[KMALLOC_SHIFT_SLQB_HIGH + 1];
+
+/*
+ * Constant-size allocations use this path to find the index into the
+ * kmalloc cache arrays. The get_slab() function is used for non-constant
+ * sizes.
+ */
+static __always_inline int kmalloc_index(size_t size)
+{
+ extern int ____kmalloc_too_large(void);
+
+ if (unlikely(size <= KMALLOC_MIN_SIZE))
+ return KMALLOC_SHIFT_LOW;
+
+#if L1_CACHE_BYTES < 64
+ if (size > 64 && size <= 96)
+ return 1;
+#endif
+#if L1_CACHE_BYTES < 128
+ if (size > 128 && size <= 192)
+ return 2;
+#endif
+ if (size <= 8) return 3;
+ if (size <= 16) return 4;
+ if (size <= 32) return 5;
+ if (size <= 64) return 6;
+ if (size <= 128) return 7;
+ if (size <= 256) return 8;
+ if (size <= 512) return 9;
+ if (size <= 1024) return 10;
+ if (size <= 2 * 1024) return 11;
+ if (size <= 4 * 1024) return 12;
+ if (size <= 8 * 1024) return 13;
+ if (size <= 16 * 1024) return 14;
+ if (size <= 32 * 1024) return 15;
+ if (size <= 64 * 1024) return 16;
+ if (size <= 128 * 1024) return 17;
+ if (size <= 256 * 1024) return 18;
+ if (size <= 512 * 1024) return 19;
+ if (size <= 1024 * 1024) return 20;
+ if (size <= 2 * 1024 * 1024) return 21;
+ if (size <= 4 * 1024 * 1024) return 22;
+ if (size <= 8 * 1024 * 1024) return 23;
+ if (size <= 16 * 1024 * 1024) return 24;
+ if (size <= 32 * 1024 * 1024) return 25;
+ return ____kmalloc_too_large();
+}
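+
+/*
+ * For example, a constant-size kmalloc(100) resolves at compile time:
+ * 100 falls through to the "size <= 128" test above, so kmalloc_index()
+ * returns 7 and the request is served from the 128-byte (2^7) cache.
+ */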
+
+#ifdef CONFIG_ZONE_DMA
+#define SLQB_DMA __GFP_DMA
+#else
+/* Disable "DMA slabs" */
+#define SLQB_DMA (__force gfp_t)0
+#endif
+
+/*
+ * Find the kmalloc slab cache for a given combination of allocation flags
+ * and size. Should really only be used for constant 'size' arguments: with
+ * a non-constant size the inlined comparison chain only bloats the code.
+ */
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+{
+ int index;
+
+ if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
+ return NULL;
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
+
+ index = kmalloc_index(size);
+ if (likely(!(flags & SLQB_DMA)))
+ return &kmalloc_caches[index];
+ else
+ return &kmalloc_caches_dma[index];
+}
+
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#define KMALLOC_HEADER (ARCH_KMALLOC_MINALIGN < sizeof(void *) ? \
+ sizeof(void *) : ARCH_KMALLOC_MINALIGN)
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ struct kmem_cache *s;
+
+ s = kmalloc_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+ return kmem_cache_alloc(s, flags);
+ }
+ return __kmalloc(size, flags);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size)) {
+ struct kmem_cache *s;
+
+ s = kmalloc_slab(size, flags);
+ if (unlikely(ZERO_OR_NULL_PTR(s)))
+ return s;
+
+ return kmem_cache_alloc_node(s, flags, node);
+ }
+ return __kmalloc_node(size, flags, node);
+}
+#endif
+
+#endif /* _LINUX_SLQB_DEF_H */
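
Taken together, the header gives kmalloc() a compile-time fast path: when the size is constant, kmalloc_slab() and kmalloc_index() fold down to a single kmalloc_caches[] entry and the call becomes a direct kmem_cache_alloc(). A minimal usage sketch under the standard slab API; the widget names are illustrative, not part of this patch:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	struct widget {
		int id;
		char name[32];
	};

	static struct kmem_cache *widget_cache;

	static int __init widget_init(void)
	{
		/* A dedicated cache; SLQB manages its object queues per-CPU. */
		widget_cache = kmem_cache_create("widget",
				sizeof(struct widget), 0,
				SLAB_HWCACHE_ALIGN, NULL);
		return widget_cache ? 0 : -ENOMEM;
	}

	static struct widget *widget_alloc(void)
	{
		/*
		 * For a constant-size kmalloc() this would instead be the
		 * kmalloc_caches[kmalloc_index(size)] entry chosen above.
		 */
		return kmem_cache_alloc(widget_cache, GFP_KERNEL);
	}

	static void widget_free(struct widget *w)
	{
		kmem_cache_free(widget_cache, w);
	}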