Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                         |    9
-rw-r--r--  lib/Kconfig.debug                   |   66
-rw-r--r--  lib/Kconfig.kgdb                    |   58
-rw-r--r--  lib/Makefile                        |    8
-rw-r--r--  lib/bitmap.c                        |  174
-rw-r--r--  lib/debugobjects.c                  |  890
-rw-r--r--  lib/devres.c                        |    4
-rw-r--r--  lib/div64.c                         |   35
-rw-r--r--  lib/find_next_bit.c                 |   69
-rw-r--r--  lib/idr.c                           |   12
-rw-r--r--  lib/inflate.c                       |    3
-rw-r--r--  lib/iomap.c                         |    2
-rw-r--r--  lib/kernel_lock.c                   |    1
-rw-r--r--  lib/klist.c                         |  235
-rw-r--r--  lib/kobject.c                       |   63
-rw-r--r--  lib/kobject_uevent.c                |   16
-rw-r--r--  lib/lmb.c                           |  509
-rw-r--r--  lib/pcounter.c                      |   58
-rw-r--r--  lib/percpu_counter.c                |    1
-rw-r--r--  lib/proportions.c                   |   38
-rw-r--r--  lib/radix-tree.c                    |    9
-rw-r--r--  lib/ratelimit.c                     |   51
-rw-r--r--  lib/reed_solomon/reed_solomon.c     |    1
-rw-r--r--  lib/scatterlist.c                   |  102
-rw-r--r--  lib/semaphore-sleepers.c            |  176
-rw-r--r--  lib/string.c                        |   27
-rw-r--r--  lib/swiotlb.c                       |  149
27 files changed, 2253 insertions, 513 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index ba3d104994d9..8cc8e8722a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines"
config BITREVERSE
tristate
+config GENERIC_FIND_FIRST_BIT
+ def_bool n
+
+config GENERIC_FIND_NEXT_BIT
+ def_bool n
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -141,4 +147,7 @@ config HAS_DMA
config CHECK_SIGNATURE
bool
+config HAVE_LMB
+ boolean
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 78955eb6bd94..d2099f41aa1e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -25,6 +25,17 @@ config ENABLE_MUST_CHECK
suppress the "warning: ignoring return value of 'foo', declared with
attribute warn_unused_result" messages.
+config FRAME_WARN
+ int "Warn for stack frames larger than (needs gcc 4.4)"
+ range 0 8192
+ default 1024 if !64BIT
+ default 2048 if 64BIT
+ help
+ Tell gcc to warn at build time for stack frames larger than this.
+ Setting this too low will cause a lot of warnings.
+ Setting it to 0 disables the warning.
+ Requires gcc 4.4
+
config MAGIC_SYSRQ
bool "Magic SysRq key"
depends on !UML
@@ -183,6 +194,37 @@ config TIMER_STATS
(it defaults to deactivated on bootup and will only be activated
if some application like powertop activates it explicitly).
+config DEBUG_OBJECTS
+ bool "Debug object operations"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, additional code will be inserted into the
+ kernel to track the life time of various objects and validate
+ the operations on those objects.
+
+config DEBUG_OBJECTS_SELFTEST
+ bool "Debug objects selftest"
+ depends on DEBUG_OBJECTS
+ help
+ This enables the selftest of the object debug code.
+
+config DEBUG_OBJECTS_FREE
+ bool "Debug objects in freed memory"
+ depends on DEBUG_OBJECTS
+ help
+ This enables checks on whether a k/v free operation frees an area
+ that contains an object which has not been deactivated
+ properly. This can make kmalloc/kfree-intensive workloads
+ much slower.
+
+config DEBUG_OBJECTS_TIMERS
+ bool "Debug timer objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ timer routines to track the life time of timer objects and
+ validate the timer operations.
+
config DEBUG_SLAB
bool "Debug slab memory allocations"
depends on DEBUG_KERNEL && SLAB
@@ -211,7 +253,7 @@ config SLUB_DEBUG_ON
config SLUB_STATS
default n
bool "Enable SLUB performance statistics"
- depends on SLUB
+ depends on SLUB && SLUB_DEBUG && SYSFS
help
SLUB statistics are useful to debug SLUBs allocation behavior in
order find ways to optimize the allocator. This should never be
@@ -265,16 +307,6 @@ config DEBUG_MUTEXES
This feature allows mutex semantics violations to be detected and
reported.
-config DEBUG_SEMAPHORE
- bool "Semaphore debugging"
- depends on DEBUG_KERNEL
- depends on ALPHA || FRV
- default n
- help
- If you say Y here then semaphore processing will issue lots of
- verbose debugging messages. If you suspect a semaphore problem or a
- kernel hacker asks for this option then say Y. Otherwise say N.
-
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -437,6 +469,16 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_WRITECOUNT
+ bool "Debug filesystem writers count"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to catch wrong use of the writers count in struct
+ vfsmount. This will increase the size of each file struct by
+ 32 bits.
+
+ If unsure, say N.
+
config DEBUG_LIST
bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
@@ -633,3 +675,5 @@ config FIREWIRE_OHCI_REMOTE_DMA
If unsure, say N.
source "samples/Kconfig"
+
+source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 000000000000..f2e01ac5ab09
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@
+
+menuconfig KGDB
+ bool "KGDB: kernel debugging with remote gdb"
+ select FRAME_POINTER
+ depends on HAVE_ARCH_KGDB
+ depends on DEBUG_KERNEL && EXPERIMENTAL
+ help
+ If you say Y here, it will be possible to remotely debug the
+ kernel using gdb. Documentation of the kernel debugger is available
+ at http://kgdb.sourceforge.net as well as in DocBook form
+ in Documentation/DocBook/. If unsure, say N.
+
+config HAVE_ARCH_KGDB_SHADOW_INFO
+ bool
+
+config HAVE_ARCH_KGDB
+ bool
+
+config KGDB_SERIAL_CONSOLE
+ tristate "KGDB: use kgdb over the serial console"
+ depends on KGDB
+ select CONSOLE_POLL
+ select MAGIC_SYSRQ
+ default y
+ help
+ Share a serial console with kgdb. Sysrq-g must be used
+ to break in initially.
+
+config KGDB_TESTS
+ bool "KGDB: internal test suite"
+ depends on KGDB
+ default n
+ help
+ This is a kgdb I/O module specifically designed to test
+ kgdb's internal functions. This kgdb I/O module is
+ intended for the development of new kgdb stubs
+ as well as regression testing the kgdb internals.
+ See drivers/misc/kgdbts.c for the details about
+ the tests. The most basic use of this I/O module is to boot
+ a kernel with the boot arguments "kgdbwait kgdbts=V1F100".
+
+config KGDB_TESTS_ON_BOOT
+ bool "KGDB: Run tests on boot"
+ depends on KGDB_TESTS
+ default n
+ help
+ Run the kgdb tests on boot up automatically without the need
+ to pass in a kernel parameter
+
+config KGDB_TESTS_BOOT_STRING
+ string "KGDB: which internal kgdb tests to run"
+ depends on KGDB_TESTS_ON_BOOT
+ default "V1F100"
+ help
+ This is the command string to send to the kgdb test suite on
+ boot. See drivers/misc/kgdbts.c for detailed
+ information about other strings you could use beyond the
+ default of V1F100.
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c83..74b0cfb1fcc3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o argv_split.o \
- proportions.o prio_heap.o
+ proportions.o prio_heap.o ratelimit.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -29,13 +29,14 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
+lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
@@ -61,7 +62,6 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
-obj-$(CONFIG_SMP) += pcounter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
@@ -70,6 +70,8 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
+obj-$(CONFIG_HAVE_LMB) += lmb.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2c9242e3fed0..c4cb48f77f0c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,22 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnprintf);
/**
+ * bitmap_scnprintf_len - return buffer length needed to convert
+ * bitmap to an ASCII hex string.
+ * @len: number of bits to be converted
+ */
+int bitmap_scnprintf_len(unsigned int len)
+{
+ /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
+ int bitslen = ALIGN(len, CHUNKSZ);
+ int wordlen = CHUNKSZ / 4;
+ int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
+
+ return buflen;
+}
+EXPORT_SYMBOL(bitmap_scnprintf_len);
+
+/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
@@ -698,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
}
EXPORT_SYMBOL(bitmap_bitremap);
+/**
+ * bitmap_onto - translate one bitmap relative to another
+ * @dst: resulting translated bitmap
+ * @orig: original untranslated bitmap
+ * @relmap: bitmap relative to which translated
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Set the n-th bit of @dst iff there exists some m such that the
+ * n-th bit of @relmap is set, the m-th bit of @orig is set, and
+ * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
+ * (If you understood the previous sentence the first time you
+ * read it, you're overqualified for your current job.)
+ *
+ * In other words, @orig is mapped onto (surjectively) @dst,
+ * using the map { <n, m> | the n-th bit of @relmap is the
+ * m-th set bit of @relmap }.
+ *
+ * Any set bits in @orig above bit number W, where W is the
+ * weight of (number of set bits in) @relmap are mapped nowhere.
+ * In particular, if for all bits m set in @orig, m >= W, then
+ * @dst will end up empty. In situations where the possibility
+ * of such an empty result is not desired, one way to avoid it is
+ * to use the bitmap_fold() operator, below, to first fold the
+ * @orig bitmap over itself so that all its set bits x are in the
+ * range 0 <= x < W. The bitmap_fold() operator does this by
+ * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
+ *
+ * Example [1] for bitmap_onto():
+ * Let's say @relmap has bits 30-39 set, and @orig has bits
+ * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
+ * @dst will have bits 31, 33, 35, 37 and 39 set.
+ *
+ * When bit 0 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the first bit (if any)
+ * that is turned on in @relmap. Since bit 0 was off in the
+ * above example, we leave off that bit (bit 30) in @dst.
+ *
+ * When bit 1 is set in @orig (as in the above example), it
+ * means turn on the bit in @dst corresponding to whatever
+ * is the second bit that is turned on in @relmap. The second
+ * bit in @relmap that was turned on in the above example was
+ * bit 31, so we turned on bit 31 in @dst.
+ *
+ * Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
+ * because they were the 4th, 6th, 8th and 10th set bits
+ * set in @relmap, and the 4th, 6th, 8th and 10th bits of
+ * @orig (i.e. bits 3, 5, 7 and 9) were also set.
+ *
+ * When bit 11 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the twelfth bit that is
+ * turned on in @relmap. In the above example, there were
+ * only ten bits turned on in @relmap (30..39), so the fact that
+ * bit 11 was set in @orig had no effect on @dst.
+ *
+ * Example [2] for bitmap_fold() + bitmap_onto():
+ * Let's say @relmap has these ten bits set:
+ * 40 41 42 43 45 48 53 61 74 95
+ * (for the curious, that's 40 plus the first ten terms of the
+ * Fibonacci sequence.)
+ *
+ * Further, let's say we use the following code, invoking
+ * bitmap_fold() then bitmap_onto(), as suggested above to
+ * avoid the possibility of an empty @dst result:
+ *
+ * unsigned long *tmp; // a temporary bitmap's bits
+ *
+ * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
+ * bitmap_onto(dst, tmp, relmap, bits);
+ *
+ * Then this table shows what various values of @dst would be, for
+ * various @orig's. I list the zero-based positions of each set bit.
+ * The tmp column shows the intermediate result, as computed by
+ * using bitmap_fold() to fold the @orig bitmap modulo ten
+ * (the weight of @relmap).
+ *
+ * @orig tmp @dst
+ * 0 0 40
+ * 1 1 41
+ * 9 9 95
+ * 10 0 40 (*)
+ * 1 3 5 7 1 3 5 7 41 43 48 61
+ * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
+ * 0 9 18 27 0 9 8 7 40 61 74 95
+ * 0 10 20 30 0 40
+ * 0 11 22 33 0 1 2 3 40 41 42 43
+ * 0 12 24 36 0 2 4 6 40 42 45 53
+ * 78 102 211 1 2 8 41 42 74 (*)
+ *
+ * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ * into tmp, then the @dst result would have been empty.
+ *
+ * If either of @orig or @relmap is empty (no set bits), then @dst
+ * will be returned empty.
+ *
+ * If (as explained above) the only set bits in @orig are in positions
+ * m where m >= W, (where W is the weight of @relmap) then @dst will
+ * once again be returned empty.
+ *
+ * All bits in @dst not set by the above rule are cleared.
+ */
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, int bits)
+{
+ int n, m; /* same meaning as in above comment */
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, bits);
+
+ /*
+ * The following code is a more efficient, but less
+ * obvious, equivalent to the loop:
+ * for (m = 0; m < bitmap_weight(relmap, bits); m++) {
+ * n = bitmap_ord_to_pos(orig, m, bits);
+ * if (test_bit(m, orig))
+ * set_bit(n, dst);
+ * }
+ */
+
+ m = 0;
+ for (n = find_first_bit(relmap, bits);
+ n < bits;
+ n = find_next_bit(relmap, bits, n + 1)) {
+ /* m == bitmap_pos_to_ord(relmap, n, bits) */
+ if (test_bit(m, orig))
+ set_bit(n, dst);
+ m++;
+ }
+}
+EXPORT_SYMBOL(bitmap_onto);
+
+/**
+ * bitmap_fold - fold larger bitmap into smaller, modulo specified size
+ * @dst: resulting smaller bitmap
+ * @orig: original larger bitmap
+ * @sz: specified size
+ * @bits: number of bits in each of these bitmaps
+ *
+ * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
+ * Clear all other bits in @dst. See further the comment and
+ * Example [2] for bitmap_onto() for why and how to use this.
+ */
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ int sz, int bits)
+{
+ int oldbit;
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, bits);
+
+ for (oldbit = find_first_bit(orig, bits);
+ oldbit < bits;
+ oldbit = find_next_bit(orig, bits, oldbit + 1))
+ set_bit(oldbit % sz, dst);
+}
+EXPORT_SYMBOL(bitmap_fold);
+
/*
* Common code for bitmap_*_region() routines.
* bitmap: array of unsigned longs corresponding to the bitmap
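
To make Example [2] above concrete, here is a minimal, hypothetical sketch of the fold-then-onto pattern the new kernel-doc recommends. The bitmap names and NBITS are invented for illustration and are not part of the patch; DECLARE_BITMAP(), bitmap_zero(), set_bit() and bitmap_weight() are the existing <linux/bitmap.h>/<linux/bitops.h> helpers.

/* Hypothetical illustration of bitmap_fold() + bitmap_onto() as
 * described in Example [2] above. All names here are made up.
 */
#include <linux/bitmap.h>

#define NBITS 128

static void example_remap(void)
{
	DECLARE_BITMAP(orig, NBITS);
	DECLARE_BITMAP(relmap, NBITS);
	DECLARE_BITMAP(tmp, NBITS);
	DECLARE_BITMAP(dst, NBITS);
	int i;

	bitmap_zero(orig, NBITS);
	bitmap_zero(relmap, NBITS);

	/* relmap has bits 30..39 set; orig has bits 1,3,5,7,9,11 set */
	for (i = 30; i <= 39; i++)
		set_bit(i, relmap);
	for (i = 1; i <= 11; i += 2)
		set_bit(i, orig);

	/* Fold orig modulo the weight of relmap, then translate. */
	bitmap_fold(tmp, orig, bitmap_weight(relmap, NBITS), NBITS);
	bitmap_onto(dst, tmp, relmap, NBITS);

	/* As in Example [1], dst now has bits 31, 33, 35, 37, 39 set. */
}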
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
new file mode 100644
index 000000000000..a76a5e122ae1
--- /dev/null
+++ b/lib/debugobjects.c
@@ -0,0 +1,890 @@
+/*
+ * Generic infrastructure for lifetime debugging of objects.
+ *
+ * Started by Thomas Gleixner
+ *
+ * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/debugobjects.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/hash.h>
+
+#define ODEBUG_HASH_BITS 14
+#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
+
+#define ODEBUG_POOL_SIZE 512
+#define ODEBUG_POOL_MIN_LEVEL 256
+
+#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
+#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
+#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
+
+struct debug_bucket {
+ struct hlist_head list;
+ spinlock_t lock;
+};
+
+static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
+
+static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE];
+
+static DEFINE_SPINLOCK(pool_lock);
+
+static HLIST_HEAD(obj_pool);
+
+static int obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int obj_pool_free = ODEBUG_POOL_SIZE;
+static int obj_pool_used;
+static int obj_pool_max_used;
+static struct kmem_cache *obj_cache;
+
+static int debug_objects_maxchain __read_mostly;
+static int debug_objects_fixups __read_mostly;
+static int debug_objects_warnings __read_mostly;
+static int debug_objects_enabled __read_mostly;
+static struct debug_obj_descr *descr_test __read_mostly;
+
+static int __init enable_object_debug(char *str)
+{
+ debug_objects_enabled = 1;
+ return 0;
+}
+early_param("debug_objects", enable_object_debug);
+
+static const char *obj_states[ODEBUG_STATE_MAX] = {
+ [ODEBUG_STATE_NONE] = "none",
+ [ODEBUG_STATE_INIT] = "initialized",
+ [ODEBUG_STATE_INACTIVE] = "inactive",
+ [ODEBUG_STATE_ACTIVE] = "active",
+ [ODEBUG_STATE_DESTROYED] = "destroyed",
+ [ODEBUG_STATE_NOTAVAILABLE] = "not available",
+};
+
+static int fill_pool(void)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+ struct debug_obj *new;
+
+ if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+ return obj_pool_free;
+
+ if (unlikely(!obj_cache))
+ return obj_pool_free;
+
+ while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+
+ new = kmem_cache_zalloc(obj_cache, gfp);
+ if (!new)
+ return obj_pool_free;
+
+ spin_lock(&pool_lock);
+ hlist_add_head(&new->node, &obj_pool);
+ obj_pool_free++;
+ spin_unlock(&pool_lock);
+ }
+ return obj_pool_free;
+}
+
+/*
+ * Lookup an object in the hash bucket.
+ */
+static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
+{
+ struct hlist_node *node;
+ struct debug_obj *obj;
+ int cnt = 0;
+
+ hlist_for_each_entry(obj, node, &b->list, node) {
+ cnt++;
+ if (obj->object == addr)
+ return obj;
+ }
+ if (cnt > debug_objects_maxchain)
+ debug_objects_maxchain = cnt;
+
+ return NULL;
+}
+
+/*
+ * Allocate a new object. If the pool is empty and no refill possible,
+ * switch off the debugger.
+ */
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+{
+ struct debug_obj *obj = NULL;
+ int retry = 0;
+
+repeat:
+ spin_lock(&pool_lock);
+ if (obj_pool.first) {
+ obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+
+ obj->object = addr;
+ obj->descr = descr;
+ obj->state = ODEBUG_STATE_NONE;
+ hlist_del(&obj->node);
+
+ hlist_add_head(&obj->node, &b->list);
+
+ obj_pool_used++;
+ if (obj_pool_used > obj_pool_max_used)
+ obj_pool_max_used = obj_pool_used;
+
+ obj_pool_free--;
+ if (obj_pool_free < obj_pool_min_free)
+ obj_pool_min_free = obj_pool_free;
+ }
+ spin_unlock(&pool_lock);
+
+ if (fill_pool() && !obj && !retry++)
+ goto repeat;
+
+ return obj;
+}
+
+/*
+ * Put the object back into the pool or give it back to kmem_cache:
+ */
+static void free_object(struct debug_obj *obj)
+{
+ unsigned long idx = (unsigned long)(obj - obj_static_pool);
+
+ if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
+ spin_lock(&pool_lock);
+ hlist_add_head(&obj->node, &obj_pool);
+ obj_pool_free++;
+ obj_pool_used--;
+ spin_unlock(&pool_lock);
+ } else {
+ spin_lock(&pool_lock);
+ obj_pool_used--;
+ spin_unlock(&pool_lock);
+ kmem_cache_free(obj_cache, obj);
+ }
+}
+
+/*
+ * We ran out of memory. That means we probably have tons of objects
+ * allocated.
+ */
+static void debug_objects_oom(void)
+{
+ struct debug_bucket *db = obj_hash;
+ struct hlist_node *node, *tmp;
+ struct debug_obj *obj;
+ unsigned long flags;
+ int i;
+
+ printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+ spin_lock_irqsave(&db->lock, flags);
+ hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
+ spin_unlock_irqrestore(&db->lock, flags);
+ }
+}
+
+/*
+ * We use the pfn of the address for the hash. That way we can check
+ * for freed objects simply by checking the affected bucket.
+ */
+static struct debug_bucket *get_bucket(unsigned long addr)
+{
+ unsigned long hash;
+
+ hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
+ return &obj_hash[hash];
+}
+
+static void debug_print_object(struct debug_obj *obj, char *msg)
+{
+ static int limit;
+
+ if (limit < 5 && obj->descr != descr_test) {
+ limit++;
+ printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+ obj_states[obj->state], obj->descr->name);
+ WARN_ON(1);
+ }
+ debug_objects_warnings++;
+}
+
+/*
+ * Try to repair the damage, so we have a better chance to get useful
+ * debug output.
+ */
+static void
+debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
+ void * addr, enum debug_obj_state state)
+{
+ if (fixup)
+ debug_objects_fixups += fixup(addr, state);
+}
+
+static void debug_object_is_on_stack(void *addr, int onstack)
+{
+ void *stack = current->stack;
+ int is_on_stack;
+ static int limit;
+
+ if (limit > 4)
+ return;
+
+ is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
+
+ if (is_on_stack == onstack)
+ return;
+
+ limit++;
+ if (is_on_stack)
+ printk(KERN_WARNING
+ "ODEBUG: object is on stack, but not annotated\n");
+ else
+ printk(KERN_WARNING
+ "ODEBUG: object is not on stack, but annotated\n");
+ WARN_ON(1);
+}
+
+static void
+__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj) {
+ obj = alloc_object(addr, db, descr);
+ if (!obj) {
+ debug_objects_enabled = 0;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
+ }
+ debug_object_is_on_stack(addr, onstack);
+ }
+
+ switch (obj->state) {
+ case ODEBUG_STATE_NONE:
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_INIT;
+ break;
+
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "init");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_init, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "init");
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_init - debug checks when an object is initialized
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_init(void *addr, struct debug_obj_descr *descr)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ __debug_object_init(addr, descr, 0);
+}
+
+/**
+ * debug_object_init_on_stack - debug checks when an object on stack is
+ * initialized
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ __debug_object_init(addr, descr, 1);
+}
+
+/**
+ * debug_object_activate - debug checks when an object is activated
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_activate(void *addr, struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ break;
+
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "activate");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_activate, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "activate");
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ /*
+ * This happens when a static object is activated. We
+ * let the type specific code decide whether this is
+ * true or not.
+ */
+ debug_object_fixup(descr->fixup_activate, addr,
+ ODEBUG_STATE_NOTAVAILABLE);
+}
+
+/**
+ * debug_object_deactivate - debug checks when an object is deactivated
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ case ODEBUG_STATE_ACTIVE:
+ obj->state = ODEBUG_STATE_INACTIVE;
+ break;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "deactivate");
+ break;
+ default:
+ break;
+ }
+ } else {
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
+
+ debug_print_object(&o, "deactivate");
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_destroy - debug checks when an object is destroyed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj)
+ goto out_unlock;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_NONE:
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_DESTROYED;
+ break;
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "destroy");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_destroy, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "destroy");
+ break;
+ default:
+ break;
+ }
+out_unlock:
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_free - debug checks when an object is freed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_free(void *addr, struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj)
+ goto out_unlock;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "free");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_free, addr, state);
+ return;
+ default:
+ hlist_del(&obj->node);
+ free_object(obj);
+ break;
+ }
+out_unlock:
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+ unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+ struct hlist_node *node, *tmp;
+ struct debug_obj_descr *descr;
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ int cnt;
+
+ saddr = (unsigned long) address;
+ eaddr = saddr + size;
+ paddr = saddr & ODEBUG_CHUNK_MASK;
+ chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
+ chunks >>= ODEBUG_CHUNK_SHIFT;
+
+ for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
+ db = get_bucket(paddr);
+
+repeat:
+ cnt = 0;
+ spin_lock_irqsave(&db->lock, flags);
+ hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ cnt++;
+ oaddr = (unsigned long) obj->object;
+ if (oaddr < saddr || oaddr >= eaddr)
+ continue;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "free");
+ descr = obj->descr;
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_free,
+ (void *) oaddr, state);
+ goto repeat;
+ default:
+ hlist_del(&obj->node);
+ free_object(obj);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&db->lock, flags);
+ if (cnt > debug_objects_maxchain)
+ debug_objects_maxchain = cnt;
+ }
+}
+
+void debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+ if (debug_objects_enabled)
+ __debug_check_no_obj_freed(address, size);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debug_stats_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
+ seq_printf(m, "warnings :%d\n", debug_objects_warnings);
+ seq_printf(m, "fixups :%d\n", debug_objects_fixups);
+ seq_printf(m, "pool_free :%d\n", obj_pool_free);
+ seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
+ seq_printf(m, "pool_used :%d\n", obj_pool_used);
+ seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+ return 0;
+}
+
+static int debug_stats_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, debug_stats_show, NULL);
+}
+
+static const struct file_operations debug_stats_fops = {
+ .open = debug_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init debug_objects_init_debugfs(void)
+{
+ struct dentry *dbgdir, *dbgstats;
+
+ if (!debug_objects_enabled)
+ return 0;
+
+ dbgdir = debugfs_create_dir("debug_objects", NULL);
+ if (!dbgdir)
+ return -ENOMEM;
+
+ dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
+ &debug_stats_fops);
+ if (!dbgstats)
+ goto err;
+
+ return 0;
+
+err:
+ debugfs_remove(dbgdir);
+
+ return -ENOMEM;
+}
+__initcall(debug_objects_init_debugfs);
+
+#else
+static inline void debug_objects_init_debugfs(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
+
+/* Random data structure for the self test */
+struct self_test {
+ unsigned long dummy1[6];
+ int static_init;
+ unsigned long dummy2[3];
+};
+
+static __initdata struct debug_obj_descr descr_type_test;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int __init fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_init(obj, &descr_type_test);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int __init fixup_activate(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (obj->static_init == 1) {
+ debug_object_init(obj, &descr_type_test);
+ debug_object_activate(obj, &descr_type_test);
+ /*
+ * Real code should return 0 here! This is
+ * not a fixup of some bad behaviour. We
+ * merely call the debug_init function to keep
+ * track of the object.
+ */
+ return 1;
+ } else {
+ /* Real code needs to emit a warning here */
+ }
+ return 0;
+
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_activate(obj, &descr_type_test);
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_destroy is called when:
+ * - an active object is destroyed
+ */
+static int __init fixup_destroy(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_destroy(obj, &descr_type_test);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int __init fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_free(obj, &descr_type_test);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int
+check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+ int res = -EINVAL;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj && state != ODEBUG_STATE_NONE) {
+ printk(KERN_ERR "ODEBUG: selftest object not found\n");
+ WARN_ON(1);
+ goto out;
+ }
+ if (obj && obj->state != state) {
+ printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+ obj->state, state);
+ WARN_ON(1);
+ goto out;
+ }
+ if (fixups != debug_objects_fixups) {
+ printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+ fixups, debug_objects_fixups);
+ WARN_ON(1);
+ goto out;
+ }
+ if (warnings != debug_objects_warnings) {
+ printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+ warnings, debug_objects_warnings);
+ WARN_ON(1);
+ goto out;
+ }
+ res = 0;
+out:
+ spin_unlock_irqrestore(&db->lock, flags);
+ if (res)
+ debug_objects_enabled = 0;
+ return res;
+}
+
+static __initdata struct debug_obj_descr descr_type_test = {
+ .name = "selftest",
+ .fixup_init = fixup_init,
+ .fixup_activate = fixup_activate,
+ .fixup_destroy = fixup_destroy,
+ .fixup_free = fixup_free,
+};
+
+static __initdata struct self_test obj = { .static_init = 0 };
+
+static void __init debug_objects_selftest(void)
+{
+ int fixups, oldfixups, warnings, oldwarnings;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ fixups = oldfixups = debug_objects_fixups;
+ warnings = oldwarnings = debug_objects_warnings;
+ descr_test = &descr_type_test;
+
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
+ goto out;
+ debug_object_deactivate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
+ goto out;
+ debug_object_destroy(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
+ goto out;
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_deactivate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_free(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+ goto out;
+
+ obj.static_init = 1;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+ goto out;
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
+ goto out;
+ debug_object_free(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+ goto out;
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+ goto out;
+ __debug_check_no_obj_freed(&obj, sizeof(obj));
+ if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
+ goto out;
+#endif
+ printk(KERN_INFO "ODEBUG: selftest passed\n");
+
+out:
+ debug_objects_fixups = oldfixups;
+ debug_objects_warnings = oldwarnings;
+ descr_test = NULL;
+
+ local_irq_restore(flags);
+}
+#else
+static inline void debug_objects_selftest(void) { }
+#endif
+
+/*
+ * Called during early boot to initialize the hash buckets and link
+ * the static object pool objects into the pool list. After this call
+ * the object tracker is fully operational.
+ */
+void __init debug_objects_early_init(void)
+{
+ int i;
+
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++)
+ spin_lock_init(&obj_hash[i].lock);
+
+ for (i = 0; i < ODEBUG_POOL_SIZE; i++)
+ hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+}
+
+/*
+ * Called after the kmem_caches are functional to set up a dedicated
+ * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
+ * prevents the debug code from being called on kmem_cache_free() for
+ * the debug tracker objects, avoiding recursive calls.
+ */
+void __init debug_objects_mem_init(void)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ obj_cache = kmem_cache_create("debug_objects_cache",
+ sizeof (struct debug_obj), 0,
+ SLAB_DEBUG_OBJECTS, NULL);
+
+ if (!obj_cache)
+ debug_objects_enabled = 0;
+ else
+ debug_objects_selftest();
+}
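
To show how a subsystem would hook into the infrastructure this file adds, the following hypothetical sketch annotates a made-up "struct foo" with a debug_obj_descr. The descriptor, the foo_* helpers and the fixup handler are invented here for illustration only; the real user enabled alongside this code is the timer code (CONFIG_DEBUG_OBJECTS_TIMERS).

/* Hypothetical user of the debugobjects API introduced above.
 * "struct foo", foo_debug_descr and the foo_* functions are invented.
 */
#include <linux/debugobjects.h>

struct foo {
	int active;
};

static struct debug_obj_descr foo_debug_descr;

/* Called when init is attempted on an object in an unexpected state;
 * return 1 if the state was fixed up, 0 otherwise. */
static int foo_fixup_init(void *addr, enum debug_obj_state state)
{
	struct foo *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		obj->active = 0;
		debug_object_init(obj, &foo_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr foo_debug_descr = {
	.name		= "foo",
	.fixup_init	= foo_fixup_init,
};

static void foo_init(struct foo *obj)
{
	debug_object_init(obj, &foo_debug_descr);
	obj->active = 0;
}

static void foo_start(struct foo *obj)
{
	debug_object_activate(obj, &foo_debug_descr);
	obj->active = 1;
}

static void foo_stop(struct foo *obj)
{
	debug_object_deactivate(obj, &foo_debug_descr);
	obj->active = 0;
}

static void foo_free(struct foo *obj)
{
	debug_object_free(obj, &foo_debug_descr);
}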
diff --git a/lib/devres.c b/lib/devres.c
index edc27a5d1b73..26c87c49d776 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -20,7 +20,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
*
* Managed ioremap(). Map is automatically unmapped on driver detach.
*/
-void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
unsigned long size)
{
void __iomem **ptr, *addr;
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(devm_ioremap);
* Managed ioremap_nocache(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
unsigned long size)
{
void __iomem **ptr, *addr;
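
As a hypothetical usage sketch (not part of this patch), a driver would call the managed ioremap whose offset argument is widened here roughly as below; the function and variable names are invented, and the size is computed from res->start/res->end directly.

/* Hypothetical driver fragment using devm_ioremap() with the
 * resource_size_t offset introduced above. Names are invented.
 */
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

static int example_map_regs(struct device *dev, struct resource *res,
			    void __iomem **regs)
{
	*regs = devm_ioremap(dev, res->start, res->end - res->start + 1);
	if (!*regs)
		return -ENOMEM;
	/* The mapping is torn down automatically on driver detach. */
	return 0;
}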
diff --git a/lib/div64.c b/lib/div64.c
index b71cf93c529a..bb5bd0c0f030 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -16,9 +16,8 @@
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
-#include <linux/types.h>
#include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
@@ -58,10 +57,31 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+ u64 quotient;
+
+ if (dividend < 0) {
+ quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+ *remainder = -*remainder;
+ if (divisor > 0)
+ quotient = -quotient;
+ } else {
+ quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+ if (divisor < 0)
+ quotient = -quotient;
+ }
+ return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
/* 64bit divisor, dividend and result. dynamic precision */
-uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
{
- uint32_t high, d;
+ u32 high, d;
high = divisor >> 32;
if (high) {
@@ -72,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor)
} else
d = divisor;
- do_div(dividend, d);
-
- return dividend;
+ return div_u64(dividend, d);
}
-EXPORT_SYMBOL(div64_64);
+EXPORT_SYMBOL(div64_u64);
+#endif
#endif /* BITS_PER_LONG == 32 */
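
A brief, hypothetical sketch of the <linux/math64.h> helpers this hunk adds or renames; the function names and the nanosecond example below are invented for illustration.

/* Hypothetical examples of the math64 helpers used/added above. */
#include <linux/math64.h>

/* 64-bit dividend and divisor: replaces the old div64_64(). */
static u64 bytes_per_sec(u64 bytes, u64 elapsed_ns)
{
	if (!elapsed_ns)
		return 0;
	return div64_u64(bytes * 1000000000ULL, elapsed_ns);
}

/* Signed division with remainder, via the new div_s64_rem(). */
static void split_ns(s64 offset_ns, s64 *sec, s32 *nsec)
{
	*sec = div_s64_rem(offset_ns, 1000000000, nsec);
}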
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 78ccd73a8841..24c59ded47a0 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,14 +16,12 @@
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+/*
+ * Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+ unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -60,7 +58,6 @@ found_first:
found_middle:
return result + __ffs(tmp);
}
-
EXPORT_SYMBOL(find_next_bit);
/*
@@ -68,7 +65,7 @@ EXPORT_SYMBOL(find_next_bit);
* Linus' asm-alpha/bitops.h.
*/
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+ unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -105,8 +102,62 @@ found_first:
found_middle:
return result + ffz(tmp);
}
-
EXPORT_SYMBOL(find_next_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(find_first_bit);
+
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) | (~0UL << size);
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found:
+ return result + ffz(tmp);
+}
+EXPORT_SYMBOL(find_first_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
#ifdef __BIG_ENDIAN
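
A small, hypothetical sketch of the generic find_first_bit()/find_next_bit() pair that CONFIG_GENERIC_FIND_FIRST_BIT now provides, walking every set bit of a bitmap; the function name and bitmap size below are invented.

/* Hypothetical walk over all set bits using the generic helpers above. */
#include <linux/bitops.h>
#include <linux/kernel.h>

#define EXAMPLE_NBITS 64

static void walk_set_bits(const unsigned long *map)
{
	unsigned long bit;

	for (bit = find_first_bit(map, EXAMPLE_NBITS);
	     bit < EXAMPLE_NBITS;
	     bit = find_next_bit(map, EXAMPLE_NBITS, bit + 1))
		printk(KERN_DEBUG "bit %lu is set\n", bit);
}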
diff --git a/lib/idr.c b/lib/idr.c
index afbb0b1023d4..7a02e173f027 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -385,8 +385,8 @@ void idr_remove(struct idr *idp, int id)
while (idp->id_free_cnt >= IDR_FREE_MAX) {
p = alloc_layer(idp);
kmem_cache_free(idr_layer_cache, p);
- return;
}
+ return;
}
EXPORT_SYMBOL(idr_remove);
@@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
memset(idr_layer, 0, sizeof(struct idr_layer));
}
-static int init_id_cache(void)
+void __init idr_init_cache(void)
{
- if (!idr_layer_cache)
- idr_layer_cache = kmem_cache_create("idr_layer_cache",
- sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
- return 0;
+ idr_layer_cache = kmem_cache_create("idr_layer_cache",
+ sizeof(struct idr_layer), 0, SLAB_PANIC,
+ idr_cache_ctor);
}
/**
@@ -602,7 +601,6 @@ static int init_id_cache(void)
*/
void idr_init(struct idr *idp)
{
- init_id_cache();
memset(idp, 0, sizeof(struct idr));
spin_lock_init(&idp->lock);
}
diff --git a/lib/inflate.c b/lib/inflate.c
index 845f91d3ac12..9762294be062 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -811,6 +811,9 @@ DEBG("<dyn");
ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */
#endif
+ if (ll == NULL)
+ return 1;
+
/* make local bit buffer */
b = bb;
k = bk;
diff --git a/lib/iomap.c b/lib/iomap.c
index dd6ca48fe6b0..37a3ea4cac9f 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
resource_size_t start = pci_resource_start(dev, bar);
- unsigned long len = pci_resource_len(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
if (!len || !start)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf00844b..cd3e82530b03 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
+#include <linux/semaphore.h>
/*
* The 'big kernel semaphore'
diff --git a/lib/klist.c b/lib/klist.c
index 120bd175aa78..cca37f96faa2 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -1,38 +1,37 @@
/*
- * klist.c - Routines for manipulating klists.
+ * klist.c - Routines for manipulating klists.
*
+ * Copyright (C) 2005 Patrick Mochel
*
- * This klist interface provides a couple of structures that wrap around
- * struct list_head to provide explicit list "head" (struct klist) and
- * list "node" (struct klist_node) objects. For struct klist, a spinlock
- * is included that protects access to the actual list itself. struct
- * klist_node provides a pointer to the klist that owns it and a kref
- * reference count that indicates the number of current users of that node
- * in the list.
+ * This file is released under the GPL v2.
*
- * The entire point is to provide an interface for iterating over a list
- * that is safe and allows for modification of the list during the
- * iteration (e.g. insertion and removal), including modification of the
- * current node on the list.
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and list
+ * "node" (struct klist_node) objects. For struct klist, a spinlock is
+ * included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
*
- * It works using a 3rd object type - struct klist_iter - that is declared
- * and initialized before an iteration. klist_next() is used to acquire the
- * next element in the list. It returns NULL if there are no more items.
- * Internally, that routine takes the klist's lock, decrements the reference
- * count of the previous klist_node and increments the count of the next
- * klist_node. It then drops the lock and returns.
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
*
- * There are primitives for adding and removing nodes to/from a klist.
- * When deleting, klist_del() will simply decrement the reference count.
- * Only when the count goes to 0 is the node removed from the list.
- * klist_remove() will try to delete the node from the list and block
- * until it is actually removed. This is useful for objects (like devices)
- * that have been removed from the system and must be freed (but must wait
- * until all accessors have finished).
+ * It works using a 3rd object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the
+ * reference count of the previous klist_node and increments the count of
+ * the next klist_node. It then drops the lock and returns.
*
- * Copyright (C) 2005 Patrick Mochel
- *
- * This file is released under the GPL v2.
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block until
+ * it is actually removed. This is useful for objects (like devices) that
+ * have been removed from the system and must be freed (but must wait until
+ * all accessors have finished).
*/
#include <linux/klist.h>
@@ -40,10 +39,10 @@
/**
- * klist_init - Initialize a klist structure.
- * @k: The klist we're initializing.
- * @get: The get function for the embedding object (NULL if none)
- * @put: The put function for the embedding object (NULL if none)
+ * klist_init - Initialize a klist structure.
+ * @k: The klist we're initializing.
+ * @get: The get function for the embedding object (NULL if none)
+ * @put: The put function for the embedding object (NULL if none)
*
* Initialises the klist structure. If the klist_node structures are
* going to be embedded in refcounted objects (necessary for safe
@@ -51,8 +50,7 @@
* functions that take and release references on the embedding
* objects.
*/
-
-void klist_init(struct klist * k, void (*get)(struct klist_node *),
+void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *))
{
INIT_LIST_HEAD(&k->k_list);
@@ -60,26 +58,23 @@ void klist_init(struct klist * k, void (*get)(struct klist_node *),
k->get = get;
k->put = put;
}
-
EXPORT_SYMBOL_GPL(klist_init);
-
-static void add_head(struct klist * k, struct klist_node * n)
+static void add_head(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
-static void add_tail(struct klist * k, struct klist_node * n)
+static void add_tail(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
-
-static void klist_node_init(struct klist * k, struct klist_node * n)
+static void klist_node_init(struct klist *k, struct klist_node *n)
{
INIT_LIST_HEAD(&n->n_node);
init_completion(&n->n_removed);
@@ -89,60 +84,83 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
k->get(n);
}
-
/**
- * klist_add_head - Initialize a klist_node and add it to front.
- * @n: node we're adding.
- * @k: klist it's going on.
+ * klist_add_head - Initialize a klist_node and add it to front.
+ * @n: node we're adding.
+ * @k: klist it's going on.
*/
-
-void klist_add_head(struct klist_node * n, struct klist * k)
+void klist_add_head(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_head(k, n);
}
-
EXPORT_SYMBOL_GPL(klist_add_head);
-
/**
- * klist_add_tail - Initialize a klist_node and add it to back.
- * @n: node we're adding.
- * @k: klist it's going on.
+ * klist_add_tail - Initialize a klist_node and add it to back.
+ * @n: node we're adding.
+ * @k: klist it's going on.
*/
-
-void klist_add_tail(struct klist_node * n, struct klist * k)
+void klist_add_tail(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_tail(k, n);
}
-
EXPORT_SYMBOL_GPL(klist_add_tail);
+/**
+ * klist_add_after - Init a klist_node and add it after an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n after
+ */
+void klist_add_after(struct klist_node *n, struct klist_node *pos)
+{
+ struct klist *k = pos->n_klist;
+
+ klist_node_init(k, n);
+ spin_lock(&k->k_lock);
+ list_add(&n->n_node, &pos->n_node);
+ spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_after);
+
+/**
+ * klist_add_before - Init a klist_node and add it before an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n before
+ */
+void klist_add_before(struct klist_node *n, struct klist_node *pos)
+{
+ struct klist *k = pos->n_klist;
+
+ klist_node_init(k, n);
+ spin_lock(&k->k_lock);
+ list_add_tail(&n->n_node, &pos->n_node);
+ spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_before);
-static void klist_release(struct kref * kref)
+static void klist_release(struct kref *kref)
{
- struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+ struct klist_node *n = container_of(kref, struct klist_node, n_ref);
list_del(&n->n_node);
complete(&n->n_removed);
n->n_klist = NULL;
}
-static int klist_dec_and_del(struct klist_node * n)
+static int klist_dec_and_del(struct klist_node *n)
{
return kref_put(&n->n_ref, klist_release);
}
-
/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
*/
-
-void klist_del(struct klist_node * n)
+void klist_del(struct klist_node *n)
{
- struct klist * k = n->n_klist;
+ struct klist *k = n->n_klist;
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
@@ -152,48 +170,40 @@ void klist_del(struct klist_node * n)
if (put)
put(n);
}
-
EXPORT_SYMBOL_GPL(klist_del);
-
/**
- * klist_remove - Decrement the refcount of node and wait for it to go away.
- * @n: node we're removing.
+ * klist_remove - Decrement the refcount of node and wait for it to go away.
+ * @n: node we're removing.
*/
-
-void klist_remove(struct klist_node * n)
+void klist_remove(struct klist_node *n)
{
klist_del(n);
wait_for_completion(&n->n_removed);
}
-
EXPORT_SYMBOL_GPL(klist_remove);
-
/**
- * klist_node_attached - Say whether a node is bound to a list or not.
- * @n: Node that we're testing.
+ * klist_node_attached - Say whether a node is bound to a list or not.
+ * @n: Node that we're testing.
*/
-
-int klist_node_attached(struct klist_node * n)
+int klist_node_attached(struct klist_node *n)
{
return (n->n_klist != NULL);
}
-
EXPORT_SYMBOL_GPL(klist_node_attached);
-
/**
- * klist_iter_init_node - Initialize a klist_iter structure.
- * @k: klist we're iterating.
- * @i: klist_iter we're filling.
- * @n: node to start with.
+ * klist_iter_init_node - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter we're filling.
+ * @n: node to start with.
*
- * Similar to klist_iter_init(), but starts the action off with @n,
- * instead of with the list head.
+ * Similar to klist_iter_init(), but starts the action off with @n,
+ * instead of with the list head.
*/
-
-void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
+void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+ struct klist_node *n)
{
i->i_klist = k;
i->i_head = &k->k_list;
@@ -201,66 +211,56 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_
if (n)
kref_get(&n->n_ref);
}
-
EXPORT_SYMBOL_GPL(klist_iter_init_node);
-
/**
- * klist_iter_init - Iniitalize a klist_iter structure.
- * @k: klist we're iterating.
- * @i: klist_iter structure we're filling.
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
*
- * Similar to klist_iter_init_node(), but start with the list head.
+ * Similar to klist_iter_init_node(), but start with the list head.
*/
-
-void klist_iter_init(struct klist * k, struct klist_iter * i)
+void klist_iter_init(struct klist *k, struct klist_iter *i)
{
klist_iter_init_node(k, i, NULL);
}
-
EXPORT_SYMBOL_GPL(klist_iter_init);
-
/**
- * klist_iter_exit - Finish a list iteration.
- * @i: Iterator structure.
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
*
- * Must be called when done iterating over list, as it decrements the
- * refcount of the current node. Necessary in case iteration exited before
- * the end of the list was reached, and always good form.
+ * Must be called when done iterating over list, as it decrements the
+ * refcount of the current node. Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
*/
-
-void klist_iter_exit(struct klist_iter * i)
+void klist_iter_exit(struct klist_iter *i)
{
if (i->i_cur) {
klist_del(i->i_cur);
i->i_cur = NULL;
}
}
-
EXPORT_SYMBOL_GPL(klist_iter_exit);
-
-static struct klist_node * to_klist_node(struct list_head * n)
+static struct klist_node *to_klist_node(struct list_head *n)
{
return container_of(n, struct klist_node, n_node);
}
-
/**
- * klist_next - Ante up next node in list.
- * @i: Iterator structure.
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
*
- * First grab list lock. Decrement the reference count of the previous
- * node, if there was one. Grab the next node, increment its reference
- * count, drop the lock, and return that next node.
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
*/
-
-struct klist_node * klist_next(struct klist_iter * i)
+struct klist_node *klist_next(struct klist_iter *i)
{
- struct list_head * next;
- struct klist_node * lnode = i->i_cur;
- struct klist_node * knode = NULL;
+ struct list_head *next;
+ struct klist_node *lnode = i->i_cur;
+ struct klist_node *knode = NULL;
void (*put)(struct klist_node *) = i->i_klist->put;
spin_lock(&i->i_klist->k_lock);
@@ -281,7 +281,4 @@ struct klist_node * klist_next(struct klist_iter * i)
put(lnode);
return knode;
}
-
EXPORT_SYMBOL_GPL(klist_next);
-
-
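
For orientation, here is a hypothetical sketch of the klist usage model the reworked header comment describes, including the klist_add_after() primitive added by this patch; "struct item" and the function names are invented.

/* Hypothetical klist usage: init, add nodes, iterate. Names invented. */
#include <linux/klist.h>
#include <linux/kernel.h>

struct item {
	struct klist_node node;
	int value;
};

static struct klist item_list;

static void item_list_example(struct item *a, struct item *b)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_init(&item_list, NULL, NULL);	/* no get/put hooks */
	klist_add_tail(&a->node, &item_list);
	/* New in this patch: insert relative to an existing node. */
	klist_add_after(&b->node, &a->node);

	klist_iter_init(&item_list, &iter);
	while ((n = klist_next(&iter))) {
		struct item *it = container_of(n, struct item, node);

		printk(KERN_DEBUG "item value %d\n", it->value);
	}
	klist_iter_exit(&iter);
}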
diff --git a/lib/kobject.c b/lib/kobject.c
index 0d03252f87a8..718e5101c263 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -58,11 +58,6 @@ static int create_dir(struct kobject *kobj)
return error;
}
-static inline struct kobject *to_kobj(struct list_head *entry)
-{
- return container_of(entry, struct kobject, entry);
-}
-
static int get_kobj_path_length(struct kobject *kobj)
{
int length = 1;
@@ -95,7 +90,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
}
pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
- kobj, __FUNCTION__, path);
+ kobj, __func__, path);
}
/**
@@ -186,7 +181,7 @@ static int kobject_add_internal(struct kobject *kobj)
}
pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n",
- kobject_name(kobj), kobj, __FUNCTION__,
+ kobject_name(kobj), kobj, __func__,
parent ? kobject_name(parent) : "<NULL>",
kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
@@ -201,10 +196,10 @@ static int kobject_add_internal(struct kobject *kobj)
printk(KERN_ERR "%s failed for %s with "
"-EEXIST, don't try to register things with "
"the same name in the same directory.\n",
- __FUNCTION__, kobject_name(kobj));
+ __func__, kobject_name(kobj));
else
printk(KERN_ERR "%s failed for %s (%d)\n",
- __FUNCTION__, kobject_name(kobj), error);
+ __func__, kobject_name(kobj), error);
dump_stack();
} else
kobj->state_in_sysfs = 1;
@@ -221,21 +216,12 @@ static int kobject_add_internal(struct kobject *kobj)
static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
- va_list aq;
- char *name;
-
- va_copy(aq, vargs);
- name = kvasprintf(GFP_KERNEL, fmt, vargs);
- va_end(aq);
-
- if (!name)
- return -ENOMEM;
-
/* Free the old name, if necessary. */
kfree(kobj->name);
- /* Now, set the new name */
- kobj->name = name;
+ kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+ if (!kobj->name)
+ return -ENOMEM;
return 0;
}
@@ -251,12 +237,12 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
*/
int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
- va_list args;
+ va_list vargs;
int retval;
- va_start(args, fmt);
- retval = kobject_set_name_vargs(kobj, fmt, args);
- va_end(args);
+ va_start(vargs, fmt);
+ retval = kobject_set_name_vargs(kobj, fmt, vargs);
+ va_end(vargs);
return retval;
}
@@ -306,12 +292,9 @@ EXPORT_SYMBOL(kobject_init);
static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
const char *fmt, va_list vargs)
{
- va_list aq;
int retval;
- va_copy(aq, vargs);
- retval = kobject_set_name_vargs(kobj, fmt, aq);
- va_end(aq);
+ retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
printk(KERN_ERR "kobject: can not set name properly!\n");
return retval;
@@ -545,7 +528,7 @@ static void kobject_cleanup(struct kobject *kobj)
const char *name = kobj->name;
pr_debug("kobject: '%s' (%p): %s\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
if (t && !t->release)
pr_debug("kobject: '%s' (%p): does not have a release() "
@@ -592,13 +575,20 @@ static void kobject_release(struct kref *kref)
*/
void kobject_put(struct kobject *kobj)
{
- if (kobj)
+ if (kobj) {
+ if (!kobj->state_initialized) {
+ printk(KERN_WARNING "kobject: '%s' (%p): is not "
+ "initialized, yet kobject_put() is being "
+ "called.\n", kobject_name(kobj), kobj);
+ WARN_ON(1);
+ }
kref_put(&kobj->kref, kobject_release);
+ }
}
static void dynamic_kobj_release(struct kobject *kobj)
{
- pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__);
+ pr_debug("kobject: (%p): %s\n", kobj, __func__);
kfree(kobj);
}
@@ -655,7 +645,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
retval = kobject_add(kobj, parent, "%s", name);
if (retval) {
printk(KERN_WARNING "%s: kobject_add error: %d\n",
- __FUNCTION__, retval);
+ __func__, retval);
kobject_put(kobj);
kobj = NULL;
}
@@ -745,12 +735,11 @@ void kset_unregister(struct kset *k)
*/
struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
- struct list_head *entry;
+ struct kobject *k;
struct kobject *ret = NULL;
spin_lock(&kset->list_lock);
- list_for_each(entry, &kset->list) {
- struct kobject *k = to_kobj(entry);
+ list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
ret = kobject_get(k);
break;
@@ -764,7 +753,7 @@ static void kset_release(struct kobject *kobj)
{
struct kset *kset = container_of(kobj, struct kset, kobj);
pr_debug("kobject: '%s' (%p): %s\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
kfree(kset);
}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5b6d7f6956b9..2fa545a63160 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -15,11 +15,13 @@
*/
#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
-#include <linux/string.h>
-#include <linux/kobject.h>
#include <net/sock.h>
@@ -99,7 +101,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
int retval = 0;
pr_debug("kobject: '%s' (%p): %s\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
/* search the kset we belong to */
top_kobj = kobj;
@@ -109,7 +111,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (!top_kobj->kset) {
pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
"without kset!\n", kobject_name(kobj), kobj,
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
@@ -121,7 +123,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (!uevent_ops->filter(kset, kobj)) {
pr_debug("kobject: '%s' (%p): %s: filter function "
"caused the event to drop!\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
return 0;
}
@@ -133,7 +135,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (!subsystem) {
pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
"event to drop!\n", kobject_name(kobj), kobj,
- __FUNCTION__);
+ __func__);
return 0;
}
@@ -175,7 +177,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (retval) {
pr_debug("kobject: '%s' (%p): %s: uevent() returned "
"%d\n", kobject_name(kobj), kobj,
- __FUNCTION__, retval);
+ __func__, retval);
goto exit;
}
}
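The uevent path patched above is driven by callers roughly like the sketch below; the REASON key is an illustrative assumption, any NULL-terminated list of key=value strings works.

#include <linux/kobject.h>

/* Tell userspace (udev et al.) that @kobj changed, with one extra
 * environment variable in the uevent. */
static void notify_change(struct kobject *kobj)
{
	char *envp[] = { "REASON=config_update", NULL };

	kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
}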
diff --git a/lib/lmb.c b/lib/lmb.c
new file mode 100644
index 000000000000..83287d3869a3
--- /dev/null
+++ b/lib/lmb.c
@@ -0,0 +1,509 @@
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp. June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/lmb.h>
+
+#define LMB_ALLOC_ANYWHERE 0
+
+struct lmb lmb;
+
+void lmb_dump_all(void)
+{
+#ifdef DEBUG
+ unsigned long i;
+
+ pr_debug("lmb_dump_all:\n");
+ pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
+ pr_debug(" memory.size = 0x%llx\n",
+ (unsigned long long)lmb.memory.size);
+ for (i=0; i < lmb.memory.cnt ;i++) {
+ pr_debug(" memory.region[0x%x].base = 0x%llx\n",
+ i, (unsigned long long)lmb.memory.region[i].base);
+ pr_debug(" .size = 0x%llx\n",
+ (unsigned long long)lmb.memory.region[i].size);
+ }
+
+ pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+ pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
+ for (i=0; i < lmb.reserved.cnt ;i++) {
+ pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
+ i, (unsigned long long)lmb.reserved.region[i].base);
+ pr_debug(" .size = 0x%llx\n",
+ (unsigned long long)lmb.reserved.region[i].size);
+ }
+#endif /* DEBUG */
+}
+
+static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+ u64 size2)
+{
+ return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
+}
+
+static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
+{
+ if (base2 == base1 + size1)
+ return 1;
+ else if (base1 == base2 + size2)
+ return -1;
+
+ return 0;
+}
+
+static long lmb_regions_adjacent(struct lmb_region *rgn,
+ unsigned long r1, unsigned long r2)
+{
+ u64 base1 = rgn->region[r1].base;
+ u64 size1 = rgn->region[r1].size;
+ u64 base2 = rgn->region[r2].base;
+ u64 size2 = rgn->region[r2].size;
+
+ return lmb_addrs_adjacent(base1, size1, base2, size2);
+}
+
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+{
+ unsigned long i;
+
+ for (i = r; i < rgn->cnt - 1; i++) {
+ rgn->region[i].base = rgn->region[i + 1].base;
+ rgn->region[i].size = rgn->region[i + 1].size;
+ }
+ rgn->cnt--;
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void lmb_coalesce_regions(struct lmb_region *rgn,
+ unsigned long r1, unsigned long r2)
+{
+ rgn->region[r1].size += rgn->region[r2].size;
+ lmb_remove_region(rgn, r2);
+}
+
+void __init lmb_init(void)
+{
+ /* Create a dummy zero size LMB which will get coalesced away later.
+ * This simplifies the lmb_add() code below...
+ */
+ lmb.memory.region[0].base = 0;
+ lmb.memory.region[0].size = 0;
+ lmb.memory.cnt = 1;
+
+ /* Ditto. */
+ lmb.reserved.region[0].base = 0;
+ lmb.reserved.region[0].size = 0;
+ lmb.reserved.cnt = 1;
+}
+
+void __init lmb_analyze(void)
+{
+ int i;
+
+ lmb.memory.size = 0;
+
+ for (i = 0; i < lmb.memory.cnt; i++)
+ lmb.memory.size += lmb.memory.region[i].size;
+}
+
+static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+{
+ unsigned long coalesced = 0;
+ long adjacent, i;
+
+ if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+ rgn->region[0].base = base;
+ rgn->region[0].size = size;
+ return 0;
+ }
+
+ /* First try and coalesce this LMB with another. */
+ for (i = 0; i < rgn->cnt; i++) {
+ u64 rgnbase = rgn->region[i].base;
+ u64 rgnsize = rgn->region[i].size;
+
+ if ((rgnbase == base) && (rgnsize == size))
+ /* Already have this region, so we're done */
+ return 0;
+
+ adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
+ if (adjacent > 0) {
+ rgn->region[i].base -= size;
+ rgn->region[i].size += size;
+ coalesced++;
+ break;
+ } else if (adjacent < 0) {
+ rgn->region[i].size += size;
+ coalesced++;
+ break;
+ }
+ }
+
+ if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
+ lmb_coalesce_regions(rgn, i, i+1);
+ coalesced++;
+ }
+
+ if (coalesced)
+ return coalesced;
+ if (rgn->cnt >= MAX_LMB_REGIONS)
+ return -1;
+
+ /* Couldn't coalesce the LMB, so add it to the sorted table. */
+ for (i = rgn->cnt - 1; i >= 0; i--) {
+ if (base < rgn->region[i].base) {
+ rgn->region[i+1].base = rgn->region[i].base;
+ rgn->region[i+1].size = rgn->region[i].size;
+ } else {
+ rgn->region[i+1].base = base;
+ rgn->region[i+1].size = size;
+ break;
+ }
+ }
+
+ if (base < rgn->region[0].base) {
+ rgn->region[0].base = base;
+ rgn->region[0].size = size;
+ }
+ rgn->cnt++;
+
+ return 0;
+}
+
+long lmb_add(u64 base, u64 size)
+{
+ struct lmb_region *_rgn = &lmb.memory;
+
+ /* On pSeries LPAR systems, the first LMB is our RMO region. */
+ if (base == 0)
+ lmb.rmo_size = size;
+
+ return lmb_add_region(_rgn, base, size);
+
+}
+
+long lmb_remove(u64 base, u64 size)
+{
+ struct lmb_region *rgn = &(lmb.memory);
+ u64 rgnbegin, rgnend;
+ u64 end = base + size;
+ int i;
+
+ rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+ /* Find the region where (base, size) belongs to */
+ for (i=0; i < rgn->cnt; i++) {
+ rgnbegin = rgn->region[i].base;
+ rgnend = rgnbegin + rgn->region[i].size;
+
+ if ((rgnbegin <= base) && (end <= rgnend))
+ break;
+ }
+
+ /* Didn't find the region */
+ if (i == rgn->cnt)
+ return -1;
+
+ /* Check to see if we are removing entire region */
+ if ((rgnbegin == base) && (rgnend == end)) {
+ lmb_remove_region(rgn, i);
+ return 0;
+ }
+
+ /* Check to see if region is matching at the front */
+ if (rgnbegin == base) {
+ rgn->region[i].base = end;
+ rgn->region[i].size -= size;
+ return 0;
+ }
+
+ /* Check to see if the region is matching at the end */
+ if (rgnend == end) {
+ rgn->region[i].size -= size;
+ return 0;
+ }
+
+ /*
+ * We need to split the entry - adjust the current one to the
+ * beginning of the hole and add the region after the hole.
+ */
+ rgn->region[i].size = base - rgn->region[i].base;
+ return lmb_add_region(rgn, end, rgnend - end);
+}
+
+long __init lmb_reserve(u64 base, u64 size)
+{
+ struct lmb_region *_rgn = &lmb.reserved;
+
+ BUG_ON(0 == size);
+
+ return lmb_add_region(_rgn, base, size);
+}
+
+long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
+{
+ unsigned long i;
+
+ for (i = 0; i < rgn->cnt; i++) {
+ u64 rgnbase = rgn->region[i].base;
+ u64 rgnsize = rgn->region[i].size;
+ if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
+ break;
+ }
+
+ return (i < rgn->cnt) ? i : -1;
+}
+
+static u64 lmb_align_down(u64 addr, u64 size)
+{
+ return addr & ~(size - 1);
+}
+
+static u64 lmb_align_up(u64 addr, u64 size)
+{
+ return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
+ u64 size, u64 align)
+{
+ u64 base, res_base;
+ long j;
+
+ base = lmb_align_down((end - size), align);
+ while (start <= base) {
+ j = lmb_overlaps_region(&lmb.reserved, base, size);
+ if (j < 0) {
+ /* this area isn't reserved, take it */
+ if (lmb_add_region(&lmb.reserved, base,
+ lmb_align_up(size, align)) < 0)
+ base = ~(u64)0;
+ return base;
+ }
+ res_base = lmb.reserved.region[j].base;
+ if (res_base < size)
+ break;
+ base = lmb_align_down(res_base - size, align);
+ }
+
+ return ~(u64)0;
+}
+
+static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
+ u64 (*nid_range)(u64, u64, int *),
+ u64 size, u64 align, int nid)
+{
+ u64 start, end;
+
+ start = mp->base;
+ end = start + mp->size;
+
+ start = lmb_align_up(start, align);
+ while (start < end) {
+ u64 this_end;
+ int this_nid;
+
+ this_end = nid_range(start, end, &this_nid);
+ if (this_nid == nid) {
+ u64 ret = lmb_alloc_nid_unreserved(start, this_end,
+ size, align);
+ if (ret != ~(u64)0)
+ return ret;
+ }
+ start = this_end;
+ }
+
+ return ~(u64)0;
+}
+
+u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+ u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+ struct lmb_region *mem = &lmb.memory;
+ int i;
+
+ for (i = 0; i < mem->cnt; i++) {
+ u64 ret = lmb_alloc_nid_region(&mem->region[i],
+ nid_range,
+ size, align, nid);
+ if (ret != ~(u64)0)
+ return ret;
+ }
+
+ return lmb_alloc(size, align);
+}
+
+u64 __init lmb_alloc(u64 size, u64 align)
+{
+ return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+ u64 alloc;
+
+ alloc = __lmb_alloc_base(size, align, max_addr);
+
+ if (alloc == 0)
+ panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+ (unsigned long long) size, (unsigned long long) max_addr);
+
+ return alloc;
+}
+
+u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
+{
+ long i, j;
+ u64 base = 0;
+ u64 res_base;
+
+ BUG_ON(0 == size);
+
+ /* On some platforms, make sure we allocate lowmem */
+ /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
+ if (max_addr == LMB_ALLOC_ANYWHERE)
+ max_addr = LMB_REAL_LIMIT;
+
+ for (i = lmb.memory.cnt - 1; i >= 0; i--) {
+ u64 lmbbase = lmb.memory.region[i].base;
+ u64 lmbsize = lmb.memory.region[i].size;
+
+ if (lmbsize < size)
+ continue;
+ if (max_addr == LMB_ALLOC_ANYWHERE)
+ base = lmb_align_down(lmbbase + lmbsize - size, align);
+ else if (lmbbase < max_addr) {
+ base = min(lmbbase + lmbsize, max_addr);
+ base = lmb_align_down(base - size, align);
+ } else
+ continue;
+
+ while (base && lmbbase <= base) {
+ j = lmb_overlaps_region(&lmb.reserved, base, size);
+ if (j < 0) {
+ /* this area isn't reserved, take it */
+ if (lmb_add_region(&lmb.reserved, base,
+ lmb_align_up(size, align)) < 0)
+ return 0;
+ return base;
+ }
+ res_base = lmb.reserved.region[j].base;
+ if (res_base < size)
+ break;
+ base = lmb_align_down(res_base - size, align);
+ }
+ }
+ return 0;
+}
+
+/* You must call lmb_analyze() before this. */
+u64 __init lmb_phys_mem_size(void)
+{
+ return lmb.memory.size;
+}
+
+u64 __init lmb_end_of_DRAM(void)
+{
+ int idx = lmb.memory.cnt - 1;
+
+ return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
+}
+
+/* You must call lmb_analyze() after this. */
+void __init lmb_enforce_memory_limit(u64 memory_limit)
+{
+ unsigned long i;
+ u64 limit;
+ struct lmb_property *p;
+
+ if (!memory_limit)
+ return;
+
+ /* Truncate the lmb regions to satisfy the memory limit. */
+ limit = memory_limit;
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ if (limit > lmb.memory.region[i].size) {
+ limit -= lmb.memory.region[i].size;
+ continue;
+ }
+
+ lmb.memory.region[i].size = limit;
+ lmb.memory.cnt = i + 1;
+ break;
+ }
+
+ if (lmb.memory.region[0].size < lmb.rmo_size)
+ lmb.rmo_size = lmb.memory.region[0].size;
+
+ /* And truncate any reserves above the limit also. */
+ for (i = 0; i < lmb.reserved.cnt; i++) {
+ p = &lmb.reserved.region[i];
+
+ if (p->base > memory_limit)
+ p->size = 0;
+ else if ((p->base + p->size) > memory_limit)
+ p->size = memory_limit - p->base;
+
+ if (p->size == 0) {
+ lmb_remove_region(&lmb.reserved, i);
+ i--;
+ }
+ }
+}
+
+int __init lmb_is_reserved(u64 addr)
+{
+ int i;
+
+ for (i = 0; i < lmb.reserved.cnt; i++) {
+ u64 upper = lmb.reserved.region[i].base +
+ lmb.reserved.region[i].size - 1;
+ if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int lmb_find(struct lmb_property *res)
+{
+ int i;
+ u64 rstart, rend;
+
+ rstart = res->base;
+ rend = rstart + res->size - 1;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 start = lmb.memory.region[i].base;
+ u64 end = start + lmb.memory.region[i].size - 1;
+
+ if (start > rend)
+ return -1;
+
+ if ((end >= rstart) && (start < rend)) {
+ /* adjust the request */
+ if (rstart < start)
+ rstart = start;
+ if (rend > end)
+ rend = end;
+ res->base = rstart;
+ res->size = rend - rstart + 1;
+ return 0;
+ }
+ }
+ return -1;
+}
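Architecture setup code is the intended caller of this new file; a hedged sketch of the usual boot-time sequence follows. The memory layout constants are illustrative assumptions, not values from the patch.

#include <linux/init.h>
#include <linux/lmb.h>

/* Rough shape of an early arch setup path using the LMB API above. */
static void __init example_lmb_setup(void)
{
	lmb_init();

	/* Register RAM reported by firmware: 1 GB at physical 0. */
	lmb_add(0x00000000, 0x40000000);

	/* Carve out the kernel image (illustrative range). */
	lmb_reserve(0x00008000, 0x00400000);

	lmb_analyze();		/* totals lmb.memory.size */
	lmb_dump_all();		/* no-op unless DEBUG is defined */

	/* Boot-time allocation: 1 MB, 1 MB aligned, panics on failure. */
	(void)lmb_alloc(0x100000, 0x100000);
}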
diff --git a/lib/pcounter.c b/lib/pcounter.c
deleted file mode 100644
index 9b56807da93b..000000000000
--- a/lib/pcounter.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Define default pcounter functions
- * Note that often used pcounters use dedicated functions to get a speed increase.
- * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER)
- */
-
-#include <linux/module.h>
-#include <linux/pcounter.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-
-static void pcounter_dyn_add(struct pcounter *self, int inc)
-{
- per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc;
-}
-
-static int pcounter_dyn_getval(const struct pcounter *self, int cpu)
-{
- return per_cpu_ptr(self->per_cpu_values, cpu)[0];
-}
-
-int pcounter_getval(const struct pcounter *self)
-{
- int res = 0, cpu;
-
- for_each_possible_cpu(cpu)
- res += self->getval(self, cpu);
-
- return res;
-}
-EXPORT_SYMBOL_GPL(pcounter_getval);
-
-int pcounter_alloc(struct pcounter *self)
-{
- int rc = 0;
- if (self->add == NULL) {
- self->per_cpu_values = alloc_percpu(int);
- if (self->per_cpu_values != NULL) {
- self->add = pcounter_dyn_add;
- self->getval = pcounter_dyn_getval;
- } else
- rc = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL_GPL(pcounter_alloc);
-
-void pcounter_free(struct pcounter *self)
-{
- if (self->per_cpu_values != NULL) {
- free_percpu(self->per_cpu_values);
- self->per_cpu_values = NULL;
- self->getval = NULL;
- self->add = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(pcounter_free);
-
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 393a0e915c23..119174494cb5 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -102,6 +102,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
return;
free_percpu(fbc->counters);
+ fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
mutex_lock(&percpu_counters_lock);
list_del(&fbc->list);
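The one-line change above clears fbc->counters on destroy, so a repeated percpu_counter_destroy() becomes harmless. A minimal sketch of the counter lifecycle, with illustrative "widget" names:

#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

static int widgets_start(void)
{
	return percpu_counter_init(&nr_widgets, 0);
}

static void widget_created(void)
{
	percpu_counter_inc(&nr_widgets);	/* cheap per-cpu update */
}

static s64 widgets_total(void)
{
	return percpu_counter_sum(&nr_widgets);	/* exact, folds all cpus */
}

static void widgets_stop(void)
{
	percpu_counter_destroy(&nr_widgets);
	/* with the fix above, calling destroy again is a no-op */
}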
diff --git a/lib/proportions.c b/lib/proportions.c
index 9508d9a7af3e..4f387a643d72 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,12 +73,6 @@
#include <linux/proportions.h>
#include <linux/rcupdate.h>
-/*
- * Limit the time part in order to ensure there are some bits left for the
- * cycle counter.
- */
-#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
-
int prop_descriptor_init(struct prop_descriptor *pd, int shift)
{
int err;
@@ -268,6 +262,38 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
}
/*
+ * identical to __prop_inc_percpu, except that it limits this pl's fraction to
+ * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
+ */
+void __prop_inc_percpu_max(struct prop_descriptor *pd,
+ struct prop_local_percpu *pl, long frac)
+{
+ struct prop_global *pg = prop_get_global(pd);
+
+ prop_norm_percpu(pg, pl);
+
+ if (unlikely(frac != PROP_FRAC_BASE)) {
+ unsigned long period_2 = 1UL << (pg->shift - 1);
+ unsigned long counter_mask = period_2 - 1;
+ unsigned long global_count;
+ long numerator, denominator;
+
+ numerator = percpu_counter_read_positive(&pl->events);
+ global_count = percpu_counter_read(&pg->events);
+ denominator = period_2 + (global_count & counter_mask);
+
+ if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
+ goto out_put;
+ }
+
+ percpu_counter_add(&pl->events, 1);
+ percpu_counter_add(&pg->events, 1);
+
+out_put:
+ prop_put_global(pd, pg);
+}
+
+/*
* Obtain a fraction of this proportion
*
* p_{j} = x_{j} / (period/2 + t % period/2)
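The new __prop_inc_percpu_max() above is for callers that want to account an event only while their local share stays below a limit; a minimal sketch, where struct my_source and the call site are illustrative assumptions:

#include <linux/proportions.h>

struct my_source {
	struct prop_local_percpu completions;
};

/* Count one event for @src, but ignore it once @src already holds more
 * than max_frac/PROP_FRAC_BASE of the global proportion. */
static void account_event(struct prop_descriptor *pd,
			  struct my_source *src, long max_frac)
{
	__prop_inc_percpu_max(pd, &src->completions, max_frac);
}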
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 65f0e758ec38..bd521716ab1a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
}
}
if (ret == NULL)
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
BUG_ON(radix_tree_is_indirect_ptr(ret));
return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
rtp = &__get_cpu_var(radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
- node = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
- SLAB_PANIC, radix_tree_node_ctor);
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+ radix_tree_node_ctor);
radix_tree_init_maxindex();
hotcpu_notifier(radix_tree_callback, 0);
}
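radix_tree_preload(), whose node allocation changes above, is normally paired with an insert under a spinlock so the insert itself cannot fail on memory; a minimal sketch with illustrative tree and lock names:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_tree_lock);

static int my_tree_insert(unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep, fills per-cpu pool */
	if (err)
		return err;

	spin_lock(&my_tree_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return err;
}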
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
new file mode 100644
index 000000000000..485e3040dcd4
--- /dev/null
+++ b/lib/ratelimit.c
@@ -0,0 +1,51 @@
+/*
+ * ratelimit.c - helper to rate limit noisy code paths (e.g. printk).
+ *
+ * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+/*
+ * __ratelimit - rate limiting
+ * @ratelimit_jiffies: minimum time in jiffies between two callbacks
+ * @ratelimit_burst: number of callbacks we do before ratelimiting
+ *
+ * This enforces a rate limit: not more than @ratelimit_burst callbacks
+ * in every @ratelimit_jiffies window.
+ */
+int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+{
+ static DEFINE_SPINLOCK(ratelimit_lock);
+ static unsigned toks = 10 * 5 * HZ;
+ static unsigned long last_msg;
+ static int missed;
+ unsigned long flags;
+ unsigned long now = jiffies;
+
+ spin_lock_irqsave(&ratelimit_lock, flags);
+ toks += now - last_msg;
+ last_msg = now;
+ if (toks > (ratelimit_burst * ratelimit_jiffies))
+ toks = ratelimit_burst * ratelimit_jiffies;
+ if (toks >= ratelimit_jiffies) {
+ int lost = missed;
+
+ missed = 0;
+ toks -= ratelimit_jiffies;
+ spin_unlock_irqrestore(&ratelimit_lock, flags);
+ if (lost)
+ printk(KERN_WARNING "%s: %d messages suppressed\n",
+ __func__, lost);
+ return 1;
+ }
+ missed++;
+ spin_unlock_irqrestore(&ratelimit_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(__ratelimit);
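Callers gate a noisy path on the return value; a minimal sketch, where the 5*HZ window and burst of 10 mirror the old printk defaults and are illustrative here:

#include <linux/kernel.h>
#include <linux/jiffies.h>

/* Emit at most 10 of these warnings per 5-second window. */
static void warn_sometimes(int err)
{
	if (__ratelimit(5 * HZ, 10))
		printk(KERN_WARNING "operation failed with error %d\n", err);
}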
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 3ea2db94d5b0..06d04cfa9339 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -45,7 +45,6 @@
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <asm/semaphore.h>
/* This list holds all currently allocated rs control structures */
static LIST_HEAD (rslist);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca4901046c..b80c21100d78 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/highmem.h>
/**
* sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ * @to_buffer: transfer direction (non-zero == from an sg list to a
+ * buffer, 0 == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, int to_buffer)
+{
+ struct scatterlist *sg;
+ size_t buf_off = 0;
+ int i;
+
+ WARN_ON(!irqs_disabled());
+
+ for_each_sg(sgl, sg, nents, i) {
+ struct page *page;
+ int n = 0;
+ unsigned int sg_off = sg->offset;
+ unsigned int sg_copy = sg->length;
+
+ if (sg_copy > buflen)
+ sg_copy = buflen;
+ buflen -= sg_copy;
+
+ while (sg_copy > 0) {
+ unsigned int page_copy;
+ void *p;
+
+ page_copy = PAGE_SIZE - sg_off;
+ if (page_copy > sg_copy)
+ page_copy = sg_copy;
+
+ page = nth_page(sg_page(sg), n);
+ p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+ if (to_buffer)
+ memcpy(buf + buf_off, p + sg_off, page_copy);
+ else {
+ memcpy(p + sg_off, buf + buf_off, page_copy);
+ flush_kernel_dcache_page(page);
+ }
+
+ kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+ buf_off += page_copy;
+ sg_off += page_copy;
+ if (sg_off == PAGE_SIZE) {
+ sg_off = 0;
+ n++;
+ }
+ sg_copy -= page_copy;
+ }
+
+ if (!buflen)
+ break;
+ }
+
+ return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
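A hedged sketch of how a driver might use the new copy helpers; the drain_sg() wrapper and its error convention are illustrative assumptions. Note the WARN_ON(!irqs_disabled()) in sg_copy_buffer(): these helpers expect to run with interrupts off.

#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Copy @len bytes out of an SG table into a linear buffer. */
static int drain_sg(struct sg_table *table, void *buf, size_t len)
{
	size_t copied;

	copied = sg_copy_to_buffer(table->sgl, table->nents, buf, len);
	return copied == len ? 0 : -EIO;
}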
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * the wait_queue_head.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_UNINTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we* can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * wait_queue_head. The "-1" is because we're
- * still hoping to get the semaphore.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_INTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- tsk->state = TASK_RUNNING;
- return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
- int sleepers;
- unsigned long flags;
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock in the
- * wait_queue_head.
- */
- if (!atomic_add_negative(sleepers, &sem->count)) {
- wake_up_locked(&sem->wait);
- }
-
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- return 1;
-}
diff --git a/lib/string.c b/lib/string.c
index 5efafed3d6b6..b19b87af65a3 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -493,6 +493,33 @@ char *strsep(char **s, const char *ct)
EXPORT_SYMBOL(strsep);
#endif
+/**
+ * sysfs_streq - return true if strings are equal, modulo trailing newline
+ * @s1: one string
+ * @s2: another string
+ *
+ * This routine returns true iff two strings are equal, treating both
+ * NUL and newline-then-NUL as equivalent string terminations. It's
+ * geared for use with sysfs input strings, which generally terminate
+ * with newlines but are compared against values without newlines.
+ */
+bool sysfs_streq(const char *s1, const char *s2)
+{
+ while (*s1 && *s1 == *s2) {
+ s1++;
+ s2++;
+ }
+
+ if (*s1 == *s2)
+ return true;
+ if (!*s1 && *s2 == '\n' && !s2[1])
+ return true;
+ if (*s1 == '\n' && !s1[1] && !*s2)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(sysfs_streq);
+
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value
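sysfs_streq() is aimed at sysfs store() callbacks, where the incoming buffer usually ends in a newline; a minimal sketch, with an illustrative device attribute and flag:

#include <linux/device.h>
#include <linux/string.h>

static bool my_feature_enabled;

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "on"))		/* matches "on" and "on\n" */
		my_feature_enabled = true;
	else if (sysfs_streq(buf, "off"))
		my_feature_enabled = false;
	else
		return -EINVAL;
	return count;
}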
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 025922807e6e..d568894df8cc 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
return (addr & ~mask) != 0;
}
-static inline unsigned int is_span_boundary(unsigned int index,
- unsigned int nslots,
- unsigned long offset_slots,
- unsigned long max_slots)
-{
- unsigned long offset = (offset_slots + index) & (max_slots - 1);
- return offset + nslots > max_slots;
-}
-
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
* request and allocate a buffer from that IO TLB pool.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
- {
- index = ALIGN(io_tlb_index, stride);
- if (index >= io_tlb_nslabs)
- index = 0;
- wrap = index;
-
- do {
- while (is_span_boundary(index, nslots, offset_slots,
- max_slots)) {
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
- if (index == wrap)
- goto not_found;
- }
-
- /*
- * If we find a slot that indicates we have 'nslots'
- * number of contiguous buffers, we allocate the
- * buffers from that slot and mark the entries as '0'
- * indicating unavailable.
- */
- if (io_tlb_list[index] >= nslots) {
- int count = 0;
-
- for (i = index; i < (int) (index + nslots); i++)
- io_tlb_list[i] = 0;
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
- /*
- * Update the indices to avoid searching in
- * the next round.
- */
- io_tlb_index = ((index + nslots) < io_tlb_nslabs
- ? (index + nslots) : 0);
-
- goto found;
- }
+ index = ALIGN(io_tlb_index, stride);
+ if (index >= io_tlb_nslabs)
+ index = 0;
+ wrap = index;
+
+ do {
+ while (iommu_is_span_boundary(index, nslots, offset_slots,
+ max_slots)) {
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
- } while (index != wrap);
+ if (index == wrap)
+ goto not_found;
+ }
- not_found:
- spin_unlock_irqrestore(&io_tlb_lock, flags);
- return NULL;
- }
- found:
+ /*
+ * If we find a slot that indicates we have 'nslots' number of
+ * contiguous buffers, we allocate the buffers from that slot
+ * and mark the entries as '0' indicating unavailable.
+ */
+ if (io_tlb_list[index] >= nslots) {
+ int count = 0;
+
+ for (i = index; i < (int) (index + nslots); i++)
+ io_tlb_list[i] = 0;
+ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+ io_tlb_list[i] = ++count;
+ dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+ /*
+ * Update the indices to avoid searching in the next
+ * round.
+ */
+ io_tlb_index = ((index + nslots) < io_tlb_nslabs
+ ? (index + nslots) : 0);
+
+ goto found;
+ }
+ index += stride;
+ if (index >= io_tlb_nslabs)
+ index = 0;
+ } while (index != wrap);
+
+not_found:
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+ return NULL;
+found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
*/
dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+ int dir, struct dma_attrs *attrs)
{
dma_addr_t dev_addr = virt_to_bus(ptr);
void *map;
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
return dev_addr;
}
+EXPORT_SYMBOL(swiotlb_map_single_attrs);
+
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+ return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
+}
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
* whatever the device wrote there.
*/
void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
- int dir)
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir, struct dma_attrs *attrs)
{
char *dma_addr = bus_to_virt(dev_addr);
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
}
+EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+ int dir)
+{
+ return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
+}
/*
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
SYNC_FOR_DEVICE);
}
+void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
+ struct dma_attrs *);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* This is the scatter-gather version of the above swiotlb_map_single
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
* same here.
*/
int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
- int dir)
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ int dir, struct dma_attrs *attrs)
{
struct scatterlist *sg;
void *addr;
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
/* Don't panic here, we expect map_sg users
to do proper error handling. */
swiotlb_full(hwdev, sg->length, dir, 0);
- swiotlb_unmap_sg(hwdev, sgl, i, dir);
+ swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+ attrs);
sgl[0].dma_length = 0;
return 0;
}
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
}
return nelems;
}
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ int dir)
+{
+ return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
* concerning calls here are the same as for swiotlb_unmap_single() above.
*/
void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
- int dir)
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, int dir, struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
}
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
+
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ int dir)
+{
+ return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
/*
* Make physical memory consistent for a set of streaming mode DMA translations