Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |  13
-rw-r--r--  lib/Kconfig.debug                | 178
-rw-r--r--  lib/Kconfig.kgdb                 |  24
-rw-r--r--  lib/Makefile                     |  14
-rw-r--r--  lib/argv_split.c                 |  13
-rw-r--r--  lib/atomic64.c                   |   4
-rw-r--r--  lib/atomic64_test.c              | 166
-rw-r--r--  lib/bitmap.c                     | 100
-rw-r--r--  lib/btree.c                      | 798
-rw-r--r--  lib/bug.c                        |   2
-rw-r--r--  lib/checksum.c                   |  14
-rw-r--r--  lib/cpu-notifier-error-inject.c  |  63
-rw-r--r--  lib/cpumask.c                    |   1
-rw-r--r--  lib/crc32.c                      | 150
-rw-r--r--  lib/ctype.c                      |  50
-rw-r--r--  lib/debug_locks.c                |   1
-rw-r--r--  lib/debugobjects.c               | 139
-rw-r--r--  lib/decompress.c                 |   5
-rw-r--r--  lib/decompress_bunzip2.c         |  12
-rw-r--r--  lib/decompress_unlzo.c           | 217
-rw-r--r--  lib/devres.c                     |   3
-rw-r--r--  lib/dma-debug.c                  |  27
-rw-r--r--  lib/dynamic_debug.c              |   9
-rw-r--r--  lib/fault-inject.c               |   1
-rw-r--r--  lib/flex_array.c                 |   2
-rw-r--r--  lib/gen_crc32table.c             |  47
-rw-r--r--  lib/genalloc.c                   |  35
-rw-r--r--  lib/hexdump.c                    |  54
-rw-r--r--  lib/hweight.c                    |  26
-rw-r--r--  lib/idr.c                        |  27
-rw-r--r--  lib/inflate.c                    |   1
-rw-r--r--  lib/iommu-helper.c               |  59
-rw-r--r--  lib/ioremap.c                    |  10
-rw-r--r--  lib/kasprintf.c                  |   1
-rw-r--r--  lib/kernel_lock.c                |  46
-rw-r--r--  lib/kobject.c                    | 121
-rw-r--r--  lib/kobject_uevent.c             | 115
-rw-r--r--  lib/kref.c                       |  16
-rw-r--r--  lib/lcm.c                        |  15
-rw-r--r--  lib/list_sort.c                  | 217
-rw-r--r--  lib/lmb.c                        | 527
-rw-r--r--  lib/lru_cache.c                  | 560
-rw-r--r--  lib/lzo/lzo1x_decompress.c       |   9
-rw-r--r--  lib/parser.c                     |  11
-rw-r--r--  lib/plist.c                      |   8
-rw-r--r--  lib/radix-tree.c                 |  46
-rw-r--r--  lib/raid6/Makefile               |  22
-rw-r--r--  lib/raid6/raid6algos.c           |  21
-rw-r--r--  lib/raid6/raid6altivec.uc        |   2
-rw-r--r--  lib/raid6/raid6int.uc            |   2
-rw-r--r--  lib/raid6/raid6test/Makefile     |  42
-rw-r--r--  lib/raid6/unroll.awk             |  20
-rw-r--r--  lib/raid6/unroll.pl              |  24
-rw-r--r--  lib/random32.c                   |  40
-rw-r--r--  lib/ratelimit.c                  |  54
-rw-r--r--  lib/rational.c                   |   1
-rw-r--r--  lib/rbtree.c                     |  68
-rw-r--r--  lib/rwsem-spinlock.c             |  37
-rw-r--r--  lib/rwsem.c                      |   5
-rw-r--r--  lib/scatterlist.c                |   1
-rw-r--r--  lib/show_mem.c                   |  14
-rw-r--r--  lib/spinlock_debug.c             |  64
-rw-r--r--  lib/string.c                     | 106
-rw-r--r--  lib/swiotlb.c                    | 217
-rw-r--r--  lib/textsearch.c                 |   1
-rw-r--r--  lib/uuid.c                       |  53
-rw-r--r--  lib/vsprintf.c                   | 652
-rw-r--r--  lib/zlib_inflate/inffast.c       |  73
68 files changed, 3935 insertions, 1541 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 1cc756c4c78c..fa9bf2c06199 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -120,6 +120,10 @@ config DECOMPRESS_BZIP2
config DECOMPRESS_LZMA
tristate
+config DECOMPRESS_LZO
+ select LZO_DECOMPRESS
+ tristate
+
#
# Generic allocator support is selected if needed
#
@@ -159,6 +163,9 @@ config TEXTSEARCH_BM
config TEXTSEARCH_FSM
tristate
+config BTREE
+ boolean
+
config HAS_IOMEM
boolean
depends on !NO_IOMEM
@@ -177,9 +184,6 @@ config HAS_DMA
config CHECK_SIGNATURE
bool
-config HAVE_LMB
- boolean
-
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
help
@@ -203,4 +207,7 @@ config NLATTR
config GENERIC_ATOMIC64
bool
+config LRU_CACHE
+ tristate
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 891155817bc6..79e0dff1cdcb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -76,7 +76,6 @@ config UNUSED_SYMBOLS
config DEBUG_FS
bool "Debug Filesystem"
- depends on SYSFS
help
debugfs is a virtual file system that kernel developers use to put
debugging files into. Enable this option to be able to read and
@@ -103,9 +102,10 @@ config HEADERS_CHECK
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
- depends on UNDEFINED
+ depends on UNDEFINED || (BLACKFIN)
+ default y
# This option is on purpose disabled for now.
- # It will be enabled when we are down to a resonable number
+ # It will be enabled when we are down to a reasonable number
# of section mismatch warnings (< 10 for an allyesconfig build)
help
The section mismatch analysis checks if there are illegal
@@ -151,28 +151,33 @@ config DEBUG_SHIRQ
Drivers ought to be able to handle interrupts coming in at those
points; some don't and need to be caught.
-config DETECT_SOFTLOCKUP
- bool "Detect Soft Lockups"
+config LOCKUP_DETECTOR
+ bool "Detect Hard and Soft Lockups"
depends on DEBUG_KERNEL && !S390
- default y
help
- Say Y here to enable the kernel to detect "soft lockups",
- which are bugs that cause the kernel to loop in kernel
+ Say Y here to enable the kernel to act as a watchdog to detect
+ hard and soft lockups.
+
+ Softlockups are bugs that cause the kernel to loop in kernel
mode for more than 60 seconds, without giving other tasks a
- chance to run.
+ chance to run. The current stack trace is displayed upon
+ detection and the system will stay locked up.
- When a soft-lockup is detected, the kernel will print the
- current stack trace (which you should report), but the
- system will stay locked up. This feature has negligible
- overhead.
+ Hardlockups are bugs that cause the CPU to loop in kernel mode
+ for more than 60 seconds, without letting other interrupts have a
+ chance to run. The current stack trace is displayed upon detection
+ and the system will stay locked up.
+
+ The overhead should be minimal. A periodic hrtimer runs to
+ generate interrupts and kick the watchdog task every 10-12 seconds.
+ An NMI is generated every 60 seconds or so to check for hardlockups.
- (Note that "hard lockups" are separate type of bugs that
- can be detected via the NMI-watchdog, on platforms that
- support it.)
+config HARDLOCKUP_DETECTOR
+ def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
- depends on DETECT_SOFTLOCKUP
+ depends on LOCKUP_DETECTOR
help
Say Y here to enable the kernel to panic on "soft lockups",
which are bugs that cause the kernel to loop in kernel
@@ -189,7 +194,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
int
- depends on DETECT_SOFTLOCKUP
+ depends on LOCKUP_DETECTOR
range 0 1
default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
@@ -298,6 +303,20 @@ config DEBUG_OBJECTS_TIMERS
timer routines to track the life time of timer objects and
validate the timer operations.
+config DEBUG_OBJECTS_WORK
+ bool "Debug work objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ work queue routines to track the life time of work objects and
+ validate the work operations.
+
+config DEBUG_OBJECTS_RCU_HEAD
+ bool "Debug RCU callbacks objects"
+ depends on DEBUG_OBJECTS && PREEMPT
+ help
+ Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+
config DEBUG_OBJECTS_ENABLE_DEFAULT
int "debug_objects bootup default value (0-1)"
range 0 1
@@ -346,11 +365,13 @@ config SLUB_STATS
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
- depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \
- !MEMORY_HOTPLUG
+ depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
+ (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
+
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS
+ select CRC32
help
Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way
@@ -370,7 +391,7 @@ config DEBUG_KMEMLEAK
config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
int "Maximum kmemleak early log entries"
depends on DEBUG_KMEMLEAK
- range 200 2000
+ range 200 40000
default 400
help
Kmemleak must track all the memory allocations to avoid
@@ -391,7 +412,7 @@ config DEBUG_KMEMLEAK_TEST
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
- depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
+ depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
default y
help
If you say Y here then the kernel will use a debug variant of the
@@ -489,11 +510,35 @@ config PROVE_LOCKING
For more details, see Documentation/lockdep-design.txt.
+config PROVE_RCU
+ bool "RCU debugging: prove RCU correctness"
+ depends on PROVE_LOCKING
+ default n
+ help
+ This feature enables lockdep extensions that check for correct
+ use of RCU APIs. This is currently under development. Say Y
+ if you want to debug RCU usage or help work on the PROVE_RCU
+ feature.
+
+ Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+ bool "RCU debugging: don't disable PROVE_RCU on first splat"
+ depends on PROVE_RCU
+ default n
+ help
+ By itself, PROVE_RCU will disable checking upon issuing the
+ first warning (or "splat"). This feature prevents such
+ disabling, allowing multiple RCU-lockdep warnings to be printed
+ on a single reboot.
+
+ Say N if you are unsure.
+
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
+ select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
select KALLSYMS
select KALLSYMS_ALL
@@ -510,6 +555,14 @@ config LOCK_STAT
For more details, see Documentation/lockstat.txt
+ This also enables lock events required by "perf lock",
+ subcommand of perf.
+ If you want to use "perf lock", you also need to turn on
+ CONFIG_EVENT_TRACING.
+
+ CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
+ (CONFIG_LOCKDEP defines "acquire" and "release" events.)
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
@@ -566,7 +619,7 @@ config DEBUG_BUGVERBOSE
depends on BUG
depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
- default !EMBEDDED
+ default y
help
Say Y here to make BUG() panics output the file name and line number
of the BUG call as well as the EIP and oops trace. This aids
@@ -585,6 +638,19 @@ config DEBUG_INFO
If unsure, say N.
+config DEBUG_INFO_REDUCED
+ bool "Reduce debugging information"
+ depends on DEBUG_INFO
+ help
+ If you say Y here gcc is instructed to generate less debugging
+ information for structure types. This means that tools that
+ need full debugging information (like kgdb or systemtap) won't
+ be happy. But if you merely need debugging information to
+ resolve line numbers there is no loss. Advantage is that
+ build directory object sizes shrink dramatically over a full
+ DEBUG_INFO build and compile times are reduced too.
+ Only works with newer gcc versions.
+
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
@@ -749,16 +815,28 @@ config RCU_TORTURE_TEST_RUNNABLE
config RCU_CPU_STALL_DETECTOR
bool "Check for stalled CPUs delaying RCU grace periods"
depends on TREE_RCU || TREE_PREEMPT_RCU
- default n
+ default y
help
This option causes RCU to printk information on which
CPUs are delaying the current grace period, but only when
the grace period extends for excessive time periods.
- Say Y if you want RCU to perform such checks.
+ Say N if you want to disable such checks.
+
+ Say Y if you are unsure.
+
+config RCU_CPU_STALL_VERBOSE
+ bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
+ depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
+ default y
+ help
+ This option causes RCU to printk detailed per-task information
+ for any tasks that are stalling the current RCU grace period.
Say N if you are unsure.
+ Say Y if you want to enable such checks.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -830,8 +908,7 @@ config DEBUG_FORCE_WEAK_PER_CPU
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
- depends on DEBUG_KERNEL
- depends on KPROBES
+ depends on DEBUG_FS
depends on BLOCK
default n
help
@@ -842,7 +919,19 @@ config LKDTM
called lkdtm.
Documentation on how to use the module can be found in
- drivers/misc/lkdtm.c
+ Documentation/fault-injection/provoke-crashes.txt
+
+config CPU_NOTIFIER_ERROR_INJECT
+ tristate "CPU notifier error injection module"
+ depends on HOTPLUG_CPU && DEBUG_KERNEL
+ help
+ This option provides a kernel module that can be used to test
+ the error handling of the cpu notifiers
+
+ To compile this code as a module, choose M here: the module will
+ be called cpu-notifier-error-inject.
+
+ If unsure, say N.
config FAULT_INJECTION
bool "Fault-injection framework"
@@ -871,7 +960,7 @@ config FAIL_MAKE_REQUEST
Provide fault-injection capability for disk IO.
config FAIL_IO_TIMEOUT
- bool "Faul-injection capability for faking disk interrupts"
+ bool "Fault-injection capability for faking disk interrupts"
depends on FAULT_INJECTION && BLOCK
help
Provide fault-injection capability on end IO handling. This
@@ -892,13 +981,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !PPC && !S390
+ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
help
Provide stacktrace filter for fault-injection capabilities
config LATENCYTOP
bool "Latency measuring infrastructure"
- select FRAME_POINTER if !MIPS && !PPC && !S390
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -911,7 +1000,7 @@ config LATENCYTOP
config SYSCTL_SYSCALL_CHECK
bool "Sysctl checks"
- depends on SYSCTL_SYSCALL
+ depends on SYSCTL
---help---
sys_sysctl uses binary paths that have been found challenging
to properly maintain and use. This enables checks that help
@@ -985,10 +1074,10 @@ config DYNAMIC_DEBUG
Usage:
- Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+ Dynamic debugging is controlled via the 'dynamic_debug/control' file,
which is contained in the 'debugfs' filesystem. Thus, the debugfs
filesystem must first be mounted before making use of this feature.
- We refer the control file as: <debugfs>/dynamic_debug/ddebug. This
+ We refer to the control file as: <debugfs>/dynamic_debug/control. This
file contains a list of the debug statements that can be enabled. The
format for each line of the file is:
@@ -1003,7 +1092,7 @@ config DYNAMIC_DEBUG
From a live system:
- nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+ nullarbor:~ # cat <debugfs>/dynamic_debug/control
# filename:lineno [module]function flags format
fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
@@ -1013,23 +1102,23 @@ config DYNAMIC_DEBUG
// enable the message at line 1603 of file svcsock.c
nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
- <debugfs>/dynamic_debug/ddebug
+ <debugfs>/dynamic_debug/control
// enable all the messages in file svcsock.c
nullarbor:~ # echo -n 'file svcsock.c +p' >
- <debugfs>/dynamic_debug/ddebug
+ <debugfs>/dynamic_debug/control
// enable all the messages in the NFS server module
nullarbor:~ # echo -n 'module nfsd +p' >
- <debugfs>/dynamic_debug/ddebug
+ <debugfs>/dynamic_debug/control
// enable all 12 messages in the function svc_process()
nullarbor:~ # echo -n 'func svc_process +p' >
- <debugfs>/dynamic_debug/ddebug
+ <debugfs>/dynamic_debug/control
// disable all 12 messages in the function svc_process()
nullarbor:~ # echo -n 'func svc_process -p' >
- <debugfs>/dynamic_debug/ddebug
+ <debugfs>/dynamic_debug/control
See Documentation/dynamic-debug-howto.txt for additional information.
@@ -1044,6 +1133,13 @@ config DMA_API_DEBUG
This option causes a performance degradation. Use only if you want
to debug device drivers. If unsure, say N.
+config ATOMIC64_SELFTEST
+ bool "Perform an atomic64_t self-test at boot"
+ help
+ Enable this option to test the atomic64_t functions at boot.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 9b5d1d7f2ef7..43cb93fa2651 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,7 +3,7 @@ config HAVE_ARCH_KGDB
bool
menuconfig KGDB
- bool "KGDB: kernel debugging with remote gdb"
+ bool "KGDB: kernel debugger"
depends on HAVE_ARCH_KGDB
depends on DEBUG_KERNEL && EXPERIMENTAL
help
@@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING
information about other strings you could use beyond the
default of V1F100.
+config KGDB_LOW_LEVEL_TRAP
+ bool "KGDB: Allow debugging with traps in notifiers"
+ depends on X86 || MIPS
+ default n
+ help
+ This will add an extra call back to kgdb for the breakpoint
+ exception handler on which will will allow kgdb to step
+ through a notify handler.
+
+config KGDB_KDB
+ bool "KGDB_KDB: include kdb frontend for kgdb"
+ default n
+ help
+ KDB frontend for kernel
+
+config KDB_KEYBOARD
+ bool "KGDB_KDB: keyboard as input device"
+ depends on VT && KGDB_KDB
+ default n
+ help
+ KDB can use a PS/2 type keyboard for an input device
+
endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index b25faf0d1d5c..e6a3763b8212 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
- string_helpers.o gcd.o
+ string_helpers.o gcd.o lcm.o list_sort.o uuid.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -39,8 +39,12 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+
+CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@ -70,6 +74,7 @@ obj-$(CONFIG_RAID6_PQ) += raid6/
lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
@@ -81,23 +86,26 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
obj-$(CONFIG_NLATTR) += nlattr.o
+obj-$(CONFIG_LRU_CACHE) += lru_cache.o
+
obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
obj-$(CONFIG_GENERIC_CSUM) += checksum.o
obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5205a8dae5bc..4b1b083f219c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -4,17 +4,10 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
-static const char *skip_sep(const char *cp)
-{
- while (*cp && isspace(*cp))
- cp++;
-
- return cp;
-}
-
static const char *skip_arg(const char *cp)
{
while (*cp && !isspace(*cp))
@@ -28,7 +21,7 @@ static int count_argc(const char *str)
int count = 0;
while (*str) {
- str = skip_sep(str);
+ str = skip_spaces(str);
if (*str) {
count++;
str = skip_arg(str);
@@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
argvp = argv;
while (*str) {
- str = skip_sep(str);
+ str = skip_spaces(str);
if (*str) {
const char *p = str;
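[Editorial note: the dropped skip_sep() helper is replaced by the shared skip_spaces() from <linux/string.h>, which has the same contract. A behavioral sketch, mirroring the lib/string.c implementation, shown only to document what argv_split() now relies on:]

	char *skip_spaces(const char *str)
	{
		while (isspace(*str))
			++str;
		return (char *)str;	/* first non-whitespace character */
	}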
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 8bee16ec7524..a21c12bc727c 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -162,12 +162,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
spinlock_t *lock = lock_addr(v);
- int ret = 1;
+ int ret = 0;
spin_lock_irqsave(lock, flags);
if (v->counter != u) {
v->counter += a;
- ret = 0;
+ ret = 1;
}
spin_unlock_irqrestore(lock, flags);
return ret;
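[Editorial note: this fix inverts atomic64_add_unless()'s return value so it reports nonzero when the addition actually happened, matching the atomic_add_unless() convention. A minimal sketch of the corrected semantics; the refcount use is illustrative, not part of this diff:]

	/* Returns 1 and increments unless the counter already equals 0 --
	 * the usual "take a reference unless the object is dead" idiom. */
	static int example_get_ref(atomic64_t *refcount)
	{
		return atomic64_add_unless(refcount, 1LL, 0LL);
	}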
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
new file mode 100644
index 000000000000..44524cc8c32a
--- /dev/null
+++ b/lib/atomic64_test.c
@@ -0,0 +1,166 @@
+/*
+ * Testsuite for atomic64_t functions
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/atomic.h>
+
+#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
+static __init int test_atomic64(void)
+{
+ long long v0 = 0xaaa31337c001d00dLL;
+ long long v1 = 0xdeadbeefdeafcafeLL;
+ long long v2 = 0xfaceabadf00df001LL;
+ long long onestwos = 0x1111111122222222LL;
+ long long one = 1LL;
+
+ atomic64_t v = ATOMIC64_INIT(v0);
+ long long r = v0;
+ BUG_ON(v.counter != r);
+
+ atomic64_set(&v, v1);
+ r = v1;
+ BUG_ON(v.counter != r);
+ BUG_ON(atomic64_read(&v) != r);
+
+ INIT(v0);
+ atomic64_add(onestwos, &v);
+ r += onestwos;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_add(-one, &v);
+ r += -one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r += onestwos;
+ BUG_ON(atomic64_add_return(onestwos, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r += -one;
+ BUG_ON(atomic64_add_return(-one, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_sub(onestwos, &v);
+ r -= onestwos;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_sub(-one, &v);
+ r -= -one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r -= onestwos;
+ BUG_ON(atomic64_sub_return(onestwos, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r -= -one;
+ BUG_ON(atomic64_sub_return(-one, &v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_inc(&v);
+ r += one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r += one;
+ BUG_ON(atomic64_inc_return(&v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_dec(&v);
+ r -= one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ r -= one;
+ BUG_ON(atomic64_dec_return(&v) != r);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_xchg(&v, v1) != v0);
+ r = v1;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
+ r = v1;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(atomic64_add_unless(&v, one, v0));
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(!atomic64_add_unless(&v, one, v1));
+ r += one;
+ BUG_ON(v.counter != r);
+
+#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
+ defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
+ INIT(onestwos);
+ BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
+ r -= one;
+ BUG_ON(v.counter != r);
+
+ INIT(0);
+ BUG_ON(atomic64_dec_if_positive(&v) != -one);
+ BUG_ON(v.counter != r);
+
+ INIT(-one);
+ BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
+ BUG_ON(v.counter != r);
+#else
+#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
+#endif
+
+ INIT(onestwos);
+ BUG_ON(!atomic64_inc_not_zero(&v));
+ r += one;
+ BUG_ON(v.counter != r);
+
+ INIT(0);
+ BUG_ON(atomic64_inc_not_zero(&v));
+ BUG_ON(v.counter != r);
+
+ INIT(-one);
+ BUG_ON(!atomic64_inc_not_zero(&v));
+ r += one;
+ BUG_ON(v.counter != r);
+
+#ifdef CONFIG_X86
+ printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+#ifdef CONFIG_X86_64
+ "x86-64",
+#elif defined(CONFIG_X86_CMPXCHG64)
+ "i586+",
+#else
+ "i386+",
+#endif
+ boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
+ boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
+#else
+ printk(KERN_INFO "atomic64 test passed\n");
+#endif
+
+ return 0;
+}
+
+core_initcall(test_atomic64);
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 702565821c99..ffb78c916ccd 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+
+void bitmap_set(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+EXPORT_SYMBOL(bitmap_set);
+
+void bitmap_clear(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+EXPORT_SYMBOL(bitmap_clear);
+
+/*
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of all zero areas this function finds is multiples of that
+ * power of 2. A @align_mask of 0 means no alignment is required.
+ */
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ unsigned long index, end, i;
+again:
+ index = find_next_zero_bit(map, size, start);
+
+ /* Align allocation */
+ index = __ALIGN_MASK(index, align_mask);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+EXPORT_SYMBOL(bitmap_find_next_zero_area);
+
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
@@ -406,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
EXPORT_SYMBOL(__bitmap_parse);
/**
- * bitmap_parse_user()
+ * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
*
* @ubuf: pointer to user buffer containing string.
* @ulen: buffer size in bytes. If string is smaller than this
@@ -538,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
EXPORT_SYMBOL(bitmap_parselist);
/**
- * bitmap_pos_to_ord(buf, pos, bits)
+ * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @bits)
* @bits: number of valid bit positions in @buf
@@ -574,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
}
/**
- * bitmap_ord_to_pos(buf, ord, bits)
+ * bitmap_ord_to_pos - find position of n-th set bit in bitmap
* @buf: pointer to bitmap
* @ord: ordinal bit position (n-th set bit, n >= 0)
* @bits: number of valid bit positions in @buf
@@ -652,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
bitmap_zero(dst, bits);
w = bitmap_weight(new, bits);
- for (oldbit = find_first_bit(src, bits);
- oldbit < bits;
- oldbit = find_next_bit(src, bits, oldbit + 1)) {
+ for_each_set_bit(oldbit, src, bits) {
int n = bitmap_pos_to_ord(old, oldbit, bits);
+
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
@@ -822,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
*/
m = 0;
- for (n = find_first_bit(relmap, bits);
- n < bits;
- n = find_next_bit(relmap, bits, n + 1)) {
+ for_each_set_bit(n, relmap, bits) {
/* m == bitmap_pos_to_ord(relmap, n, bits) */
if (test_bit(m, orig))
set_bit(n, dst);
@@ -853,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
return;
bitmap_zero(dst, bits);
- for (oldbit = find_first_bit(orig, bits);
- oldbit < bits;
- oldbit = find_next_bit(orig, bits, oldbit + 1))
+ for_each_set_bit(oldbit, orig, bits)
set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);
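[Editorial note: bitmap_set(), bitmap_clear() and bitmap_find_next_zero_area() added above are the building blocks of a simple range allocator, the pattern used by e.g. iommu-helper. A hedged sketch, with the 128-bit map size chosen purely for illustration:]

	/* Illustrative allocator over a small bitmap using the new helpers. */
	static DECLARE_BITMAP(map, 128);

	static int example_alloc_region(unsigned int nr)
	{
		unsigned long pos;

		pos = bitmap_find_next_zero_area(map, 128, 0, nr, 0);
		if (pos >= 128)
			return -ENOMEM;		/* no free run of nr zero bits */
		bitmap_set(map, pos, nr);	/* claim the run */
		return pos;
	}

	static void example_free_region(int pos, unsigned int nr)
	{
		bitmap_clear(map, pos, nr);	/* release the run */
	}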
diff --git a/lib/btree.c b/lib/btree.c
new file mode 100644
index 000000000000..c9c6f0351526
--- /dev/null
+++ b/lib/btree.c
@@ -0,0 +1,798 @@
+/*
+ * lib/btree.c - Simple In-memory B+Tree
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
+ * Bits and pieces stolen from Peter Zijlstra's code, which is
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * GPLv2
+ *
+ * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
+ *
+ * A relatively simple B+Tree implementation. I have written it as a learning
+ * exercise to understand how B+Trees work. Turned out to be useful as well.
+ *
+ * B+Trees can be used similar to Linux radix trees (which don't have anything
+ * in common with textbook radix trees, beware). Prerequisite for them working
+ * well is that access to a random tree node is much faster than a large number
+ * of operations within each node.
+ *
+ * Disks have fulfilled the prerequisite for a long time. More recently DRAM
+ * has gained similar properties, as memory access times, when measured in cpu
+ * cycles, have increased. Cacheline sizes have increased as well, which also
+ * helps B+Trees.
+ *
+ * Compared to radix trees, B+Trees are more efficient when dealing with a
+ * sparsely populated address space. Between 25% and 50% of the memory is
+ * occupied with valid pointers. When densely populated, radix trees contain
+ * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2%
+ * pointers.
+ *
+ * This particular implementation stores pointers identified by a long value.
+ * Storing NULL pointers is illegal, lookup will return NULL when no entry
+ * was found.
+ *
+ * A trick was used that is not commonly found in textbooks. The lowest
+ * values are to the right, not to the left. All used slots within a node
+ * are on the left, all unused slots contain NUL values. Most operations
+ * simply loop once over all slots and terminate on the first NUL.
+ */
+
+#include <linux/btree.h>
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#define NODESIZE MAX(L1_CACHE_BYTES, 128)
+
+struct btree_geo {
+ int keylen;
+ int no_pairs;
+ int no_longs;
+};
+
+struct btree_geo btree_geo32 = {
+ .keylen = 1,
+ .no_pairs = NODESIZE / sizeof(long) / 2,
+ .no_longs = NODESIZE / sizeof(long) / 2,
+};
+EXPORT_SYMBOL_GPL(btree_geo32);
+
+#define LONG_PER_U64 (64 / BITS_PER_LONG)
+struct btree_geo btree_geo64 = {
+ .keylen = LONG_PER_U64,
+ .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
+ .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
+};
+EXPORT_SYMBOL_GPL(btree_geo64);
+
+struct btree_geo btree_geo128 = {
+ .keylen = 2 * LONG_PER_U64,
+ .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
+ .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
+};
+EXPORT_SYMBOL_GPL(btree_geo128);
+
+static struct kmem_cache *btree_cachep;
+
+void *btree_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ return kmem_cache_alloc(btree_cachep, gfp_mask);
+}
+EXPORT_SYMBOL_GPL(btree_alloc);
+
+void btree_free(void *element, void *pool_data)
+{
+ kmem_cache_free(btree_cachep, element);
+}
+EXPORT_SYMBOL_GPL(btree_free);
+
+static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
+{
+ unsigned long *node;
+
+ node = mempool_alloc(head->mempool, gfp);
+ if (likely(node))
+ memset(node, 0, NODESIZE);
+ return node;
+}
+
+static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ if (l1[i] < l2[i])
+ return -1;
+ if (l1[i] > l2[i])
+ return 1;
+ }
+ return 0;
+}
+
+static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
+ size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ dest[i] = src[i];
+ return dest;
+}
+
+static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ s[i] = c;
+ return s;
+}
+
+static void dec_key(struct btree_geo *geo, unsigned long *key)
+{
+ unsigned long val;
+ int i;
+
+ for (i = geo->keylen - 1; i >= 0; i--) {
+ val = key[i];
+ key[i] = val - 1;
+ if (val)
+ break;
+ }
+}
+
+static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
+{
+ return &node[n * geo->keylen];
+}
+
+static void *bval(struct btree_geo *geo, unsigned long *node, int n)
+{
+ return (void *)node[geo->no_longs + n];
+}
+
+static void setkey(struct btree_geo *geo, unsigned long *node, int n,
+ unsigned long *key)
+{
+ longcpy(bkey(geo, node, n), key, geo->keylen);
+}
+
+static void setval(struct btree_geo *geo, unsigned long *node, int n,
+ void *val)
+{
+ node[geo->no_longs + n] = (unsigned long) val;
+}
+
+static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
+{
+ longset(bkey(geo, node, n), 0, geo->keylen);
+ node[geo->no_longs + n] = 0;
+}
+
+static inline void __btree_init(struct btree_head *head)
+{
+ head->node = NULL;
+ head->height = 0;
+}
+
+void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
+{
+ __btree_init(head);
+ head->mempool = mempool;
+}
+EXPORT_SYMBOL_GPL(btree_init_mempool);
+
+int btree_init(struct btree_head *head)
+{
+ __btree_init(head);
+ head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
+ if (!head->mempool)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btree_init);
+
+void btree_destroy(struct btree_head *head)
+{
+ mempool_destroy(head->mempool);
+ head->mempool = NULL;
+}
+EXPORT_SYMBOL_GPL(btree_destroy);
+
+void *btree_last(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return NULL;
+
+ for ( ; height > 1; height--)
+ node = bval(geo, node, 0);
+
+ longcpy(key, bkey(geo, node, 0), geo->keylen);
+ return bval(geo, node, 0);
+}
+EXPORT_SYMBOL_GPL(btree_last);
+
+static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
+ unsigned long *key)
+{
+ return longcmp(bkey(geo, node, pos), key, geo->keylen);
+}
+
+static int keyzero(struct btree_geo *geo, unsigned long *key)
+{
+ int i;
+
+ for (i = 0; i < geo->keylen; i++)
+ if (key[i])
+ return 0;
+
+ return 1;
+}
+
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int i, height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return NULL;
+
+ for ( ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ return NULL;
+ node = bval(geo, node, i);
+ if (!node)
+ return NULL;
+ }
+
+ if (!node)
+ return NULL;
+
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) == 0)
+ return bval(geo, node, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(btree_lookup);
+
+int btree_update(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val)
+{
+ int i, height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return -ENOENT;
+
+ for ( ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ return -ENOENT;
+ node = bval(geo, node, i);
+ if (!node)
+ return -ENOENT;
+ }
+
+ if (!node)
+ return -ENOENT;
+
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) == 0) {
+ setval(geo, node, i, val);
+ return 0;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(btree_update);
+
+/*
+ * Usually this function is quite similar to normal lookup. But the key of
+ * a parent node may be smaller than the smallest key of all its siblings.
+ * In such a case we cannot just return NULL, as we have only proven that no
+ * key smaller than __key, but larger than this parent key exists.
+ * So we set __key to the parent key and retry. We have to use the smallest
+ * such parent key, which is the last parent key we encountered.
+ */
+void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *__key)
+{
+ int i, height;
+ unsigned long *node, *oldnode;
+ unsigned long *retry_key = NULL, key[geo->keylen];
+
+ if (keyzero(geo, __key))
+ return NULL;
+
+ if (head->height == 0)
+ return NULL;
+retry:
+ longcpy(key, __key, geo->keylen);
+ dec_key(geo, key);
+
+ node = head->node;
+ for (height = head->height ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ goto miss;
+ oldnode = node;
+ node = bval(geo, node, i);
+ if (!node)
+ goto miss;
+ retry_key = bkey(geo, oldnode, i);
+ }
+
+ if (!node)
+ goto miss;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ if (keycmp(geo, node, i, key) <= 0) {
+ if (bval(geo, node, i)) {
+ longcpy(__key, bkey(geo, node, i), geo->keylen);
+ return bval(geo, node, i);
+ } else
+ goto miss;
+ }
+ }
+miss:
+ if (retry_key) {
+ __key = retry_key;
+ retry_key = NULL;
+ goto retry;
+ }
+ return NULL;
+}
+
+static int getpos(struct btree_geo *geo, unsigned long *node,
+ unsigned long *key)
+{
+ int i;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ }
+ return i;
+}
+
+static int getfill(struct btree_geo *geo, unsigned long *node, int start)
+{
+ int i;
+
+ for (i = start; i < geo->no_pairs; i++)
+ if (!bval(geo, node, i))
+ break;
+ return i;
+}
+
+/*
+ * locate the correct leaf node in the btree
+ */
+static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level)
+{
+ unsigned long *node = head->node;
+ int i, height;
+
+ for (height = head->height; height > level; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+
+ if ((i == geo->no_pairs) || !bval(geo, node, i)) {
+ /* right-most key is too large, update it */
+ /* FIXME: If the right-most key on higher levels is
+ * always zero, this wouldn't be necessary. */
+ i--;
+ setkey(geo, node, i, key);
+ }
+ BUG_ON(i < 0);
+ node = bval(geo, node, i);
+ }
+ BUG_ON(!node);
+ return node;
+}
+
+static int btree_grow(struct btree_head *head, struct btree_geo *geo,
+ gfp_t gfp)
+{
+ unsigned long *node;
+ int fill;
+
+ node = btree_node_alloc(head, gfp);
+ if (!node)
+ return -ENOMEM;
+ if (head->node) {
+ fill = getfill(geo, head->node, 0);
+ setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
+ setval(geo, node, 0, head->node);
+ }
+ head->node = node;
+ head->height++;
+ return 0;
+}
+
+static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
+{
+ unsigned long *node;
+ int fill;
+
+ if (head->height <= 1)
+ return;
+
+ node = head->node;
+ fill = getfill(geo, node, 0);
+ BUG_ON(fill > 1);
+ head->node = bval(geo, node, 0);
+ head->height--;
+ mempool_free(node, head->mempool);
+}
+
+static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, int level,
+ gfp_t gfp)
+{
+ unsigned long *node;
+ int i, pos, fill, err;
+
+ BUG_ON(!val);
+ if (head->height < level) {
+ err = btree_grow(head, geo, gfp);
+ if (err)
+ return err;
+ }
+
+retry:
+ node = find_level(head, geo, key, level);
+ pos = getpos(geo, node, key);
+ fill = getfill(geo, node, pos);
+ /* two identical keys are not allowed */
+ BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
+
+ if (fill == geo->no_pairs) {
+ /* need to split node */
+ unsigned long *new;
+
+ new = btree_node_alloc(head, gfp);
+ if (!new)
+ return -ENOMEM;
+ err = btree_insert_level(head, geo,
+ bkey(geo, node, fill / 2 - 1),
+ new, level + 1, gfp);
+ if (err) {
+ mempool_free(new, head->mempool);
+ return err;
+ }
+ for (i = 0; i < fill / 2; i++) {
+ setkey(geo, new, i, bkey(geo, node, i));
+ setval(geo, new, i, bval(geo, node, i));
+ setkey(geo, node, i, bkey(geo, node, i + fill / 2));
+ setval(geo, node, i, bval(geo, node, i + fill / 2));
+ clearpair(geo, node, i + fill / 2);
+ }
+ if (fill & 1) {
+ setkey(geo, node, i, bkey(geo, node, fill - 1));
+ setval(geo, node, i, bval(geo, node, fill - 1));
+ clearpair(geo, node, fill - 1);
+ }
+ goto retry;
+ }
+ BUG_ON(fill >= geo->no_pairs);
+
+ /* shift and insert */
+ for (i = fill; i > pos; i--) {
+ setkey(geo, node, i, bkey(geo, node, i - 1));
+ setval(geo, node, i, bval(geo, node, i - 1));
+ }
+ setkey(geo, node, pos, key);
+ setval(geo, node, pos, val);
+
+ return 0;
+}
+
+int btree_insert(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, gfp_t gfp)
+{
+ return btree_insert_level(head, geo, key, val, 1, gfp);
+}
+EXPORT_SYMBOL_GPL(btree_insert);
+
+static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level);
+static void merge(struct btree_head *head, struct btree_geo *geo, int level,
+ unsigned long *left, int lfill,
+ unsigned long *right, int rfill,
+ unsigned long *parent, int lpos)
+{
+ int i;
+
+ for (i = 0; i < rfill; i++) {
+ /* Move all keys to the left */
+ setkey(geo, left, lfill + i, bkey(geo, right, i));
+ setval(geo, left, lfill + i, bval(geo, right, i));
+ }
+ /* Exchange left and right child in parent */
+ setval(geo, parent, lpos, right);
+ setval(geo, parent, lpos + 1, left);
+ /* Remove left (formerly right) child from parent */
+ btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
+ mempool_free(right, head->mempool);
+}
+
+static void rebalance(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level, unsigned long *child, int fill)
+{
+ unsigned long *parent, *left = NULL, *right = NULL;
+ int i, no_left, no_right;
+
+ if (fill == 0) {
+ /* Because we don't steal entries from a neighbour, this case
+ * can happen. Parent node contains a single child, this
+ * node, so merging with a sibling never happens.
+ */
+ btree_remove_level(head, geo, key, level + 1);
+ mempool_free(child, head->mempool);
+ return;
+ }
+
+ parent = find_level(head, geo, key, level + 1);
+ i = getpos(geo, parent, key);
+ BUG_ON(bval(geo, parent, i) != child);
+
+ if (i > 0) {
+ left = bval(geo, parent, i - 1);
+ no_left = getfill(geo, left, 0);
+ if (fill + no_left <= geo->no_pairs) {
+ merge(head, geo, level,
+ left, no_left,
+ child, fill,
+ parent, i - 1);
+ return;
+ }
+ }
+ if (i + 1 < getfill(geo, parent, i)) {
+ right = bval(geo, parent, i + 1);
+ no_right = getfill(geo, right, 0);
+ if (fill + no_right <= geo->no_pairs) {
+ merge(head, geo, level,
+ child, fill,
+ right, no_right,
+ parent, i);
+ return;
+ }
+ }
+ /*
+ * We could also try to steal one entry from the left or right
+ * neighbor. By not doing so we changed the invariant from
+ * "all nodes are at least half full" to "no two neighboring
+ * nodes can be merged". Which means that the average fill of
+ * all nodes is still half or better.
+ */
+}
+
+static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level)
+{
+ unsigned long *node;
+ int i, pos, fill;
+ void *ret;
+
+ if (level > head->height) {
+ /* we recursed all the way up */
+ head->height = 0;
+ head->node = NULL;
+ return NULL;
+ }
+
+ node = find_level(head, geo, key, level);
+ pos = getpos(geo, node, key);
+ fill = getfill(geo, node, pos);
+ if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
+ return NULL;
+ ret = bval(geo, node, pos);
+
+ /* remove and shift */
+ for (i = pos; i < fill - 1; i++) {
+ setkey(geo, node, i, bkey(geo, node, i + 1));
+ setval(geo, node, i, bval(geo, node, i + 1));
+ }
+ clearpair(geo, node, fill - 1);
+
+ if (fill - 1 < geo->no_pairs / 2) {
+ if (level < head->height)
+ rebalance(head, geo, key, level, node, fill - 1);
+ else if (fill - 1 == 1)
+ btree_shrink(head, geo);
+ }
+
+ return ret;
+}
+
+void *btree_remove(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ if (head->height == 0)
+ return NULL;
+
+ return btree_remove_level(head, geo, key, 1);
+}
+EXPORT_SYMBOL_GPL(btree_remove);
+
+int btree_merge(struct btree_head *target, struct btree_head *victim,
+ struct btree_geo *geo, gfp_t gfp)
+{
+ unsigned long key[geo->keylen];
+ unsigned long dup[geo->keylen];
+ void *val;
+ int err;
+
+ BUG_ON(target == victim);
+
+ if (!(target->node)) {
+ /* target is empty, just copy fields over */
+ target->node = victim->node;
+ target->height = victim->height;
+ __btree_init(victim);
+ return 0;
+ }
+
+ /* TODO: This needs some optimizations. Currently we do three tree
+ * walks to remove a single object from the victim.
+ */
+ for (;;) {
+ if (!btree_last(victim, geo, key))
+ break;
+ val = btree_lookup(victim, geo, key);
+ err = btree_insert(target, geo, key, val, gfp);
+ if (err)
+ return err;
+ /* We must make a copy of the key, as the original will get
+ * mangled inside btree_remove. */
+ longcpy(dup, key, geo->keylen);
+ btree_remove(victim, geo, dup);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btree_merge);
+
+static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *node, unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key, size_t index,
+ void *func2),
+ void *func2, int reap, int height, size_t count)
+{
+ int i;
+ unsigned long *child;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ child = bval(geo, node, i);
+ if (!child)
+ break;
+ if (height > 1)
+ count = __btree_for_each(head, geo, child, opaque,
+ func, func2, reap, height - 1, count);
+ else
+ func(child, opaque, bkey(geo, node, i), count++,
+ func2);
+ }
+ if (reap)
+ mempool_free(node, head->mempool);
+ return count;
+}
+
+static void empty(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *func2)
+{
+}
+
+void visitorl(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *__func)
+{
+ visitorl_t func = __func;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitorl);
+
+void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor32_t func = __func;
+ u32 *key = (void *)__key;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitor32);
+
+void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor64_t func = __func;
+ u64 *key = (void *)__key;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitor64);
+
+void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor128_t func = __func;
+ u64 *key = (void *)__key;
+
+ func(elem, opaque, key[0], key[1], index);
+}
+EXPORT_SYMBOL_GPL(visitor128);
+
+size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2)
+{
+ size_t count = 0;
+
+ if (!func2)
+ func = empty;
+ if (head->node)
+ count = __btree_for_each(head, geo, head->node, opaque, func,
+ func2, 0, head->height, 0);
+ return count;
+}
+EXPORT_SYMBOL_GPL(btree_visitor);
+
+size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2)
+{
+ size_t count = 0;
+
+ if (!func2)
+ func = empty;
+ if (head->node)
+ count = __btree_for_each(head, geo, head->node, opaque, func,
+ func2, 1, head->height, 0);
+ __btree_init(head);
+ return count;
+}
+EXPORT_SYMBOL_GPL(btree_grim_visitor);
+
+static int __init btree_module_init(void)
+{
+ btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ return 0;
+}
+
+static void __exit btree_module_exit(void)
+{
+ kmem_cache_destroy(btree_cachep);
+}
+
+/* If core code starts using btree, initialization should happen even earlier */
+module_init(btree_module_init);
+module_exit(btree_module_exit);
+
+MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_LICENSE("GPL");
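[Editorial note: the new B+tree exposes a small raw API -- btree_init(), btree_insert(), btree_lookup(), btree_remove() -- parameterized by a key geometry. A minimal sketch against the exported btree_geo32 (one long per key); the function name and values are illustrative only:]

	static struct btree_head example_head;

	static int example_btree_use(gfp_t gfp)
	{
		unsigned long key = 42;
		void *val = (void *)0xdeadbeef;	/* any non-NULL pointer */
		int err;

		err = btree_init(&example_head);	/* sets up the node mempool */
		if (err)
			return err;
		err = btree_insert(&example_head, &btree_geo32, &key, val, gfp);
		if (err)
			goto out;
		BUG_ON(btree_lookup(&example_head, &btree_geo32, &key) != val);
		btree_remove(&example_head, &btree_geo32, &key);
	out:
		btree_destroy(&example_head);
		return err;
	}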
diff --git a/lib/bug.c b/lib/bug.c
index 300e41afbf97..f13daf435211 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -165,7 +165,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
(void *)bugaddr);
show_regs(regs);
- add_taint(TAINT_WARN);
+ add_taint(BUG_GET_TAINT(bug));
return BUG_TRAP_TYPE_WARN;
}
diff --git a/lib/checksum.c b/lib/checksum.c
index b2e2fd468461..097508732f34 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -37,7 +37,8 @@
#include <asm/byteorder.h>
-static inline unsigned short from32to16(unsigned long x)
+#ifndef do_csum
+static inline unsigned short from32to16(unsigned int x)
{
/* add up 16-bit and 16-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
@@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x)
static unsigned int do_csum(const unsigned char *buff, int len)
{
int odd, count;
- unsigned long result = 0;
+ unsigned int result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
#ifdef __LITTLE_ENDIAN
- result = *buff;
-#else
result += (*buff << 8);
+#else
+ result = *buff;
#endif
len--;
buff++;
@@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len)
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
- unsigned long carry = 0;
+ unsigned int carry = 0;
do {
- unsigned long w = *(unsigned int *) buff;
+ unsigned int w = *(unsigned int *) buff;
count--;
buff += 4;
result += carry;
@@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
out:
return result;
}
+#endif
/*
* This is a version of ip_compute_csum() optimized for IP headers,
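[Editorial note: the new #ifndef do_csum guard lets an architecture supply its own optimized do_csum() and compile the generic loop out. A hypothetical arch header fragment showing the opt-out idiom; the path and fragment are illustrative, not from this diff:]

	/* arch/xxx/include/asm/checksum.h (hypothetical): any definition of
	 * do_csum visible before lib/checksum.c is compiled suppresses the
	 * generic implementation guarded above. */
	extern unsigned int do_csum(const unsigned char *buff, int len);
	#define do_csum do_csum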
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
new file mode 100644
index 000000000000..4dc20321b0d5
--- /dev/null
+++ b/lib/cpu-notifier-error-inject.c
@@ -0,0 +1,63 @@
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+static int priority;
+static int cpu_up_prepare_error;
+static int cpu_down_prepare_error;
+
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify cpu notifier priority");
+
+module_param(cpu_up_prepare_error, int, 0644);
+MODULE_PARM_DESC(cpu_up_prepare_error,
+ "specify error code to inject CPU_UP_PREPARE action");
+
+module_param(cpu_down_prepare_error, int, 0644);
+MODULE_PARM_DESC(cpu_down_prepare_error,
+ "specify error code to inject CPU_DOWN_PREPARE action");
+
+static int err_inject_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ int err = 0;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ err = cpu_up_prepare_error;
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ err = cpu_down_prepare_error;
+ break;
+ }
+ if (err)
+ printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
+
+ return notifier_from_errno(err);
+}
+
+static struct notifier_block err_inject_cpu_notifier = {
+ .notifier_call = err_inject_cpu_callback,
+};
+
+static int err_inject_init(void)
+{
+ err_inject_cpu_notifier.priority = priority;
+
+ return register_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+static void err_inject_exit(void)
+{
+ unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("CPU notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 7bb4142a502f..05d6aca7fc19 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -1,3 +1,4 @@
+#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
diff --git a/lib/crc32.c b/lib/crc32.c
index 49d1c9e3ce38..4855995fcde9 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -25,16 +25,19 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include "crc32defs.h"
#if CRC_LE_BITS == 8
-#define tole(x) __constant_cpu_to_le32(x)
-#define tobe(x) __constant_cpu_to_be32(x)
+# define tole(x) __constant_cpu_to_le32(x)
#else
-#define tole(x) (x)
-#define tobe(x) (x)
+# define tole(x) (x)
+#endif
+
+#if CRC_BE_BITS == 8
+# define tobe(x) __constant_cpu_to_be32(x)
+#else
+# define tobe(x) (x)
#endif
#include "crc32table.h"
@@ -42,6 +45,54 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");
+#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
+
+static inline u32
+crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
+{
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
+# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
+ tab[2][(crc >> 8) & 255] ^ \
+ tab[1][(crc >> 16) & 255] ^ \
+ tab[0][(crc >> 24) & 255]
+# else
+# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+# define DO_CRC4 crc = tab[0][(crc) & 255] ^ \
+ tab[1][(crc >> 8) & 255] ^ \
+ tab[2][(crc >> 16) & 255] ^ \
+ tab[3][(crc >> 24) & 255]
+# endif
+ const u32 *b;
+ size_t rem_len;
+
+ /* Align it */
+ if (unlikely((long)buf & 3 && len)) {
+ do {
+ DO_CRC(*buf++);
+ } while ((--len) && ((long)buf)&3);
+ }
+ rem_len = len & 3;
+ /* load data 32 bits wide, xor data 32 bits wide. */
+ len = len >> 2;
+ b = (const u32 *)buf;
+ for (--b; len; --len) {
+ crc ^= *++b; /* use pre increment for speed */
+ DO_CRC4;
+ }
+ len = rem_len;
+ /* And the last few bytes */
+ if (len) {
+ u8 *p = (u8 *)(b + 1) - 1;
+ do {
+ DO_CRC(*++p); /* use pre increment for speed */
+ } while (--len);
+ }
+ return crc;
+#undef DO_CRC
+#undef DO_CRC4
+}
+#endif
/**
* crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -72,52 +123,11 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
- const u32 *b =(u32 *)p;
- const u32 *tab = crc32table_le;
-
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
-# else
-# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
-# endif
+ const u32 (*tab)[] = crc32table_le;
crc = __cpu_to_le32(crc);
- /* Align it */
- if(unlikely(((long)b)&3 && len)){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while ((--len) && ((long)b)&3 );
- }
- if(likely(len >= 4)){
- /* load data 32 bits wide, xor data 32 bits wide. */
- size_t save_len = len & 3;
- len = len >> 2;
- --b; /* use pre increment below(*++b) for speed */
- do {
- crc ^= *++b;
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- } while (--len);
- b++; /* point to next byte(s) */
- len = save_len;
- }
- /* And the last few bytes */
- if(len){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while (--len);
- }
-
+ crc = crc32_body(crc, p, len, tab);
return __le32_to_cpu(crc);
-#undef ENDIAN_SHIFT
-#undef DO_CRC
-
# elif CRC_LE_BITS == 4
while (len--) {
crc ^= *p++;
@@ -170,51 +180,11 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
- const u32 *b =(u32 *)p;
- const u32 *tab = crc32table_be;
-
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
-# else
-# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
-# endif
+ const u32 (*tab)[] = crc32table_be;
crc = __cpu_to_be32(crc);
- /* Align it */
- if(unlikely(((long)b)&3 && len)){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (u32 *)p;
- } while ((--len) && ((long)b)&3 );
- }
- if(likely(len >= 4)){
- /* load data 32 bits wide, xor data 32 bits wide. */
- size_t save_len = len & 3;
- len = len >> 2;
- --b; /* use pre increment below(*++b) for speed */
- do {
- crc ^= *++b;
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- } while (--len);
- b++; /* point to next byte(s) */
- len = save_len;
- }
- /* And the last few bytes */
- if(len){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while (--len);
- }
+ crc = crc32_body(crc, p, len, tab);
return __be32_to_cpu(crc);
-#undef ENDIAN_SHIFT
-#undef DO_CRC
-
# elif CRC_BE_BITS == 4
while (len--) {
crc ^= *p++ << 24;
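[Editorial note: with the shared crc32_body() above, the public entry points keep their external contract. A small hedged sketch of typical use, seeding with ~0 as the crc32_le() kerneldoc suggests; whether a caller post-inverts the result is protocol-specific and an assumption here:]

	static u32 example_crc(const u8 *buf, size_t len)
	{
		return crc32_le(~0, buf, len);	/* Ethernet-style seed */
	}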
diff --git a/lib/ctype.c b/lib/ctype.c
index d02ace14a322..26baa620e95b 100644
--- a/lib/ctype.c
+++ b/lib/ctype.c
@@ -7,30 +7,30 @@
#include <linux/ctype.h>
#include <linux/module.h>
-unsigned char _ctype[] = {
-_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
-_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
-_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
-_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
-_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
-_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
-_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
-_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
-_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
-_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
-_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
-_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
-_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
-_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
EXPORT_SYMBOL(_ctype);
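
Each _ctype entry is a bitmask of character classes, so the kernel's ctype macros reduce to one table load and an AND; marking the table const lets it live in read-only data. A small user-space illustration of the scheme, using the flag values from include/linux/ctype.h (copied here on the assumption they match your tree):

#include <stdio.h>

#define _U  0x01	/* upper */
#define _L  0x02	/* lower */
#define _D  0x04	/* digit */
#define _C  0x08	/* control */
#define _P  0x10	/* punct */
#define _S  0x20	/* whitespace */
#define _X  0x40	/* hex digit */
#define _SP 0x80	/* hard space (0x20) */

int main(void)
{
	unsigned char entry_A = _U | _X;	/* table entry 65, 'A' */

	printf("isupper('A')  = %d\n", !!(entry_A & _U));	/* 1 */
	printf("isxdigit('A') = %d\n", !!(entry_A & _X));	/* 1 */
	printf("isdigit('A')  = %d\n", !!(entry_A & _D));	/* 0 */
	return 0;
}
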
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9c..5bf0020b9248 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -23,6 +23,7 @@
* shut up after that.
*/
int debug_locks = 1;
+EXPORT_SYMBOL_GPL(debug_locks);
/*
* The locking-testsuite uses <debug_locks_silent> to get a
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2755a3bd16a1..deebcc57d4e6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -9,8 +9,10 @@
*/
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/slab.h>
#include <linux/hash.h>
#define ODEBUG_HASH_BITS 14
@@ -25,14 +27,14 @@
struct debug_bucket {
struct hlist_head list;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
-static DEFINE_SPINLOCK(pool_lock);
+static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
@@ -95,10 +97,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -132,13 +134,14 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
- spin_lock(&pool_lock);
+ raw_spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
obj->object = addr;
obj->descr = descr;
obj->state = ODEBUG_STATE_NONE;
+ obj->astate = 0;
hlist_del(&obj->node);
hlist_add_head(&obj->node, &b->list);
@@ -151,7 +154,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
- spin_unlock(&pool_lock);
+ raw_spin_unlock(&pool_lock);
return obj;
}
@@ -164,7 +167,7 @@ static void free_obj_work(struct work_struct *work)
struct debug_obj *obj;
unsigned long flags;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
while (obj_pool_free > ODEBUG_POOL_SIZE) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&obj->node);
@@ -173,11 +176,11 @@ static void free_obj_work(struct work_struct *work)
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
}
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
@@ -189,7 +192,7 @@ static void free_object(struct debug_obj *obj)
unsigned long flags;
int sched = 0;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
/*
* schedule work when the pool is filled and the cache is
* initialized:
@@ -199,7 +202,7 @@ static void free_object(struct debug_obj *obj)
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
if (sched)
schedule_work(&debug_obj_work);
}
@@ -220,9 +223,9 @@ static void debug_objects_oom(void)
printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_move_list(&db->list, &freelist);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -250,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
if (limit < 5 && obj->descr != descr_test) {
limit++;
- WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
- obj_states[obj->state], obj->descr->name);
+ WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
+ "object type: %s\n",
+ msg, obj_states[obj->state], obj->astate,
+ obj->descr->name);
}
debug_objects_warnings++;
}
@@ -302,14 +307,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj) {
obj = alloc_object(addr, db, descr);
if (!obj) {
debug_objects_enabled = 0;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
@@ -326,7 +331,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "init");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_init, addr, state);
return;
@@ -337,7 +342,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -384,7 +389,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -397,7 +402,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "activate");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_activate, addr, state);
return;
@@ -407,11 +412,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
default:
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
return;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/*
* This happens when a static object is activated. We
* let the type specific code decide whether this is
@@ -437,7 +442,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -445,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
case ODEBUG_STATE_ACTIVE:
- obj->state = ODEBUG_STATE_INACTIVE;
+ if (!obj->astate)
+ obj->state = ODEBUG_STATE_INACTIVE;
+ else
+ debug_print_object(obj, "deactivate");
break;
case ODEBUG_STATE_DESTROYED:
@@ -462,7 +470,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
debug_print_object(&o, "deactivate");
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -482,7 +490,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -497,7 +505,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "destroy");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
@@ -508,7 +516,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
break;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -528,7 +536,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -538,17 +546,64 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "free");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
hlist_del(&obj->node);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_active_state - debug checks object usage state machine
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ * @expect: expected state
+ * @next: state to move to if expected state is found
+ */
+void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+ unsigned int expect, unsigned int next)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ if (obj->astate == expect)
+ obj->astate = next;
+ else
+ debug_print_object(obj, "active_state");
+ break;
+
+ default:
+ debug_print_object(obj, "active_state");
+ break;
+ }
+ } else {
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
+
+ debug_print_object(&o, "active_state");
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -574,7 +629,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
repeat:
cnt = 0;
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
@@ -586,7 +641,7 @@ repeat:
debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
@@ -596,7 +651,7 @@ repeat:
break;
}
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -772,7 +827,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state)
}
}
-static int
+static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
struct debug_bucket *db;
@@ -782,7 +837,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
@@ -806,7 +861,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
}
res = 0;
out:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
if (res)
debug_objects_enabled = 0;
return res;
@@ -906,7 +961,7 @@ void __init debug_objects_early_init(void)
int i;
for (i = 0; i < ODEBUG_HASH_SIZE; i++)
- spin_lock_init(&obj_hash[i].lock);
+ raw_spin_lock_init(&obj_hash[i].lock);
for (i = 0; i < ODEBUG_POOL_SIZE; i++)
hlist_add_head(&obj_static_pool[i].node, &obj_pool);
@@ -915,7 +970,7 @@ void __init debug_objects_early_init(void)
/*
* Convert the statically allocated objects to dynamic ones:
*/
-static int debug_objects_replace_static_objects(void)
+static int __init debug_objects_replace_static_objects(void)
{
struct debug_bucket *db = obj_hash;
struct hlist_node *node, *tmp;
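
The new astate field refines ODEBUG_STATE_ACTIVE into a caller-defined sub-state machine: debug_object_active_state() advances astate from expect to next under the bucket lock and warns on any mismatch, and debug_object_deactivate() now refuses to deactivate an object whose astate is non-zero. A hedged sketch of how a subsystem might drive it; the descriptor and state names are illustrative, not from this patch:

/* Hypothetical user: a two-phase handoff tracked inside the active state. */
static struct debug_obj_descr widget_debug_descr = { .name = "widget" };

enum { WIDGET_IDLE = 0, WIDGET_HANDED_OFF = 1 };

static void widget_hand_off(void *obj)
{
	/* must currently be idle; mark as handed off */
	debug_object_active_state(obj, &widget_debug_descr,
				  WIDGET_IDLE, WIDGET_HANDED_OFF);
}

static void widget_take_over(void *obj)
{
	/* must have been handed off; return to idle */
	debug_object_active_state(obj, &widget_debug_descr,
				  WIDGET_HANDED_OFF, WIDGET_IDLE);
}
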
diff --git a/lib/decompress.c b/lib/decompress.c
index d2842f571674..a7606815541f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -9,6 +9,7 @@
#include <linux/decompress/bunzip2.h>
#include <linux/decompress/unlzma.h>
#include <linux/decompress/inflate.h>
+#include <linux/decompress/unlzo.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -22,6 +23,9 @@
#ifndef CONFIG_DECOMPRESS_LZMA
# define unlzma NULL
#endif
+#ifndef CONFIG_DECOMPRESS_LZO
+# define unlzo NULL
+#endif
static const struct compress_format {
unsigned char magic[2];
@@ -32,6 +36,7 @@ static const struct compress_format {
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
+ { {0x89, 0x4c}, "lzo", unlzo },
{ {0, 0}, NULL, NULL }
};
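
The decompressor is selected by matching the first two payload bytes against this table (the full 9-byte lzop magic is verified later by parse_header() in decompress_unlzo.c). A user-space sketch of the two-byte dispatch, using the magics visible in or implied by this table ({ 037, 0213 } is the standard gzip magic, 0x1f 0x8b):

#include <stdio.h>
#include <string.h>

struct fmt {
	unsigned char magic[2];
	const char *name;
};

static const struct fmt fmts[] = {
	{ { 037, 0213 }, "gzip"  },	/* 0x1f 0x8b */
	{ { 037, 0236 }, "gzip"  },
	{ { 0x42, 0x5a }, "bzip2" },
	{ { 0x5d, 0x00 }, "lzma"  },
	{ { 0x89, 0x4c }, "lzo"   },
};

static const char *guess(const unsigned char *buf)
{
	for (size_t i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
		if (!memcmp(buf, fmts[i].magic, 2))
			return fmts[i].name;
	return "unknown";
}

int main(void)
{
	const unsigned char image[2] = { 0x89, 0x4c };

	printf("%s\n", guess(image));	/* lzo */
	return 0;
}
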
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 600f473a5610..a4e971dee102 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -299,7 +299,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
again when using them (during symbol decoding).*/
base = hufGroup->base-1;
limit = hufGroup->limit-1;
- /* Calculate permute[]. Concurently, initialize
+ /* Calculate permute[]. Concurrently, initialize
* temp[] and limit[]. */
pp = 0;
for (i = minLen; i <= maxLen; i++) {
@@ -637,6 +637,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
/* Allocate bunzip_data. Most fields initialize to zero. */
bd = *bdp = malloc(i);
+ if (!bd)
+ return RETVAL_OUT_OF_MEMORY;
memset(bd, 0, sizeof(struct bunzip_data));
/* Setup input buffer */
bd->inbuf = inbuf;
@@ -664,6 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
bd->dbufSize = 100000*(i-BZh0);
bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+ if (!bd->dbuf)
+ return RETVAL_OUT_OF_MEMORY;
return RETVAL_OK;
}
@@ -686,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
if (!outbuf) {
error("Could not allocate output bufer");
- return -1;
+ return RETVAL_OUT_OF_MEMORY;
}
if (buf)
inbuf = buf;
@@ -694,6 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
inbuf = malloc(BZIP2_IOBUF_SIZE);
if (!inbuf) {
error("Could not allocate input bufer");
+ i = RETVAL_OUT_OF_MEMORY;
goto exit_0;
}
i = start_bunzip(&bd, inbuf, len, fill);
@@ -720,11 +725,14 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
} else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
error("Compressed file ends unexpectedly");
}
+ if (!bd)
+ goto exit_1;
if (bd->dbuf)
large_free(bd->dbuf);
if (pos)
*pos = bd->inbufPos;
free(bd);
+exit_1:
if (!buf)
free(inbuf);
exit_0:
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
new file mode 100644
index 000000000000..bcb3a4bd68ff
--- /dev/null
+++ b/lib/decompress_unlzo.c
@@ -0,0 +1,217 @@
+/*
+ * LZO decompressor for the Linux kernel. Code borrowed from the lzo
+ * implementation by Markus Franz Xaver Johannes Oberhumer.
+ *
+ * Linux kernel adaptation:
+ * Copyright (C) 2009
+ * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com>
+ *
+ * Original code:
+ * Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer
+ * All Rights Reserved.
+ *
+ * lzop and the LZO library are free software; you can redistribute them
+ * and/or modify them under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Markus F.X.J. Oberhumer
+ * <markus@oberhumer.com>
+ * http://www.oberhumer.com/opensource/lzop/
+ */
+
+#ifdef STATIC
+#include "lzo/lzo1x_decompress.c"
+#else
+#include <linux/slab.h>
+#include <linux/decompress/unlzo.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/lzo.h>
+#include <linux/decompress/mm.h>
+
+#include <linux/compiler.h>
+#include <asm/unaligned.h>
+
+static const unsigned char lzop_magic[] = {
+ 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a };
+
+#define LZO_BLOCK_SIZE (256*1024l)
+#define HEADER_HAS_FILTER 0x00000800L
+
+STATIC inline int INIT parse_header(u8 *input, u8 *skip)
+{
+ int l;
+ u8 *parse = input;
+ u8 level = 0;
+ u16 version;
+
+	/* read magic: first 9 bytes */
+ for (l = 0; l < 9; l++) {
+ if (*parse++ != lzop_magic[l])
+ return 0;
+ }
+	/* get version (2 bytes), skip library version (2),
+	 * 'need to be extracted' version (2) and
+	 * method (1) */
+ version = get_unaligned_be16(parse);
+ parse += 7;
+ if (version >= 0x0940)
+ level = *parse++;
+ if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
+ parse += 8; /* flags + filter info */
+ else
+ parse += 4; /* flags */
+
+ /* skip mode and mtime_low */
+ parse += 8;
+ if (version >= 0x0940)
+ parse += 4; /* skip mtime_high */
+
+ l = *parse++;
+ /* don't care about the file name, and skip checksum */
+ parse += l + 4;
+
+ *skip = parse - input;
+ return 1;
+}
+
+STATIC inline int INIT unlzo(u8 *input, int in_len,
+ int (*fill) (void *, unsigned int),
+ int (*flush) (void *, unsigned int),
+ u8 *output, int *posp,
+ void (*error_fn) (char *x))
+{
+ u8 skip = 0, r = 0;
+ u32 src_len, dst_len;
+ size_t tmp;
+ u8 *in_buf, *in_buf_save, *out_buf;
+ int ret = -1;
+
+ set_error_fn(error_fn);
+
+ if (output) {
+ out_buf = output;
+ } else if (!flush) {
+ error("NULL output pointer and no flush function provided");
+ goto exit;
+ } else {
+ out_buf = malloc(LZO_BLOCK_SIZE);
+ if (!out_buf) {
+ error("Could not allocate output buffer");
+ goto exit;
+ }
+ }
+
+ if (input && fill) {
+ error("Both input pointer and fill function provided, don't know what to do");
+ goto exit_1;
+ } else if (input) {
+ in_buf = input;
+ } else if (!fill || !posp) {
+ error("NULL input pointer and missing position pointer or fill function");
+ goto exit_1;
+ } else {
+ in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
+ if (!in_buf) {
+ error("Could not allocate input buffer");
+ goto exit_1;
+ }
+ }
+ in_buf_save = in_buf;
+
+ if (posp)
+ *posp = 0;
+
+ if (fill)
+ fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
+
+	if (!parse_header(in_buf, &skip)) {
+ error("invalid header");
+ goto exit_2;
+ }
+ in_buf += skip;
+
+ if (posp)
+ *posp = skip;
+
+ for (;;) {
+ /* read uncompressed block size */
+ dst_len = get_unaligned_be32(in_buf);
+ in_buf += 4;
+
+ /* exit if last block */
+ if (dst_len == 0) {
+ if (posp)
+ *posp += 4;
+ break;
+ }
+
+ if (dst_len > LZO_BLOCK_SIZE) {
+ error("dest len longer than block size");
+ goto exit_2;
+ }
+
+ /* read compressed block size, and skip block checksum info */
+ src_len = get_unaligned_be32(in_buf);
+ in_buf += 8;
+
+ if (src_len <= 0 || src_len > dst_len) {
+ error("file corrupted");
+ goto exit_2;
+ }
+
+ /* decompress */
+ tmp = dst_len;
+
+ /* When the input data is not compressed at all,
+ * lzo1x_decompress_safe will fail, so call memcpy()
+ * instead */
+ if (unlikely(dst_len == src_len))
+ memcpy(out_buf, in_buf, src_len);
+ else {
+ r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
+ out_buf, &tmp);
+
+ if (r != LZO_E_OK || dst_len != tmp) {
+ error("Compressed data violation");
+ goto exit_2;
+ }
+ }
+
+ if (flush)
+ flush(out_buf, dst_len);
+ if (output)
+ out_buf += dst_len;
+ if (posp)
+ *posp += src_len + 12;
+ if (fill) {
+ in_buf = in_buf_save;
+ fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
+ } else
+ in_buf += src_len;
+ }
+
+ ret = 0;
+exit_2:
+ if (!input)
+ free(in_buf);
+exit_1:
+ if (!output)
+ free(out_buf);
+exit:
+ return ret;
+}
+
+#define decompress unlzo
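
For reference, the block layout the loop above walks: each block is a big-endian u32 uncompressed length (zero terminates the stream), a big-endian u32 compressed length, 4 bytes of checksum that unlzo skips, then the payload; hence the `in_buf += 8` after reading src_len and the `*posp += src_len + 12` accounting, and a block with dst_len == src_len is stored rather than compressed. A self-contained sketch of that framing walk over an in-memory stream (file header already consumed):

#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static void walk_blocks(const uint8_t *p)
{
	for (;;) {
		uint32_t dst_len = be32(p);
		p += 4;
		if (dst_len == 0)	/* end-of-stream marker */
			break;
		uint32_t src_len = be32(p);
		p += 8;			/* src_len + skipped checksum */
		printf("block: %u -> %u bytes%s\n",
		       (unsigned)src_len, (unsigned)dst_len,
		       src_len == dst_len ? " (stored)" : "");
		p += src_len;		/* payload */
	}
}

int main(void)
{
	static const uint8_t stream[] = {
		0, 0, 0, 2,	/* dst_len = 2 */
		0, 0, 0, 2,	/* src_len = 2 (stored block) */
		0, 0, 0, 0,	/* checksum, skipped */
		'h', 'i',	/* payload */
		0, 0, 0, 0,	/* terminator */
	};
	walk_blocks(stream);
	return 0;
}
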
diff --git a/lib/devres.c b/lib/devres.c
index 72c8909006da..6efddf53b90c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,5 +1,6 @@
#include <linux/pci.h>
#include <linux/io.h>
+#include <linux/gfp.h>
#include <linux/module.h>
void devm_ioremap_release(struct device *dev, void *res)
@@ -327,7 +328,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to unmap and release
*
- * Unamp and release regions specified by @mask.
+ * Unmap and release regions specified by @mask.
*/
void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
{
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 58a9f9fc609a..01e64270e246 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -259,7 +259,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
* times. Without a hardware IOMMU this results in the
* same device addresses being put into the dma-debug
* hash multiple times too. This can result in false
- * positives being reported. Therfore we implement a
+ * positives being reported. Therefore we implement a
* best-fit algorithm here which returns the entry from
* the hash which fits best to the reference value
* instead of the first-fit.
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
* Now parse out the first token and use it as the name for the
* driver to filter for.
*/
- for (i = 0; i < NAME_MAX_LEN; ++i) {
+ for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
current_driver_name[i] = buf[i];
if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
break;
@@ -587,7 +587,7 @@ out_unlock:
return count;
}
-const struct file_operations filter_fops = {
+static const struct file_operations filter_fops = {
.read = filter_read,
.write = filter_write,
};
@@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev)
return count;
}
-static int dma_debug_device_change(struct notifier_block *nb,
- unsigned long action, void *data)
+static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
struct device *dev = data;
int count;
+ if (global_disable)
+ return 0;
switch (action) {
case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus)
{
struct notifier_block *nb;
+ if (global_disable)
+ return;
+
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (nb == NULL) {
pr_err("dma_debug_add_bus: out of memory\n");
@@ -819,9 +823,11 @@ static void check_unmap(struct dma_debug_entry *ref)
err_printk(ref->dev, entry, "DMA-API: device driver frees "
"DMA memory with different CPU address "
"[device address=0x%016llx] [size=%llu bytes] "
- "[cpu alloc address=%p] [cpu free address=%p]",
+ "[cpu alloc address=0x%016llx] "
+ "[cpu free address=0x%016llx]",
ref->dev_addr, ref->size,
- (void *)entry->paddr, (void *)ref->paddr);
+ (unsigned long long)entry->paddr,
+ (unsigned long long)ref->paddr);
}
if (ref->sg_call_ents && ref->type == dma_debug_sg &&
@@ -907,6 +913,9 @@ static void check_sync(struct device *dev,
ref->size);
}
+ if (entry->direction == DMA_BIDIRECTIONAL)
+ goto out;
+
if (ref->direction != entry->direction) {
err_printk(dev, entry, "DMA-API: device driver syncs "
"DMA memory with different direction "
@@ -917,9 +926,6 @@ static void check_sync(struct device *dev,
dir2name[ref->direction]);
}
- if (entry->direction == DMA_BIDIRECTIONAL)
- goto out;
-
if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
!(ref->direction == DMA_TO_DEVICE))
err_printk(dev, entry, "DMA-API: device driver syncs "
@@ -942,7 +948,6 @@ static void check_sync(struct device *dev,
out:
put_hash_bucket(bucket, &flags);
-
}
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e22c148e4b7f..02afc2533728 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -21,9 +21,11 @@
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
+#include <linux/slab.h>
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
@@ -209,8 +211,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
char *end;
/* Skip leading whitespace */
- while (*buf && isspace(*buf))
- buf++;
+ buf = skip_spaces(buf);
if (!*buf)
break; /* oh, it was trailing whitespace */
@@ -455,7 +456,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
__func__, (int)len);
nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
- if (nwords < 0)
+ if (nwords <= 0)
return -EINVAL;
if (ddebug_parse_query(words, nwords-1, &query))
return -EINVAL;
@@ -691,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt)
* Called in response to a module being unloaded. Removes
* any ddebug_table's which point at the module.
*/
-int ddebug_remove_module(char *mod_name)
+int ddebug_remove_module(const char *mod_name)
{
struct ddebug_table *dt, *nextdt;
int ret = -ENOENT;
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f97af55bdd96..7e65af70635e 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -1,6 +1,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
+#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/fs.h>
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 66eef2e4483e..41b1804fa728 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
ret->element_size = element_size;
ret->total_nr_elements = total;
if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
- memset(ret->parts[0], FLEX_ARRAY_FREE,
+ memset(&ret->parts[0], FLEX_ARRAY_FREE,
FLEX_ARRAY_BASE_BYTES_LEFT);
return ret;
}
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index bea5d97df991..85d0e412a04f 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -7,8 +7,8 @@
#define LE_TABLE_SIZE (1 << CRC_LE_BITS)
#define BE_TABLE_SIZE (1 << CRC_BE_BITS)
-static uint32_t crc32table_le[LE_TABLE_SIZE];
-static uint32_t crc32table_be[BE_TABLE_SIZE];
+static uint32_t crc32table_le[4][LE_TABLE_SIZE];
+static uint32_t crc32table_be[4][BE_TABLE_SIZE];
/**
* crc32init_le() - allocate and initialize LE table data
@@ -22,12 +22,19 @@ static void crc32init_le(void)
unsigned i, j;
uint32_t crc = 1;
- crc32table_le[0] = 0;
+ crc32table_le[0][0] = 0;
for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
- crc32table_le[i + j] = crc ^ crc32table_le[j];
+ crc32table_le[0][i + j] = crc ^ crc32table_le[0][j];
+ }
+ for (i = 0; i < LE_TABLE_SIZE; i++) {
+ crc = crc32table_le[0][i];
+ for (j = 1; j < 4; j++) {
+ crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8);
+ crc32table_le[j][i] = crc;
+ }
}
}
@@ -39,25 +46,35 @@ static void crc32init_be(void)
unsigned i, j;
uint32_t crc = 0x80000000;
- crc32table_be[0] = 0;
+ crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
for (j = 0; j < i; j++)
- crc32table_be[i + j] = crc ^ crc32table_be[j];
+ crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
+ }
+ for (i = 0; i < BE_TABLE_SIZE; i++) {
+ crc = crc32table_be[0][i];
+ for (j = 1; j < 4; j++) {
+ crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
+ crc32table_be[j][i] = crc;
+ }
}
}
-static void output_table(uint32_t table[], int len, char *trans)
+static void output_table(uint32_t table[4][256], int len, char *trans)
{
- int i;
+ int i, j;
- for (i = 0; i < len - 1; i++) {
- if (i % ENTRIES_PER_LINE == 0)
- printf("\n");
- printf("%s(0x%8.8xL), ", trans, table[i]);
+ for (j = 0 ; j < 4; j++) {
+ printf("{");
+ for (i = 0; i < len - 1; i++) {
+ if (i % ENTRIES_PER_LINE == 0)
+ printf("\n");
+ printf("%s(0x%8.8xL), ", trans, table[j][i]);
+ }
+ printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
}
- printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
}
int main(int argc, char** argv)
@@ -66,14 +83,14 @@ int main(int argc, char** argv)
if (CRC_LE_BITS > 1) {
crc32init_le();
- printf("static const u32 crc32table_le[] = {");
+ printf("static const u32 crc32table_le[4][256] = {");
output_table(crc32table_le, LE_TABLE_SIZE, "tole");
printf("};\n");
}
if (CRC_BE_BITS > 1) {
crc32init_be();
- printf("static const u32 crc32table_be[] = {");
+ printf("static const u32 crc32table_be[4][256] = {");
output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
printf("};\n");
}
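
Tables 1-3 are each one zero-byte CRC step removed from the previous table (for LE: tab[j][i] = tab[0][tab[j-1][i] & 0xff] ^ (tab[j-1][i] >> 8)). That is exactly the identity that lets crc32_body() replace four sequential byte steps with the four independent lookups of DO_CRC4. A quick user-space equivalence check for the LE polynomial:

#include <stdint.h>
#include <stdio.h>

static uint32_t t[4][256];

static void init(void)
{
	for (unsigned i = 0; i < 256; i++) {
		uint32_t crc = i;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
		t[0][i] = crc;
	}
	/* same derivation as the second loop in crc32init_le() */
	for (unsigned i = 0; i < 256; i++)
		for (int j = 1; j < 4; j++)
			t[j][i] = t[0][t[j - 1][i] & 0xff] ^ (t[j - 1][i] >> 8);
}

static uint32_t step(uint32_t crc, uint8_t byte)
{
	return t[0][(crc ^ byte) & 0xff] ^ (crc >> 8);
}

int main(void)
{
	uint32_t crc = 0xdeadbeef, w = 0x04030201; /* bytes 01 02 03 04 (LE) */
	uint32_t a, b;

	init();
	a = step(step(step(step(crc, 1), 2), 3), 4); /* four byte steps */
	crc ^= w;				     /* one word xor ... */
	b = t[3][crc & 0xff] ^ t[2][(crc >> 8) & 0xff] ^ /* ... plus DO_CRC4 */
	    t[1][(crc >> 16) & 0xff] ^ t[0][(crc >> 24) & 0xff];
	printf("%08x %08x %s\n", a, b, a == b ? "match" : "MISMATCH");
	return 0;
}
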
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eed2bdb865e7..1923f1490e72 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -10,7 +10,9 @@
* Version 2. See the file COPYING for more details.
*/
+#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/bitmap.h>
#include <linux/genalloc.h>
@@ -114,7 +116,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
struct gen_pool_chunk *chunk;
unsigned long addr, flags;
int order = pool->min_alloc_order;
- int nbits, bit, start_bit, end_bit;
+ int nbits, start_bit, end_bit;
if (size == 0)
return 0;
@@ -126,32 +128,21 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
- end_bit -= nbits + 1;
spin_lock_irqsave(&chunk->lock, flags);
- bit = -1;
- while (bit + 1 < end_bit) {
- bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
- if (bit >= end_bit)
- break;
-
- start_bit = bit;
- if (nbits > 1) {
- bit = find_next_bit(chunk->bits, bit + nbits,
- bit + 1);
- if (bit - start_bit < nbits)
- continue;
- }
-
- addr = chunk->start_addr +
- ((unsigned long)start_bit << order);
- while (nbits--)
- __set_bit(start_bit++, chunk->bits);
+ start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
+ nbits, 0);
+ if (start_bit >= end_bit) {
spin_unlock_irqrestore(&chunk->lock, flags);
- read_unlock(&pool->lock);
- return addr;
+ continue;
}
+
+ addr = chunk->start_addr + ((unsigned long)start_bit << order);
+
+ bitmap_set(chunk->bits, start_bit, nbits);
spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
read_unlock(&pool->lock);
return 0;
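
The rewrite collapses the hand-rolled scan-and-retry loop into two bitmap primitives: bitmap_find_next_zero_area() finds nbits consecutive clear bits (align_mask 0 here, i.e. no alignment constraint) and bitmap_set() claims them; a return value >= end_bit means this chunk has no fitting run, so the next chunk is tried. The same pattern in miniature, as a kernel-style sketch (the locking gen_pool_alloc does per chunk is omitted):

#include <linux/bitmap.h>
#include <linux/types.h>

#define SLOTS 64
static DECLARE_BITMAP(slots, SLOTS);

/* Allocate n contiguous slots; returns the first slot or -1. */
static int slots_alloc(unsigned int n)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(slots, SLOTS, 0, n, 0);
	if (start >= SLOTS)
		return -1;	/* no run of n free bits */
	bitmap_set(slots, start, n);
	return start;
}
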
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 39af2560f765..5d7a4802c562 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -16,6 +16,24 @@ const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
/**
+ * hex_to_bin - convert a hex digit to its real value
+ * @ch: ASCII character representing a hex digit
+ *
+ * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
+ * input.
+ */
+int hex_to_bin(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ ch = tolower(ch);
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ return -1;
+}
+EXPORT_SYMBOL(hex_to_bin);
+
+/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
@@ -34,7 +52,7 @@ EXPORT_SYMBOL(hex_asc);
*
* E.g.:
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
- * linebuf, sizeof(linebuf), 1);
+ * linebuf, sizeof(linebuf), true);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
@@ -65,8 +83,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%s%16.16llx", j ? " " : "",
- (unsigned long long)*(ptr8 + j));
+ "%s%16.16llx", j ? " " : "",
+ (unsigned long long)*(ptr8 + j));
ascii_column = 17 * ngroups + 2;
break;
}
@@ -77,7 +95,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%s%8.8x", j ? " " : "", *(ptr4 + j));
+ "%s%8.8x", j ? " " : "", *(ptr4 + j));
ascii_column = 9 * ngroups + 2;
break;
}
@@ -88,7 +106,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
- "%s%4.4x", j ? " " : "", *(ptr2 + j));
+ "%s%4.4x", j ? " " : "", *(ptr2 + j));
ascii_column = 5 * ngroups + 2;
break;
}
@@ -111,9 +129,10 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
linebuf[lx++] = ' ';
- for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
- linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
- : '.';
+ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
+ ch = ptr[j];
+ linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
+ }
nil:
linebuf[lx++] = '\0';
}
@@ -143,7 +162,7 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
*
* E.g.:
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
- * 16, 1, frame->data, frame->len, 1);
+ * 16, 1, frame->data, frame->len, true);
*
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
@@ -151,12 +170,12 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
- int rowsize, int groupsize,
- const void *buf, size_t len, bool ascii)
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
- unsigned char linebuf[200];
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
@@ -164,13 +183,14 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
for (i = 0; i < len; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
+
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
- linebuf, sizeof(linebuf), ascii);
+ linebuf, sizeof(linebuf), ascii);
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
- printk("%s%s%*p: %s\n", level, prefix_str,
- (int)(2 * sizeof(void *)), ptr + i, linebuf);
+ printk("%s%s%p: %s\n",
+ level, prefix_str, ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
@@ -196,9 +216,9 @@ EXPORT_SYMBOL(print_hex_dump);
* rowsize of 16, groupsize of 1, and ASCII output included.
*/
void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len)
+ const void *buf, size_t len)
{
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
- buf, len, 1);
+ buf, len, true);
}
EXPORT_SYMBOL(print_hex_dump_bytes);
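
Two details worth noting above: linebuf is now sized for the exact worst case rather than a round 200 bytes (a 32-byte row in 1-byte groups needs 32*3 characters of "xx " hex, 2 separator spaces, 32 ASCII characters and a NUL, i.e. 131), and the new hex_to_bin() is the inverse mapping of the hex_asc table. A user-space rendition of the helper and its contract:

#include <stdio.h>
#include <ctype.h>

/* same contract as the kernel helper: value 0-15, or -1 on bad input */
static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", hex_to_bin('7'), hex_to_bin('F'),
	       hex_to_bin('g'));	/* 7 15 -1 */
	return 0;
}
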
diff --git a/lib/hweight.c b/lib/hweight.c
index 389424ecb129..3c79d50814cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,37 +9,45 @@
* The Hamming Weight of a number is the total number of bits set in it.
*/
-unsigned int hweight32(unsigned int w)
+unsigned int __sw_hweight32(unsigned int w)
{
+#ifdef ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x55555555;
+ w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
+ w = (w + (w >> 4)) & 0x0f0f0f0f;
+ return (w * 0x01010101) >> 24;
+#else
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
+#endif
}
-EXPORT_SYMBOL(hweight32);
+EXPORT_SYMBOL(__sw_hweight32);
-unsigned int hweight16(unsigned int w)
+unsigned int __sw_hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
-EXPORT_SYMBOL(hweight16);
+EXPORT_SYMBOL(__sw_hweight16);
-unsigned int hweight8(unsigned int w)
+unsigned int __sw_hweight8(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55);
res = (res & 0x33) + ((res >> 2) & 0x33);
return (res + (res >> 4)) & 0x0F;
}
-EXPORT_SYMBOL(hweight8);
+EXPORT_SYMBOL(__sw_hweight8);
-unsigned long hweight64(__u64 w)
+unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
- return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+ return __sw_hweight32((unsigned int)(w >> 32)) +
+ __sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
@@ -56,4 +64,4 @@ unsigned long hweight64(__u64 w)
#endif
#endif
}
-EXPORT_SYMBOL(hweight64);
+EXPORT_SYMBOL(__sw_hweight64);
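
The rename to __sw_hweight*() frees the hweight*() names for architecture-optimized versions (e.g. hardware popcount), and hweight32 gains the ARCH_HAS_FAST_MULTIPLIER path: once each byte holds its own bit count, multiplying by 0x01010101 sums the four bytes into the top byte, trading the last two shift-add rounds for one multiply. A standalone check of that variant:

#include <stdint.h>
#include <stdio.h>

static unsigned int hweight32_mul(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;			/* 2-bit sums */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f;		/* per-byte counts */
	return (w * 0x01010101) >> 24;			/* add the 4 bytes */
}

int main(void)
{
	printf("%u %u %u\n", hweight32_mul(0), hweight32_mul(0xff),
	       hweight32_mul(0xffffffffu));	/* 0 8 32 */
	return 0;
}
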
diff --git a/lib/idr.c b/lib/idr.c
index 80ca9aca038b..7f1a4f0acf50 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
- if (!(p = pa[l])) {
+ if (id >= 1 << (idp->layers * IDR_BITS)) {
*starting_id = id;
return IDR_NEED_TO_GROW;
}
+ p = pa[l];
+ BUG_ON(!p);
/* If we need to go up one layer, continue the
* loop; otherwise, restart from the top.
@@ -281,7 +283,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
- * @ptr: pointer you want associated with the ide
+ * @ptr: pointer you want associated with the id
* @start_id: id to start search at
* @id: pointer to the allocated handle
*
@@ -313,7 +315,7 @@ EXPORT_SYMBOL(idr_get_new_above);
/**
* idr_get_new - allocate new idr entry
* @idp: idr handle
- * @ptr: pointer you want associated with the ide
+ * @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* This is the allocate id function. It should be called with any
@@ -443,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
void idr_remove_all(struct idr *idp)
{
int n, id, max;
+ int bt_mask;
struct idr_layer *p;
struct idr_layer *pa[MAX_LEVEL];
struct idr_layer **paa = &pa[0];
@@ -460,8 +463,10 @@ void idr_remove_all(struct idr *idp)
p = p->ary[(id >> n) & IDR_MASK];
}
+ bt_mask = id;
id += 1 << n;
- while (n < fls(id)) {
+ /* Get the highest bit that the above add changed from 0->1. */
+ while (n < fls(id ^ bt_mask)) {
if (p)
free_layer(p);
n += IDR_BITS;
@@ -502,7 +507,7 @@ void *idr_find(struct idr *idp, int id)
int n;
struct idr_layer *p;
- p = rcu_dereference(idp->top);
+ p = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer+1) * IDR_BITS;
@@ -517,7 +522,7 @@ void *idr_find(struct idr *idp, int id)
while (n > 0 && p) {
n -= IDR_BITS;
BUG_ON(n != p->layer*IDR_BITS);
- p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+ p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
}
return((void *)p);
}
@@ -550,7 +555,7 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = rcu_dereference(idp->top);
+ p = rcu_dereference_raw(idp->top);
max = 1 << n;
id = 0;
@@ -558,7 +563,7 @@ int idr_for_each(struct idr *idp,
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
- p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+ p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
}
if (p) {
@@ -597,7 +602,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
/* find first ent */
n = idp->layers * IDR_BITS;
max = 1 << n;
- p = rcu_dereference(idp->top);
+ p = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
@@ -605,7 +610,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
- p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+ p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
}
if (p) {
@@ -621,7 +626,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
}
return NULL;
}
-
+EXPORT_SYMBOL(idr_get_next);
/**
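
The idr_remove_all() change fixes the backtracking test: after `id += 1 << n`, `id ^ bt_mask` isolates exactly the bits the increment flipped, so `fls(id ^ bt_mask)` bounds the freeing loop to the tree levels whose subtrees were actually exhausted. The old `fls(id)` looked at absolute bits of id and could free layers still holding entries. A tiny demonstration of the difference (fls open-coded, since it is a kernel helper):

#include <stdio.h>

static int fls(unsigned int x)	/* position of highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	/* stepping 0x100 -> 0x101 only flips bit 0 ... */
	unsigned int bt_mask = 0x100, id = 0x101;

	printf("fls(id) = %d, fls(id ^ bt_mask) = %d\n",
	       fls(id), fls(id ^ bt_mask));	/* 9 vs 1 */
	/* ... so only the bottom level may be freed, not levels above it */
	return 0;
}
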
diff --git a/lib/inflate.c b/lib/inflate.c
index d10255973a9f..677b738c2204 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -103,6 +103,7 @@
the two sets of lengths.
*/
#include <linux/compiler.h>
+#include <linux/slab.h>
#ifdef RCSID
static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 75dbda03f4fb..c0251f4ad08b 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,41 +3,7 @@
*/
#include <linux/module.h>
-#include <linux/bitops.h>
-
-static unsigned long find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
-{
- unsigned long index, end, i;
-again:
- index = find_next_zero_bit(map, size, start);
-
- /* Align allocation */
- index = (index + align_mask) & ~align_mask;
-
- end = index + nr;
- if (end >= size)
- return -1;
- for (i = index; i < end; i++) {
- if (test_bit(i, map)) {
- start = i+1;
- goto again;
- }
- }
- return index;
-}
-
-void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, map);
- i++;
- }
-}
+#include <linux/bitmap.h>
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
@@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long align_mask)
{
unsigned long index;
+
+	/* Never hand out the last bit (the old scan required end < size) */
+ size -= 1;
again:
- index = find_next_zero_area(map, size, start, nr, align_mask);
- if (index != -1) {
+ index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+ if (index < size) {
if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
/* we could do more effectively */
start = index + 1;
goto again;
}
- iommu_area_reserve(map, index, nr);
+ bitmap_set(map, index, nr);
+ return index;
}
- return index;
+ return -1;
}
EXPORT_SYMBOL(iommu_area_alloc);
-void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
-{
- unsigned long end = start + nr;
-
- while (start < end) {
- __clear_bit(start, map);
- start++;
- }
-}
-EXPORT_SYMBOL(iommu_area_free);
-
unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size)
{
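
Two subtleties in the converted iommu_area_alloc(): the `size -= 1` preserves the old allocator's behavior of never handing out the map's final bit, and align_mask has the form (1 << order) - 1, which bitmap_find_next_zero_area() uses to round each candidate index up via (index + align_mask) & ~align_mask. The rounding in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long index = 5, align_mask = 3;	/* align to 4 */

	printf("%lu\n", (index + align_mask) & ~align_mask);	/* 8 */
	return 0;
}
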
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 14c6078f17a2..5730ecd3eb66 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -13,10 +13,10 @@
#include <asm/pgtable.h>
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, unsigned long phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pte_t *pte;
- unsigned long pfn;
+ u64 pfn;
pfn = phys_addr >> PAGE_SHIFT;
pte = pte_alloc_kernel(pmd, addr);
@@ -31,7 +31,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
}
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, unsigned long phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
@@ -49,7 +49,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
}
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
@@ -67,7 +67,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
}
int ioremap_page_range(unsigned long addr,
- unsigned long end, unsigned long phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pgd_t *pgd;
unsigned long start;
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index c5ff1fd10030..9c4233b23783 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -6,6 +6,7 @@
#include <stdarg.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
* relegated to obsolescence, but used by various less
* important (or lazy) subsystems.
*/
-#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
+#include <linux/smp_lock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bkl.h>
/*
* The 'big kernel lock'
@@ -20,7 +23,7 @@
*
* Don't use in new code.
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
/*
@@ -33,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
* If it successfully gets the lock, it should increment
* the preemption count like any spinlock does.
*
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
* return false in that case)
*/
int __lockfunc __reacquire_kernel_lock(void)
{
- while (!_raw_spin_trylock(&kernel_flag)) {
+ while (!do_raw_spin_trylock(&kernel_flag)) {
if (need_resched())
return -EAGAIN;
cpu_relax();
@@ -49,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
void __lockfunc __release_kernel_lock(void)
{
- _raw_spin_unlock(&kernel_flag);
+ do_raw_spin_unlock(&kernel_flag);
preempt_enable_no_resched();
}
/*
* These are the BKL spinlocks - we try to be polite about preemption.
* If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
*/
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
preempt_disable();
- if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+ if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
/*
* If preemption was disabled even before this
* was called, there's nothing we can be polite
* about - just spin.
*/
if (preempt_count() > 1) {
- _raw_spin_lock(&kernel_flag);
+ do_raw_spin_lock(&kernel_flag);
return;
}
@@ -79,10 +82,10 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
- while (spin_is_locked(&kernel_flag))
+ while (raw_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
- } while (!_raw_spin_trylock(&kernel_flag));
+ } while (!do_raw_spin_trylock(&kernel_flag));
}
}
@@ -93,7 +96,7 @@ static inline void __lock_kernel(void)
*/
static inline void __lock_kernel(void)
{
- _raw_spin_lock(&kernel_flag);
+ do_raw_spin_lock(&kernel_flag);
}
#endif
@@ -103,7 +106,7 @@ static inline void __unlock_kernel(void)
* the BKL is not covered by lockdep, so we open-code the
* unlocking sequence (and thus avoid the dep-chain ops):
*/
- _raw_spin_unlock(&kernel_flag);
+ do_raw_spin_unlock(&kernel_flag);
preempt_enable();
}
@@ -113,21 +116,28 @@ static inline void __unlock_kernel(void)
* This cannot happen asynchronously, so we only need to
* worry about other CPU's.
*/
-void __lockfunc lock_kernel(void)
+void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
- int depth = current->lock_depth+1;
- if (likely(!depth))
+ int depth = current->lock_depth + 1;
+
+ trace_lock_kernel(func, file, line);
+
+ if (likely(!depth)) {
+ might_sleep();
__lock_kernel();
+ }
current->lock_depth = depth;
}
-void __lockfunc unlock_kernel(void)
+void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
BUG_ON(current->lock_depth < 0);
if (likely(--current->lock_depth < 0))
__unlock_kernel();
+
+ trace_unlock_kernel(func, file, line);
}
-EXPORT_SYMBOL(lock_kernel);
-EXPORT_SYMBOL(unlock_kernel);
+EXPORT_SYMBOL(_lock_kernel);
+EXPORT_SYMBOL(_unlock_kernel);
diff --git a/lib/kobject.c b/lib/kobject.c
index b512b746d2af..f07c57252e82 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
-struct sysfs_ops kobj_sysfs_ops = {
+const struct sysfs_ops kobj_sysfs_ops = {
.show = kobj_attr_show,
.store = kobj_attr_store,
};
@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
* If the kset was not able to be created, NULL will be returned.
*/
static struct kset *kset_create(const char *name,
- struct kset_uevent_ops *uevent_ops,
+ const struct kset_uevent_ops *uevent_ops,
struct kobject *parent_kobj)
{
struct kset *kset;
@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
* If the kset was not able to be created, NULL will be returned.
*/
struct kset *kset_create_and_add(const char *name,
- struct kset_uevent_ops *uevent_ops,
+ const struct kset_uevent_ops *uevent_ops,
struct kobject *parent_kobj)
{
struct kset *kset;
@@ -850,6 +850,121 @@ struct kset *kset_create_and_add(const char *name,
}
EXPORT_SYMBOL_GPL(kset_create_and_add);
+
+static DEFINE_SPINLOCK(kobj_ns_type_lock);
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+{
+ enum kobj_ns_type type = ops->type;
+ int error;
+
+ spin_lock(&kobj_ns_type_lock);
+
+ error = -EINVAL;
+ if (type >= KOBJ_NS_TYPES)
+ goto out;
+
+ error = -EINVAL;
+ if (type <= KOBJ_NS_TYPE_NONE)
+ goto out;
+
+ error = -EBUSY;
+ if (kobj_ns_ops_tbl[type])
+ goto out;
+
+ error = 0;
+ kobj_ns_ops_tbl[type] = ops;
+
+out:
+ spin_unlock(&kobj_ns_type_lock);
+ return error;
+}
+
+int kobj_ns_type_registered(enum kobj_ns_type type)
+{
+ int registered = 0;
+
+ spin_lock(&kobj_ns_type_lock);
+ if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
+ registered = kobj_ns_ops_tbl[type] != NULL;
+ spin_unlock(&kobj_ns_type_lock);
+
+ return registered;
+}
+
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
+{
+ const struct kobj_ns_type_operations *ops = NULL;
+
+ if (parent && parent->ktype->child_ns_type)
+ ops = parent->ktype->child_ns_type(parent);
+
+ return ops;
+}
+
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
+{
+ return kobj_child_ns_ops(kobj->parent);
+}
+
+
+const void *kobj_ns_current(enum kobj_ns_type type)
+{
+ const void *ns = NULL;
+
+ spin_lock(&kobj_ns_type_lock);
+ if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+ kobj_ns_ops_tbl[type])
+ ns = kobj_ns_ops_tbl[type]->current_ns();
+ spin_unlock(&kobj_ns_type_lock);
+
+ return ns;
+}
+
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
+{
+ const void *ns = NULL;
+
+ spin_lock(&kobj_ns_type_lock);
+ if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+ kobj_ns_ops_tbl[type])
+ ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
+ spin_unlock(&kobj_ns_type_lock);
+
+ return ns;
+}
+
+const void *kobj_ns_initial(enum kobj_ns_type type)
+{
+ const void *ns = NULL;
+
+ spin_lock(&kobj_ns_type_lock);
+ if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+ kobj_ns_ops_tbl[type])
+ ns = kobj_ns_ops_tbl[type]->initial_ns();
+ spin_unlock(&kobj_ns_type_lock);
+
+ return ns;
+}
+
+/*
+ * kobj_ns_exit - invalidate a namespace tag
+ *
+ * @type: the namespace type (e.g. KOBJ_NS_TYPE_NET)
+ * @ns: the actual namespace being invalidated
+ *
+ * This is called when a tag is no longer valid. For instance,
+ * when a network namespace exits, it uses this helper to
+ * make sure no sb's sysfs_info points to the now-invalidated
+ * netns.
+ */
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
+{
+ sysfs_exit_ns(type, ns);
+}
+
+
EXPORT_SYMBOL(kobject_get);
EXPORT_SYMBOL(kobject_put);
EXPORT_SYMBOL(kobject_del);
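
The new table maps each namespace type to a set of operations supplied at registration; every accessor takes kobj_ns_type_lock and tolerates unregistered types by returning NULL or 0. A hedged sketch of what a registrant provides, with the ops shape inferred from the calls above (->current_ns(), ->netlink_ns(sk), ->initial_ns()); this compiles only in-kernel against the companion header changes, and the function bodies are placeholders:

/* Illustrative only: a netns-style registrant. */
static const void *net_current_ns(void)
{
	return NULL;	/* would return the calling task's netns tag */
}

static const void *net_netlink_ns(struct sock *sk)
{
	return NULL;	/* would return the netns tag owning sk */
}

static const void *net_initial_ns(void)
{
	return NULL;	/* would return the init netns tag */
}

static const struct kobj_ns_type_operations net_ns_type_ops = {
	.type       = KOBJ_NS_TYPE_NET,
	.current_ns = net_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
};

/* registered once at boot: kobj_ns_type_register(&net_ns_type_ops); */
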
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 920a3ca6e259..b93579504dfa 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -18,18 +18,25 @@
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/module.h>
-
+#include <linux/slab.h>
+#include <linux/user_namespace.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <net/sock.h>
+#include <net/net_namespace.h>
u64 uevent_seqnum;
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
static DEFINE_SPINLOCK(sequence_lock);
-#if defined(CONFIG_NET)
-static struct sock *uevent_sock;
+#ifdef CONFIG_NET
+struct uevent_sock {
+ struct list_head list;
+ struct sock *sk;
+};
+static LIST_HEAD(uevent_sock_list);
+static DEFINE_MUTEX(uevent_sock_mutex);
#endif
/* the strings here must match the enum in include/linux/kobject.h */
@@ -76,6 +83,39 @@ out:
return ret;
}
+#ifdef CONFIG_NET
+static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
+{
+ struct kobject *kobj = data;
+ const struct kobj_ns_type_operations *ops;
+
+ ops = kobj_ns_ops(kobj);
+ if (ops) {
+ const void *sock_ns, *ns;
+ ns = kobj->ktype->namespace(kobj);
+ sock_ns = ops->netlink_ns(dsk);
+ return sock_ns != ns;
+ }
+
+ return 0;
+}
+#endif
+
+static int kobj_usermode_filter(struct kobject *kobj)
+{
+ const struct kobj_ns_type_operations *ops;
+
+ ops = kobj_ns_ops(kobj);
+ if (ops) {
+ const void *init_ns, *ns;
+ ns = kobj->ktype->namespace(kobj);
+ init_ns = ops->initial_ns();
+ return ns != init_ns;
+ }
+
+ return 0;
+}
+
/**
* kobject_uevent_env - send an uevent with environmental data
*
@@ -95,10 +135,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
const char *subsystem;
struct kobject *top_kobj;
struct kset *kset;
- struct kset_uevent_ops *uevent_ops;
+ const struct kset_uevent_ops *uevent_ops;
u64 seq;
int i = 0;
int retval = 0;
+#ifdef CONFIG_NET
+ struct uevent_sock *ue_sk;
+#endif
pr_debug("kobject: '%s' (%p): %s\n",
kobject_name(kobj), kobj, __func__);
@@ -210,7 +253,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
#if defined(CONFIG_NET)
/* send netlink message */
- if (uevent_sock) {
+ mutex_lock(&uevent_sock_mutex);
+ list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+ struct sock *uevent_sock = ue_sk->sk;
struct sk_buff *skb;
size_t len;
@@ -232,18 +277,21 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
}
NETLINK_CB(skb).dst_group = 1;
- retval = netlink_broadcast(uevent_sock, skb, 0, 1,
- GFP_KERNEL);
+ retval = netlink_broadcast_filtered(uevent_sock, skb,
+ 0, 1, GFP_KERNEL,
+ kobj_bcast_filter,
+ kobj);
/* ENOBUFS should be handled in userspace */
if (retval == -ENOBUFS)
retval = 0;
} else
retval = -ENOMEM;
}
+ mutex_unlock(&uevent_sock_mutex);
#endif
/* call uevent_helper, usually only enabled during early boot */
- if (uevent_helper[0]) {
+ if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
char *argv [3];
argv [0] = uevent_helper;
@@ -319,18 +367,59 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
EXPORT_SYMBOL_GPL(add_uevent_var);
#if defined(CONFIG_NET)
-static int __init kobject_uevent_init(void)
+static int uevent_net_init(struct net *net)
{
- uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT,
- 1, NULL, NULL, THIS_MODULE);
- if (!uevent_sock) {
+ struct uevent_sock *ue_sk;
+
+ ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
+ if (!ue_sk)
+ return -ENOMEM;
+
+ ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
+ 1, NULL, NULL, THIS_MODULE);
+ if (!ue_sk->sk) {
printk(KERN_ERR
"kobject_uevent: unable to create netlink socket!\n");
+ kfree(ue_sk);
return -ENODEV;
}
- netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
+ mutex_lock(&uevent_sock_mutex);
+ list_add_tail(&ue_sk->list, &uevent_sock_list);
+ mutex_unlock(&uevent_sock_mutex);
return 0;
}
+static void uevent_net_exit(struct net *net)
+{
+ struct uevent_sock *ue_sk;
+
+ mutex_lock(&uevent_sock_mutex);
+ list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+ if (sock_net(ue_sk->sk) == net)
+ goto found;
+ }
+ mutex_unlock(&uevent_sock_mutex);
+ return;
+
+found:
+ list_del(&ue_sk->list);
+ mutex_unlock(&uevent_sock_mutex);
+
+ netlink_kernel_release(ue_sk->sk);
+ kfree(ue_sk);
+}
+
+static struct pernet_operations uevent_net_ops = {
+ .init = uevent_net_init,
+ .exit = uevent_net_exit,
+};
+
+static int __init kobject_uevent_init(void)
+{
+ netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
+ return register_pernet_subsys(&uevent_net_ops);
+}
+
postcore_initcall(kobject_uevent_init);
#endif
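
Uevents still go out on multicast group 1 of the NETLINK_KOBJECT_UEVENT
family, and the netlink_set_nonroot(NL_NONROOT_RECV) call above keeps
reception open to unprivileged processes. For illustration only, a minimal
userspace listener (plain libc sockets; not part of the patch):

#include <linux/netlink.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl addr;
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_groups = 1;	/* the broadcast group used above */

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);

		if (len <= 0)
			break;
		buf[len] = '\0';
		/* payload: "ACTION@DEVPATH" followed by NUL-separated
		 * KEY=VALUE environment strings */
		printf("%s\n", buf);
	}
	close(fd);
	return 0;
}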
diff --git a/lib/kref.c b/lib/kref.c
index 9ecd6e865610..d3d227a08a4b 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -13,17 +13,7 @@
#include <linux/kref.h>
#include <linux/module.h>
-
-/**
- * kref_set - initialize object and set refcount to requested number.
- * @kref: object in question.
- * @num: initial reference counter
- */
-void kref_set(struct kref *kref, int num)
-{
- atomic_set(&kref->refcount, num);
- smp_mb();
-}
+#include <linux/slab.h>
/**
* kref_init - initialize object.
@@ -31,7 +21,8 @@ void kref_set(struct kref *kref, int num)
*/
void kref_init(struct kref *kref)
{
- kref_set(kref, 1);
+ atomic_set(&kref->refcount, 1);
+ smp_mb();
}
/**
@@ -71,7 +62,6 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
return 0;
}
-EXPORT_SYMBOL(kref_set);
EXPORT_SYMBOL(kref_init);
EXPORT_SYMBOL(kref_get);
EXPORT_SYMBOL(kref_put);
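
With kref_set() gone, the API is reduced to init/get/put. The canonical
usage pattern, as a hedged sketch (struct foo and its helpers are invented
for illustration):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
	/* payload ... */
};

/* runs when the last reference is dropped */
static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, ref));
}

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->ref);	/* refcount starts at 1 */
	return f;
}

static void foo_get(struct foo *f)
{
	kref_get(&f->ref);	/* each additional user takes a reference */
}

static void foo_put(struct foo *f)
{
	kref_put(&f->ref, foo_release);	/* may invoke foo_release() */
}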
diff --git a/lib/lcm.c b/lib/lcm.c
new file mode 100644
index 000000000000..157cd88a6ffc
--- /dev/null
+++ b/lib/lcm.c
@@ -0,0 +1,15 @@
+#include <linux/kernel.h>
+#include <linux/gcd.h>
+#include <linux/module.h>
+
+/* Lowest common multiple */
+unsigned long lcm(unsigned long a, unsigned long b)
+{
+ if (a && b)
+ return (a * b) / gcd(a, b);
+ else if (b)
+ return b;
+
+ return a;
+}
+EXPORT_SYMBOL_GPL(lcm);
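
One caveat visible in the code: the product a * b is computed before the
division, so lcm() can overflow unsigned long even when the mathematical
result would fit; keep the inputs modest. A tiny usage sketch (assuming
the declaration lives in the matching new header):

#include <linux/lcm.h>

static void lcm_example(void)
{
	unsigned long l = lcm(4, 6);	/* (4 * 6) / gcd(4, 6) == 12 */

	(void)l;
	/* degenerate inputs: lcm(0, b) == b, lcm(a, 0) == a, lcm(0, 0) == 0 */
}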
diff --git a/lib/list_sort.c b/lib/list_sort.c
new file mode 100644
index 000000000000..4b5cb794c38b
--- /dev/null
+++ b/lib/list_sort.c
@@ -0,0 +1,217 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list_sort.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#define MAX_LIST_LENGTH_BITS 20
+
+/*
+ * Returns a list organized in an intermediate format suited
+ * to chaining of merge() calls: null-terminated, no reserved or
+ * sentinel head node, "prev" links not maintained.
+ */
+static struct list_head *merge(void *priv,
+ int (*cmp)(void *priv, struct list_head *a,
+ struct list_head *b),
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head head, *tail = &head;
+
+ while (a && b) {
+ /* if equal, take 'a' -- important for sort stability */
+ if ((*cmp)(priv, a, b) <= 0) {
+ tail->next = a;
+ a = a->next;
+ } else {
+ tail->next = b;
+ b = b->next;
+ }
+ tail = tail->next;
+ }
+ tail->next = a?:b;
+ return head.next;
+}
+
+/*
+ * Combine final list merge with restoration of standard doubly-linked
+ * list structure. This approach duplicates code from merge(), but
+ * runs faster than the tidier alternatives of either a separate final
+ * prev-link restoration pass, or maintaining the prev links
+ * throughout.
+ */
+static void merge_and_restore_back_links(void *priv,
+ int (*cmp)(void *priv, struct list_head *a,
+ struct list_head *b),
+ struct list_head *head,
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head *tail = head;
+
+ while (a && b) {
+ /* if equal, take 'a' -- important for sort stability */
+ if ((*cmp)(priv, a, b) <= 0) {
+ tail->next = a;
+ a->prev = tail;
+ a = a->next;
+ } else {
+ tail->next = b;
+ b->prev = tail;
+ b = b->next;
+ }
+ tail = tail->next;
+ }
+ tail->next = a ? : b;
+
+ do {
+ /*
+ * In worst cases this loop may run many iterations.
+ * Continue callbacks to the client even though no
+ * element comparison is needed, so the client's cmp()
+ * routine can invoke cond_resched() periodically.
+ */
+ (*cmp)(priv, tail, tail);
+
+ tail->next->prev = tail;
+ tail = tail->next;
+ } while (tail->next);
+
+ tail->next = head;
+ head->prev = tail;
+}
+
+/**
+ * list_sort - sort a list
+ * @priv: private data, opaque to list_sort(), passed to @cmp
+ * @head: the list to sort
+ * @cmp: the elements comparison function
+ *
+ * This function implements "merge sort", which has O(nlog(n))
+ * complexity.
+ *
+ * The comparison function @cmp must return a negative value if @a
+ * should sort before @b, and a positive value if @a should sort after
+ * @b. If @a and @b are equivalent, and their original relative
+ * ordering is to be preserved, @cmp must return 0.
+ */
+void list_sort(void *priv, struct list_head *head,
+ int (*cmp)(void *priv, struct list_head *a,
+ struct list_head *b))
+{
+ struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
+ -- last slot is a sentinel */
+ int lev; /* index into part[] */
+ int max_lev = 0;
+ struct list_head *list;
+
+ if (list_empty(head))
+ return;
+
+ memset(part, 0, sizeof(part));
+
+ head->prev->next = NULL;
+ list = head->next;
+
+ while (list) {
+ struct list_head *cur = list;
+ list = list->next;
+ cur->next = NULL;
+
+ for (lev = 0; part[lev]; lev++) {
+ cur = merge(priv, cmp, part[lev], cur);
+ part[lev] = NULL;
+ }
+ if (lev > max_lev) {
+ if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
+ printk_once(KERN_DEBUG "list passed to"
+ " list_sort() too long for"
+ " efficiency\n");
+ lev--;
+ }
+ max_lev = lev;
+ }
+ part[lev] = cur;
+ }
+
+ for (lev = 0; lev < max_lev; lev++)
+ if (part[lev])
+ list = merge(priv, cmp, part[lev], list);
+
+ merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
+}
+EXPORT_SYMBOL(list_sort);
+
+#ifdef DEBUG_LIST_SORT
+struct debug_el {
+ struct list_head l_h;
+ int value;
+ unsigned serial;
+};
+
+static int cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ return container_of(a, struct debug_el, l_h)->value
+ - container_of(b, struct debug_el, l_h)->value;
+}
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
+
+static int __init list_sort_test(void)
+{
+ int i, r = 1, count;
+ struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
+ struct list_head *cur;
+
+ printk(KERN_WARNING "testing list_sort()\n");
+
+ cur = head;
+ for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
+ struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
+ BUG_ON(!el);
+ /* force some equivalencies */
+ el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
+ el->serial = i;
+
+ el->l_h.prev = cur;
+ cur->next = &el->l_h;
+ cur = cur->next;
+ }
+ head->prev = cur;
+
+ list_sort(NULL, head, cmp);
+
+ count = 1;
+ for (cur = head->next; cur->next != head; cur = cur->next) {
+ struct debug_el *el = container_of(cur, struct debug_el, l_h);
+ int cmp_result = cmp(NULL, cur, cur->next);
+ if (cur->next->prev != cur) {
+ printk(KERN_EMERG "list_sort() returned "
+ "a corrupted list!\n");
+ return 1;
+ } else if (cmp_result > 0) {
+ printk(KERN_EMERG "list_sort() failed to sort!\n");
+ return 1;
+ } else if (cmp_result == 0 &&
+ el->serial >= container_of(cur->next,
+ struct debug_el, l_h)->serial) {
+ printk(KERN_EMERG "list_sort() failed to preserve order"
+ " of equivalent elements!\n");
+ return 1;
+ }
+ kfree(cur->prev);
+ count++;
+ }
+ kfree(cur);
+ if (count != LIST_SORT_TEST_LENGTH) {
+ printk(KERN_EMERG "list_sort() returned list of"
+ "different length!\n");
+ return 1;
+ }
+ return 0;
+}
+module_init(list_sort_test);
+#endif
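
Client code follows directly from the kernel-doc above. A hedged sketch
(struct item and its field names are invented for illustration):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
	struct list_head node;
	int key;
};

/* negative: a first; positive: b first; 0 preserves the original order */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = container_of(a, struct item, node);
	struct item *ib = container_of(b, struct item, node);

	return ia->key - ib->key;
}

static void sort_items(struct list_head *items)
{
	list_sort(NULL, items, item_cmp);
}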
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644
index 0343c05609f0..000000000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Procedures for maintaining information about logical memory blocks.
- *
- * Peter Bergner, IBM Corp. June 2001.
- * Copyright (C) 2001 Peter Bergner.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/lmb.h>
-
-#define LMB_ALLOC_ANYWHERE 0
-
-struct lmb lmb;
-
-static int lmb_debug;
-
-static int __init early_lmb(char *p)
-{
- if (p && strstr(p, "debug"))
- lmb_debug = 1;
- return 0;
-}
-early_param("lmb", early_lmb);
-
-static void lmb_dump(struct lmb_region *region, char *name)
-{
- unsigned long long base, size;
- int i;
-
- pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
-
- for (i = 0; i < region->cnt; i++) {
- base = region->region[i].base;
- size = region->region[i].size;
-
- pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
- name, i, base, base + size - 1, size);
- }
-}
-
-void lmb_dump_all(void)
-{
- if (!lmb_debug)
- return;
-
- pr_info("LMB configuration:\n");
- pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
- pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
-
- lmb_dump(&lmb.memory, "memory");
- lmb_dump(&lmb.reserved, "reserved");
-}
-
-static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
- u64 size2)
-{
- return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
-}
-
-static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
-{
- if (base2 == base1 + size1)
- return 1;
- else if (base1 == base2 + size2)
- return -1;
-
- return 0;
-}
-
-static long lmb_regions_adjacent(struct lmb_region *rgn,
- unsigned long r1, unsigned long r2)
-{
- u64 base1 = rgn->region[r1].base;
- u64 size1 = rgn->region[r1].size;
- u64 base2 = rgn->region[r2].base;
- u64 size2 = rgn->region[r2].size;
-
- return lmb_addrs_adjacent(base1, size1, base2, size2);
-}
-
-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
-{
- unsigned long i;
-
- for (i = r; i < rgn->cnt - 1; i++) {
- rgn->region[i].base = rgn->region[i + 1].base;
- rgn->region[i].size = rgn->region[i + 1].size;
- }
- rgn->cnt--;
-}
-
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn,
- unsigned long r1, unsigned long r2)
-{
- rgn->region[r1].size += rgn->region[r2].size;
- lmb_remove_region(rgn, r2);
-}
-
-void __init lmb_init(void)
-{
- /* Create a dummy zero size LMB which will get coalesced away later.
- * This simplifies the lmb_add() code below...
- */
- lmb.memory.region[0].base = 0;
- lmb.memory.region[0].size = 0;
- lmb.memory.cnt = 1;
-
- /* Ditto. */
- lmb.reserved.region[0].base = 0;
- lmb.reserved.region[0].size = 0;
- lmb.reserved.cnt = 1;
-}
-
-void __init lmb_analyze(void)
-{
- int i;
-
- lmb.memory.size = 0;
-
- for (i = 0; i < lmb.memory.cnt; i++)
- lmb.memory.size += lmb.memory.region[i].size;
-}
-
-static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
-{
- unsigned long coalesced = 0;
- long adjacent, i;
-
- if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
- rgn->region[0].base = base;
- rgn->region[0].size = size;
- return 0;
- }
-
- /* First try and coalesce this LMB with another. */
- for (i = 0; i < rgn->cnt; i++) {
- u64 rgnbase = rgn->region[i].base;
- u64 rgnsize = rgn->region[i].size;
-
- if ((rgnbase == base) && (rgnsize == size))
- /* Already have this region, so we're done */
- return 0;
-
- adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
- if (adjacent > 0) {
- rgn->region[i].base -= size;
- rgn->region[i].size += size;
- coalesced++;
- break;
- } else if (adjacent < 0) {
- rgn->region[i].size += size;
- coalesced++;
- break;
- }
- }
-
- if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
- lmb_coalesce_regions(rgn, i, i+1);
- coalesced++;
- }
-
- if (coalesced)
- return coalesced;
- if (rgn->cnt >= MAX_LMB_REGIONS)
- return -1;
-
- /* Couldn't coalesce the LMB, so add it to the sorted table. */
- for (i = rgn->cnt - 1; i >= 0; i--) {
- if (base < rgn->region[i].base) {
- rgn->region[i+1].base = rgn->region[i].base;
- rgn->region[i+1].size = rgn->region[i].size;
- } else {
- rgn->region[i+1].base = base;
- rgn->region[i+1].size = size;
- break;
- }
- }
-
- if (base < rgn->region[0].base) {
- rgn->region[0].base = base;
- rgn->region[0].size = size;
- }
- rgn->cnt++;
-
- return 0;
-}
-
-long lmb_add(u64 base, u64 size)
-{
- struct lmb_region *_rgn = &lmb.memory;
-
- /* On pSeries LPAR systems, the first LMB is our RMO region. */
- if (base == 0)
- lmb.rmo_size = size;
-
- return lmb_add_region(_rgn, base, size);
-
-}
-
-long lmb_remove(u64 base, u64 size)
-{
- struct lmb_region *rgn = &(lmb.memory);
- u64 rgnbegin, rgnend;
- u64 end = base + size;
- int i;
-
- rgnbegin = rgnend = 0; /* supress gcc warnings */
-
- /* Find the region where (base, size) belongs to */
- for (i=0; i < rgn->cnt; i++) {
- rgnbegin = rgn->region[i].base;
- rgnend = rgnbegin + rgn->region[i].size;
-
- if ((rgnbegin <= base) && (end <= rgnend))
- break;
- }
-
- /* Didn't find the region */
- if (i == rgn->cnt)
- return -1;
-
- /* Check to see if we are removing entire region */
- if ((rgnbegin == base) && (rgnend == end)) {
- lmb_remove_region(rgn, i);
- return 0;
- }
-
- /* Check to see if region is matching at the front */
- if (rgnbegin == base) {
- rgn->region[i].base = end;
- rgn->region[i].size -= size;
- return 0;
- }
-
- /* Check to see if the region is matching at the end */
- if (rgnend == end) {
- rgn->region[i].size -= size;
- return 0;
- }
-
- /*
- * We need to split the entry - adjust the current one to the
- * beginging of the hole and add the region after hole.
- */
- rgn->region[i].size = base - rgn->region[i].base;
- return lmb_add_region(rgn, end, rgnend - end);
-}
-
-long __init lmb_reserve(u64 base, u64 size)
-{
- struct lmb_region *_rgn = &lmb.reserved;
-
- BUG_ON(0 == size);
-
- return lmb_add_region(_rgn, base, size);
-}
-
-long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
-{
- unsigned long i;
-
- for (i = 0; i < rgn->cnt; i++) {
- u64 rgnbase = rgn->region[i].base;
- u64 rgnsize = rgn->region[i].size;
- if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
- break;
- }
-
- return (i < rgn->cnt) ? i : -1;
-}
-
-static u64 lmb_align_down(u64 addr, u64 size)
-{
- return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
- return (addr + (size - 1)) & ~(size - 1);
-}
-
-static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
- u64 size, u64 align)
-{
- u64 base, res_base;
- long j;
-
- base = lmb_align_down((end - size), align);
- while (start <= base) {
- j = lmb_overlaps_region(&lmb.reserved, base, size);
- if (j < 0) {
- /* this area isn't reserved, take it */
- if (lmb_add_region(&lmb.reserved, base, size) < 0)
- base = ~(u64)0;
- return base;
- }
- res_base = lmb.reserved.region[j].base;
- if (res_base < size)
- break;
- base = lmb_align_down(res_base - size, align);
- }
-
- return ~(u64)0;
-}
-
-static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
- u64 (*nid_range)(u64, u64, int *),
- u64 size, u64 align, int nid)
-{
- u64 start, end;
-
- start = mp->base;
- end = start + mp->size;
-
- start = lmb_align_up(start, align);
- while (start < end) {
- u64 this_end;
- int this_nid;
-
- this_end = nid_range(start, end, &this_nid);
- if (this_nid == nid) {
- u64 ret = lmb_alloc_nid_unreserved(start, this_end,
- size, align);
- if (ret != ~(u64)0)
- return ret;
- }
- start = this_end;
- }
-
- return ~(u64)0;
-}
-
-u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
- u64 (*nid_range)(u64 start, u64 end, int *nid))
-{
- struct lmb_region *mem = &lmb.memory;
- int i;
-
- BUG_ON(0 == size);
-
- size = lmb_align_up(size, align);
-
- for (i = 0; i < mem->cnt; i++) {
- u64 ret = lmb_alloc_nid_region(&mem->region[i],
- nid_range,
- size, align, nid);
- if (ret != ~(u64)0)
- return ret;
- }
-
- return lmb_alloc(size, align);
-}
-
-u64 __init lmb_alloc(u64 size, u64 align)
-{
- return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
-}
-
-u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
- u64 alloc;
-
- alloc = __lmb_alloc_base(size, align, max_addr);
-
- if (alloc == 0)
- panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
- (unsigned long long) size, (unsigned long long) max_addr);
-
- return alloc;
-}
-
-u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
- long i, j;
- u64 base = 0;
- u64 res_base;
-
- BUG_ON(0 == size);
-
- size = lmb_align_up(size, align);
-
- /* On some platforms, make sure we allocate lowmem */
- /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
- if (max_addr == LMB_ALLOC_ANYWHERE)
- max_addr = LMB_REAL_LIMIT;
-
- for (i = lmb.memory.cnt - 1; i >= 0; i--) {
- u64 lmbbase = lmb.memory.region[i].base;
- u64 lmbsize = lmb.memory.region[i].size;
-
- if (lmbsize < size)
- continue;
- if (max_addr == LMB_ALLOC_ANYWHERE)
- base = lmb_align_down(lmbbase + lmbsize - size, align);
- else if (lmbbase < max_addr) {
- base = min(lmbbase + lmbsize, max_addr);
- base = lmb_align_down(base - size, align);
- } else
- continue;
-
- while (base && lmbbase <= base) {
- j = lmb_overlaps_region(&lmb.reserved, base, size);
- if (j < 0) {
- /* this area isn't reserved, take it */
- if (lmb_add_region(&lmb.reserved, base, size) < 0)
- return 0;
- return base;
- }
- res_base = lmb.reserved.region[j].base;
- if (res_base < size)
- break;
- base = lmb_align_down(res_base - size, align);
- }
- }
- return 0;
-}
-
-/* You must call lmb_analyze() before this. */
-u64 __init lmb_phys_mem_size(void)
-{
- return lmb.memory.size;
-}
-
-u64 lmb_end_of_DRAM(void)
-{
- int idx = lmb.memory.cnt - 1;
-
- return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
-}
-
-/* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(u64 memory_limit)
-{
- unsigned long i;
- u64 limit;
- struct lmb_property *p;
-
- if (!memory_limit)
- return;
-
- /* Truncate the lmb regions to satisfy the memory limit. */
- limit = memory_limit;
- for (i = 0; i < lmb.memory.cnt; i++) {
- if (limit > lmb.memory.region[i].size) {
- limit -= lmb.memory.region[i].size;
- continue;
- }
-
- lmb.memory.region[i].size = limit;
- lmb.memory.cnt = i + 1;
- break;
- }
-
- if (lmb.memory.region[0].size < lmb.rmo_size)
- lmb.rmo_size = lmb.memory.region[0].size;
-
- memory_limit = lmb_end_of_DRAM();
-
- /* And truncate any reserves above the limit also. */
- for (i = 0; i < lmb.reserved.cnt; i++) {
- p = &lmb.reserved.region[i];
-
- if (p->base > memory_limit)
- p->size = 0;
- else if ((p->base + p->size) > memory_limit)
- p->size = memory_limit - p->base;
-
- if (p->size == 0) {
- lmb_remove_region(&lmb.reserved, i);
- i--;
- }
- }
-}
-
-int __init lmb_is_reserved(u64 addr)
-{
- int i;
-
- for (i = 0; i < lmb.reserved.cnt; i++) {
- u64 upper = lmb.reserved.region[i].base +
- lmb.reserved.region[i].size - 1;
- if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
- return 1;
- }
- return 0;
-}
-
-/*
- * Given a <base, len>, find which memory regions belong to this range.
- * Adjust the request and return a contiguous chunk.
- */
-int lmb_find(struct lmb_property *res)
-{
- int i;
- u64 rstart, rend;
-
- rstart = res->base;
- rend = rstart + res->size - 1;
-
- for (i = 0; i < lmb.memory.cnt; i++) {
- u64 start = lmb.memory.region[i].base;
- u64 end = start + lmb.memory.region[i].size - 1;
-
- if (start > rend)
- return -1;
-
- if ((end >= rstart) && (start < rend)) {
- /* adjust the request */
- if (rstart < start)
- rstart = start;
- if (rend > end)
- rend = end;
- res->base = rstart;
- res->size = rend - rstart + 1;
- return 0;
- }
- }
- return -1;
-}
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
new file mode 100644
index 000000000000..270de9d31b8c
--- /dev/null
+++ b/lib/lru_cache.c
@@ -0,0 +1,560 @@
+/*
+ lru_cache.c
+
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+ Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
+ Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+ Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+ drbd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ drbd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with drbd; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* for memset */
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/lru_cache.h>
+
+MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
+ "Lars Ellenberg <lars@linbit.com>");
+MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
+MODULE_LICENSE("GPL");
+
+/* this is a developer's aid only.
+ * it catches concurrent access (lack of locking on the user's part) */
+#define PARANOIA_ENTRY() do { \
+ BUG_ON(!lc); \
+ BUG_ON(!lc->nr_elements); \
+ BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
+} while (0)
+
+#define RETURN(x...) do { \
+ clear_bit(__LC_PARANOIA, &lc->flags); \
+ smp_mb__after_clear_bit(); return x ; } while (0)
+
+/* BUG() if e is not one of the elements tracked by lc */
+#define PARANOIA_LC_ELEMENT(lc, e) do { \
+ struct lru_cache *lc_ = (lc); \
+ struct lc_element *e_ = (e); \
+ unsigned i = e_->lc_index; \
+ BUG_ON(i >= lc_->nr_elements); \
+ BUG_ON(lc_->lc_element[i] != e_); } while (0)
+
+/**
+ * lc_create - prepares to track objects in an active set
+ * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @cache: kmem_cache the tracked objects are allocated from
+ * @e_count: number of elements allowed to be active simultaneously
+ * @e_size: size of the tracked objects
+ * @e_off: offset to the &struct lc_element member in a tracked object
+ *
+ * Returns a pointer to a newly initialized struct lru_cache on success,
+ * or NULL on (allocation) failure.
+ */
+struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
+ unsigned e_count, size_t e_size, size_t e_off)
+{
+ struct hlist_head *slot = NULL;
+ struct lc_element **element = NULL;
+ struct lru_cache *lc;
+ struct lc_element *e;
+ unsigned cache_obj_size = kmem_cache_size(cache);
+ unsigned i;
+
+ WARN_ON(cache_obj_size < e_size);
+ if (cache_obj_size < e_size)
+ return NULL;
+
+	/* e_count too big; would probably fail the allocation below anyway.
+	 * for typical use cases, e_count should be a few thousand at most. */
+ if (e_count > LC_MAX_ACTIVE)
+ return NULL;
+
+ slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL);
+ if (!slot)
+ goto out_fail;
+ element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
+ if (!element)
+ goto out_fail;
+
+ lc = kzalloc(sizeof(*lc), GFP_KERNEL);
+ if (!lc)
+ goto out_fail;
+
+ INIT_LIST_HEAD(&lc->in_use);
+ INIT_LIST_HEAD(&lc->lru);
+ INIT_LIST_HEAD(&lc->free);
+
+ lc->name = name;
+ lc->element_size = e_size;
+ lc->element_off = e_off;
+ lc->nr_elements = e_count;
+ lc->new_number = LC_FREE;
+ lc->lc_cache = cache;
+ lc->lc_element = element;
+ lc->lc_slot = slot;
+
+ /* preallocate all objects */
+ for (i = 0; i < e_count; i++) {
+ void *p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p)
+ break;
+ memset(p, 0, lc->element_size);
+ e = p + e_off;
+ e->lc_index = i;
+ e->lc_number = LC_FREE;
+ list_add(&e->list, &lc->free);
+ element[i] = e;
+ }
+ if (i == e_count)
+ return lc;
+
+ /* else: could not allocate all elements, give up */
+	/* free the i elements that were allocated, element[0] included */
+	while (i--) {
+		void *p = element[i];
+		kmem_cache_free(cache, p - e_off);
+	}
+ kfree(lc);
+out_fail:
+ kfree(element);
+ kfree(slot);
+ return NULL;
+}
+
+void lc_free_by_index(struct lru_cache *lc, unsigned i)
+{
+ void *p = lc->lc_element[i];
+ WARN_ON(!p);
+ if (p) {
+ p -= lc->element_off;
+ kmem_cache_free(lc->lc_cache, p);
+ }
+}
+
+/**
+ * lc_destroy - frees memory allocated by lc_create()
+ * @lc: the lru cache to destroy
+ */
+void lc_destroy(struct lru_cache *lc)
+{
+ unsigned i;
+ if (!lc)
+ return;
+ for (i = 0; i < lc->nr_elements; i++)
+ lc_free_by_index(lc, i);
+ kfree(lc->lc_element);
+ kfree(lc->lc_slot);
+ kfree(lc);
+}
+
+/**
+ * lc_reset - does a full reset for @lc and the hash table slots.
+ * @lc: the lru cache to operate on
+ *
+ * It is roughly the equivalent of re-allocating a fresh lru_cache object,
+ * basically a shortcut to lc_destroy(lc); lc = lc_create(...);
+ */
+void lc_reset(struct lru_cache *lc)
+{
+ unsigned i;
+
+ INIT_LIST_HEAD(&lc->in_use);
+ INIT_LIST_HEAD(&lc->lru);
+ INIT_LIST_HEAD(&lc->free);
+ lc->used = 0;
+ lc->hits = 0;
+ lc->misses = 0;
+ lc->starving = 0;
+ lc->dirty = 0;
+ lc->changed = 0;
+ lc->flags = 0;
+ lc->changing_element = NULL;
+ lc->new_number = LC_FREE;
+ memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
+
+ for (i = 0; i < lc->nr_elements; i++) {
+ struct lc_element *e = lc->lc_element[i];
+ void *p = e;
+ p -= lc->element_off;
+ memset(p, 0, lc->element_size);
+ /* re-init it */
+ e->lc_index = i;
+ e->lc_number = LC_FREE;
+ list_add(&e->list, &lc->free);
+ }
+}
+
+/**
+ * lc_seq_printf_stats - print stats about @lc into @seq
+ * @seq: the seq_file to print into
+ * @lc: the lru cache to print statistics of
+ */
+size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
+{
+ /* NOTE:
+ * total calls to lc_get are
+ * (starving + hits + misses)
+ * misses include "dirty" count (update from an other thread in
+ * progress) and "changed", when this in fact lead to an successful
+ * update of the cache.
+ */
+ return seq_printf(seq, "\t%s: used:%u/%u "
+ "hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
+ lc->name, lc->used, lc->nr_elements,
+ lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
+}
+
+static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
+{
+ return lc->lc_slot + (enr % lc->nr_elements);
+}
+
+
+/**
+ * lc_find - find element by label, if present in the hash table
+ * @lc: The lru_cache object
+ * @enr: element number
+ *
+ * Returns the pointer to an element, if the element with the requested
+ * "label" or element number is present in the hash table,
+ * or NULL if not found. Does not change the refcnt.
+ */
+struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
+{
+ struct hlist_node *n;
+ struct lc_element *e;
+
+ BUG_ON(!lc);
+ BUG_ON(!lc->nr_elements);
+ hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
+ if (e->lc_number == enr)
+ return e;
+ }
+ return NULL;
+}
+
+/* returned element will be "recycled" immediately */
+static struct lc_element *lc_evict(struct lru_cache *lc)
+{
+ struct list_head *n;
+ struct lc_element *e;
+
+ if (list_empty(&lc->lru))
+ return NULL;
+
+ n = lc->lru.prev;
+ e = list_entry(n, struct lc_element, list);
+
+ PARANOIA_LC_ELEMENT(lc, e);
+
+ list_del(&e->list);
+ hlist_del(&e->colision);
+ return e;
+}
+
+/**
+ * lc_del - removes an element from the cache
+ * @lc: The lru_cache object
+ * @e: The element to remove
+ *
+ * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
+ * sets @e->enr to %LC_FREE.
+ */
+void lc_del(struct lru_cache *lc, struct lc_element *e)
+{
+ PARANOIA_ENTRY();
+ PARANOIA_LC_ELEMENT(lc, e);
+ BUG_ON(e->refcnt);
+
+ e->lc_number = LC_FREE;
+ hlist_del_init(&e->colision);
+ list_move(&e->list, &lc->free);
+ RETURN();
+}
+
+static struct lc_element *lc_get_unused_element(struct lru_cache *lc)
+{
+ struct list_head *n;
+
+ if (list_empty(&lc->free))
+ return lc_evict(lc);
+
+ n = lc->free.next;
+ list_del(n);
+ return list_entry(n, struct lc_element, list);
+}
+
+static int lc_unused_element_available(struct lru_cache *lc)
+{
+ if (!list_empty(&lc->free))
+ return 1; /* something on the free list */
+ if (!list_empty(&lc->lru))
+ return 1; /* something to evict */
+
+ return 0;
+}
+
+
+/**
+ * lc_get - get element by label, maybe change the active set
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Finds an element in the cache, increases its usage count,
+ * "touches" and returns it.
+ *
+ * In case the requested number is not present, it needs to be added to the
+ * cache. Therefore it is possible that another element is evicted from
+ * the cache. In either case, the user is notified so he is able to e.g. keep
+ * a persistent log of the cache changes, and therefore the objects in use.
+ *
+ * Return values:
+ * NULL
+ * The cache was marked %LC_STARVING,
+ * or the requested label was not in the active set
+ * and a changing transaction is still pending (@lc was marked %LC_DIRTY).
+ * Or no unused or free element could be recycled (@lc will be marked as
+ * %LC_STARVING, blocking further lc_get() operations).
+ *
+ * pointer to the element with the REQUESTED element number.
+ * In this case, it can be used right away
+ *
+ * pointer to an UNUSED element with some different element number,
+ * where that different number may also be %LC_FREE.
+ *
+ * In this case, the cache is marked %LC_DIRTY (blocking further changes),
+ * and the returned element pointer is removed from the lru list and
+ * hash collision chains. The user now should do whatever housekeeping
+ * is necessary.
+ * Then he must call lc_changed(lc, element_pointer) to finish
+ * the change.
+ *
+ * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
+ * any cache set change.
+ */
+struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
+{
+ struct lc_element *e;
+
+ PARANOIA_ENTRY();
+ if (lc->flags & LC_STARVING) {
+ ++lc->starving;
+ RETURN(NULL);
+ }
+
+ e = lc_find(lc, enr);
+ if (e) {
+ ++lc->hits;
+ if (e->refcnt++ == 0)
+ lc->used++;
+ list_move(&e->list, &lc->in_use); /* Not evictable... */
+ RETURN(e);
+ }
+
+ ++lc->misses;
+
+	/* In case there is nothing available and we cannot kick out
+ * the LRU element, we have to wait ...
+ */
+ if (!lc_unused_element_available(lc)) {
+ __set_bit(__LC_STARVING, &lc->flags);
+ RETURN(NULL);
+ }
+
+ /* it was not present in the active set.
+ * we are going to recycle an unused (or even "free") element.
+ * user may need to commit a transaction to record that change.
+ * we serialize on flags & TF_DIRTY */
+ if (test_and_set_bit(__LC_DIRTY, &lc->flags)) {
+ ++lc->dirty;
+ RETURN(NULL);
+ }
+
+ e = lc_get_unused_element(lc);
+ BUG_ON(!e);
+
+ clear_bit(__LC_STARVING, &lc->flags);
+ BUG_ON(++e->refcnt != 1);
+ lc->used++;
+
+ lc->changing_element = e;
+ lc->new_number = enr;
+
+ RETURN(e);
+}
+
+/* similar to lc_get,
+ * but only gets a new reference on an existing element.
+ * you either get the requested element, or NULL.
+ * will be consolidated into one function.
+ */
+struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
+{
+ struct lc_element *e;
+
+ PARANOIA_ENTRY();
+ if (lc->flags & LC_STARVING) {
+ ++lc->starving;
+ RETURN(NULL);
+ }
+
+ e = lc_find(lc, enr);
+ if (e) {
+ ++lc->hits;
+ if (e->refcnt++ == 0)
+ lc->used++;
+ list_move(&e->list, &lc->in_use); /* Not evictable... */
+ }
+ RETURN(e);
+}
+
+/**
+ * lc_changed - tell @lc that the change has been recorded
+ * @lc: the lru cache to operate on
+ * @e: the element pending label change
+ */
+void lc_changed(struct lru_cache *lc, struct lc_element *e)
+{
+ PARANOIA_ENTRY();
+ BUG_ON(e != lc->changing_element);
+ PARANOIA_LC_ELEMENT(lc, e);
+ ++lc->changed;
+ e->lc_number = lc->new_number;
+ list_add(&e->list, &lc->in_use);
+ hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number));
+ lc->changing_element = NULL;
+ lc->new_number = LC_FREE;
+ clear_bit(__LC_DIRTY, &lc->flags);
+ smp_mb__after_clear_bit();
+ RETURN();
+}
+
+
+/**
+ * lc_put - give up refcnt of @e
+ * @lc: the lru cache to operate on
+ * @e: the element to put
+ *
+ * If refcnt reaches zero, the element is moved to the lru list,
+ * and a %LC_STARVING (if set) is cleared.
+ * Returns the new (post-decrement) refcnt.
+ */
+unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
+{
+ PARANOIA_ENTRY();
+ PARANOIA_LC_ELEMENT(lc, e);
+ BUG_ON(e->refcnt == 0);
+ BUG_ON(e == lc->changing_element);
+ if (--e->refcnt == 0) {
+ /* move it to the front of LRU. */
+ list_move(&e->list, &lc->lru);
+ lc->used--;
+ clear_bit(__LC_STARVING, &lc->flags);
+ smp_mb__after_clear_bit();
+ }
+ RETURN(e->refcnt);
+}
+
+/**
+ * lc_element_by_index
+ * @lc: the lru cache to operate on
+ * @i: the index of the element to return
+ */
+struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
+{
+ BUG_ON(i >= lc->nr_elements);
+ BUG_ON(lc->lc_element[i] == NULL);
+ BUG_ON(lc->lc_element[i]->lc_index != i);
+ return lc->lc_element[i];
+}
+
+/**
+ * lc_index_of
+ * @lc: the lru cache to operate on
+ * @e: the element to query for its index position in lc->element
+ */
+unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
+{
+ PARANOIA_LC_ELEMENT(lc, e);
+ return e->lc_index;
+}
+
+/**
+ * lc_set - associate index with label
+ * @lc: the lru cache to operate on
+ * @enr: the label to set
+ * @index: the element index to associate label with.
+ *
+ * Used to initialize the active set to some previously recorded state.
+ */
+void lc_set(struct lru_cache *lc, unsigned int enr, int index)
+{
+ struct lc_element *e;
+
+ if (index < 0 || index >= lc->nr_elements)
+ return;
+
+ e = lc_element_by_index(lc, index);
+ e->lc_number = enr;
+
+ hlist_del_init(&e->colision);
+ hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
+ list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
+}
+
+/**
+ * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
+ * @lc: the lru cache to operate on
+ * @seq: the &struct seq_file pointer to seq_printf into
+ * @utext: user supplied "heading" or other info
+ * @detail: function pointer the user may provide to dump further details
+ * of the object the lc_element is embedded in.
+ */
+void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
+ void (*detail) (struct seq_file *, struct lc_element *))
+{
+ unsigned int nr_elements = lc->nr_elements;
+ struct lc_element *e;
+ int i;
+
+ seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
+ for (i = 0; i < nr_elements; i++) {
+ e = lc_element_by_index(lc, i);
+ if (e->lc_number == LC_FREE) {
+ seq_printf(seq, "\t%2d: FREE\n", i);
+ } else {
+ seq_printf(seq, "\t%2d: %4u %4u ", i,
+ e->lc_number, e->refcnt);
+ detail(seq, e);
+ }
+ }
+}
+
+EXPORT_SYMBOL(lc_create);
+EXPORT_SYMBOL(lc_reset);
+EXPORT_SYMBOL(lc_destroy);
+EXPORT_SYMBOL(lc_set);
+EXPORT_SYMBOL(lc_del);
+EXPORT_SYMBOL(lc_try_get);
+EXPORT_SYMBOL(lc_find);
+EXPORT_SYMBOL(lc_get);
+EXPORT_SYMBOL(lc_put);
+EXPORT_SYMBOL(lc_changed);
+EXPORT_SYMBOL(lc_element_by_index);
+EXPORT_SYMBOL(lc_index_of);
+EXPORT_SYMBOL(lc_seq_printf_stats);
+EXPORT_SYMBOL(lc_seq_dump_details);
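
Tying the lc_get()/lc_changed() contract together, a hedged sketch of the
caller-side protocol (the transaction step is whatever persistent record
the user keeps; serialization against concurrent users is, as the PARANOIA
macros insist, the caller's responsibility):

static struct lc_element *activate_extent(struct lru_cache *lc,
					  unsigned int enr)
{
	struct lc_element *e = lc_get(lc, enr);

	if (!e)
		return NULL;	/* starving or dirty: retry later */

	if (e->lc_number != enr) {
		/* an unused element was recycled for us: persistently
		 * record the label change, then commit it */
		/* ... write transaction (e->lc_number -> enr) ... */
		lc_changed(lc, e);
	}
	return e;		/* drop with lc_put(lc, e) when done */
}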
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 5dc6b29c1575..f2fd09850223 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -11,11 +11,13 @@
* Richard Purdie <rpurdie@openedhand.com>
*/
+#ifndef STATIC
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lzo.h>
-#include <asm/byteorder.h>
+#endif
+
#include <asm/unaligned.h>
+#include <linux/lzo.h>
#include "lzodefs.h"
#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x))
@@ -244,9 +246,10 @@ lookbehind_overrun:
*out_len = op - out;
return LZO_E_LOOKBEHIND_OVERRUN;
}
-
+#ifndef STATIC
EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X Decompressor");
+#endif
diff --git a/lib/parser.c b/lib/parser.c
index b00d02059a5f..fb34977246bb 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[])
args[argc].from = s;
switch (*p++) {
- case 's':
- if (strlen(s) == 0)
+ case 's': {
+ size_t str_len = strlen(s);
+
+ if (str_len == 0)
return 0;
- else if (len == -1 || len > strlen(s))
- len = strlen(s);
+ if (len == -1 || len > str_len)
+ len = str_len;
args[argc].to = s + len;
break;
+ }
case 'd':
simple_strtol(s, &args[argc].to, 0);
goto num;
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..1471988d9190 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
- WARN_ON(!head->lock);
- if (head->lock)
- WARN_ON_SMP(!spin_is_locked(head->lock));
+ WARN_ON(!head->rawlock && !head->spinlock);
+ if (head->rawlock)
+ WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+ if (head->spinlock)
+ WARN_ON_SMP(!spin_is_locked(head->spinlock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 23abbd93cae1..05da38bcc298 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
-#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
@@ -200,6 +199,9 @@ radix_tree_node_free(struct radix_tree_node *node)
* ensure that the addition of a single element in the tree cannot fail. On
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_WAIT being passed to INIT_RADIX_TREE().
*/
int radix_tree_preload(gfp_t gfp_mask)
{
@@ -361,7 +363,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
unsigned int height, shift;
struct radix_tree_node *node, **slot;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (node == NULL)
return NULL;
@@ -381,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
do {
slot = (struct radix_tree_node **)
(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
- node = rcu_dereference(*slot);
+ node = rcu_dereference_raw(*slot);
if (node == NULL)
return NULL;
@@ -543,7 +545,6 @@ out:
}
EXPORT_SYMBOL(radix_tree_tag_clear);
-#ifndef __KERNEL__ /* Only the test harness uses this at present */
/**
* radix_tree_tag_get - get a tag on a radix tree node
* @root: radix tree root
@@ -554,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
*
* 0: tag not present or not set
* 1: tag set
+ *
+ * Note that the return value of this function may not be relied on, even if
+ * the RCU lock is held, unless tag modification and node deletion are excluded
+ * from concurrency.
*/
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
@@ -566,7 +571,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
if (!root_tag_get(root, tag))
return 0;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (node == NULL)
return 0;
@@ -594,19 +599,14 @@ int radix_tree_tag_get(struct radix_tree_root *root,
*/
if (!tag_get(node, tag, offset))
saw_unset_tag = 1;
- if (height == 1) {
- int ret = tag_get(node, tag, offset);
-
- BUG_ON(ret && saw_unset_tag);
- return !!ret;
- }
- node = rcu_dereference(node->slots[offset]);
+ if (height == 1)
+ return !!tag_get(node, tag, offset);
+ node = rcu_dereference_raw(node->slots[offset]);
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
}
EXPORT_SYMBOL(radix_tree_tag_get);
-#endif
/**
* radix_tree_next_hole - find the next hole (not-present entry)
@@ -656,7 +656,7 @@ EXPORT_SYMBOL(radix_tree_next_hole);
*
* Returns: the index of the hole if found, otherwise returns an index
* outside of the set specified (in which case 'index - return >= max_scan'
- * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
*
* radix_tree_next_hole may be called under rcu_read_lock. However, like
* radix_tree_gang_lookup, this will not atomically search a snapshot of
@@ -674,7 +674,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
if (!radix_tree_lookup(root, index))
break;
index--;
- if (index == LONG_MAX)
+ if (index == ULONG_MAX)
break;
}
@@ -710,7 +710,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
}
shift -= RADIX_TREE_MAP_SHIFT;
- slot = rcu_dereference(slot->slots[i]);
+ slot = rcu_dereference_raw(slot->slots[i]);
if (slot == NULL)
goto out;
}
@@ -757,7 +757,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long cur_index = first_index;
unsigned int ret;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
@@ -786,7 +786,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
slot = *(((void ***)results)[ret + i]);
if (!slot)
continue;
- results[ret + nr_found] = rcu_dereference(slot);
+ results[ret + nr_found] = rcu_dereference_raw(slot);
nr_found++;
}
ret += nr_found;
@@ -825,7 +825,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
unsigned long cur_index = first_index;
unsigned int ret;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
@@ -914,7 +914,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
}
}
shift -= RADIX_TREE_MAP_SHIFT;
- slot = rcu_dereference(slot->slots[i]);
+ slot = rcu_dereference_raw(slot->slots[i]);
if (slot == NULL)
break;
}
@@ -950,7 +950,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
if (!root_tag_get(root, tag))
return 0;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
@@ -979,7 +979,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
slot = *(((void ***)results)[ret + i]);
if (!slot)
continue;
- results[ret + nr_found] = rcu_dereference(slot);
+ results[ret + nr_found] = rcu_dereference_raw(slot);
nr_found++;
}
ret += nr_found;
@@ -1019,7 +1019,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
if (!root_tag_get(root, tag))
return 0;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
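
The new note on radix_tree_preload() reflects the usual insertion idiom.
A hedged sketch (tree and lock names invented; GFP_ATOMIC carries no
__GFP_WAIT, as the note requires):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_lock);

static int store_item(unsigned long index, void *item)
{
	int err = radix_tree_preload(GFP_KERNEL);	/* may sleep here... */

	if (err)
		return err;
	spin_lock(&my_lock);				/* ...but not here */
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);
	radix_tree_preload_end();	/* re-enables preemption */
	return err;
}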
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index b2fe4baa90e0..19bf32da644f 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -9,7 +9,7 @@ raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
hostprogs-y += mktables
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+ cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
< $< > $@ || ( rm -f $@ && exit 1 )
ifeq ($(CONFIG_ALTIVEC),y)
@@ -18,56 +18,56 @@ endif
targets += raid6int1.c
$(obj)/raid6int1.c: UNROLL := 1
-$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += raid6int2.c
$(obj)/raid6int2.c: UNROLL := 2
-$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += raid6int4.c
$(obj)/raid6int4.c: UNROLL := 4
-$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += raid6int8.c
$(obj)/raid6int8.c: UNROLL := 8
-$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += raid6int16.c
$(obj)/raid6int16.c: UNROLL := 16
-$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += raid6int32.c
$(obj)/raid6int32.c: UNROLL := 32
-$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_raid6altivec1.o += $(altivec_flags)
targets += raid6altivec1.c
$(obj)/raid6altivec1.c: UNROLL := 1
-$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_raid6altivec2.o += $(altivec_flags)
targets += raid6altivec2.c
$(obj)/raid6altivec2.c: UNROLL := 2
-$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_raid6altivec4.o += $(altivec_flags)
targets += raid6altivec4.c
$(obj)/raid6altivec4.c: UNROLL := 4
-$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_raid6altivec8.o += $(altivec_flags)
targets += raid6altivec8.c
$(obj)/raid6altivec8.c: UNROLL := 8
-$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@
diff --git a/lib/raid6/raid6algos.c b/lib/raid6/raid6algos.c
index 866215ac7f25..1f8784bfd44d 100644
--- a/lib/raid6/raid6algos.c
+++ b/lib/raid6/raid6algos.c
@@ -17,6 +17,7 @@
*/
#include <linux/raid/pq.h>
+#include <linux/gfp.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
@@ -31,25 +32,6 @@ EXPORT_SYMBOL(raid6_empty_zero_page);
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
-/* Various routine sets */
-extern const struct raid6_calls raid6_intx1;
-extern const struct raid6_calls raid6_intx2;
-extern const struct raid6_calls raid6_intx4;
-extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
-extern const struct raid6_calls raid6_mmxx1;
-extern const struct raid6_calls raid6_mmxx2;
-extern const struct raid6_calls raid6_sse1x1;
-extern const struct raid6_calls raid6_sse1x2;
-extern const struct raid6_calls raid6_sse2x1;
-extern const struct raid6_calls raid6_sse2x2;
-extern const struct raid6_calls raid6_sse2x4;
-extern const struct raid6_calls raid6_altivec1;
-extern const struct raid6_calls raid6_altivec2;
-extern const struct raid6_calls raid6_altivec4;
-extern const struct raid6_calls raid6_altivec8;
-
const struct raid6_calls * const raid6_algos[] = {
&raid6_intx1,
&raid6_intx2,
@@ -169,3 +151,4 @@ static void raid6_exit(void)
subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
diff --git a/lib/raid6/raid6altivec.uc b/lib/raid6/raid6altivec.uc
index 699dfeee4944..2654d5c854be 100644
--- a/lib/raid6/raid6altivec.uc
+++ b/lib/raid6/raid6altivec.uc
@@ -15,7 +15,7 @@
*
* $#-way unrolled portable integer math RAID-6 instruction set
*
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
*
* <benh> hpa: in process,
* you can just "steal" the vec unit with enable_kernel_altivec() (but
diff --git a/lib/raid6/raid6int.uc b/lib/raid6/raid6int.uc
index f9bf9cba357f..d1e276a14fab 100644
--- a/lib/raid6/raid6int.uc
+++ b/lib/raid6/raid6int.uc
@@ -15,7 +15,7 @@
*
* $#-way unrolled portable integer math RAID-6 instruction set
*
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
*/
#include <linux/raid/pq.h>
diff --git a/lib/raid6/raid6test/Makefile b/lib/raid6/raid6test/Makefile
index 58ffdf4f5161..2874cbef529d 100644
--- a/lib/raid6/raid6test/Makefile
+++ b/lib/raid6/raid6test/Makefile
@@ -7,7 +7,7 @@ CC = gcc
OPTFLAGS = -O2 # Adjust as desired
CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
LD = ld
-PERL = perl
+AWK = awk
AR = ar
RANLIB = ranlib
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
raid6test: test.c raid6.a
$(CC) $(CFLAGS) -o raid6test $^
-raid6altivec1.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
+raid6altivec1.c: raid6altivec.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=1 < raid6altivec.uc > $@
-raid6altivec2.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
+raid6altivec2.c: raid6altivec.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=2 < raid6altivec.uc > $@
-raid6altivec4.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
+raid6altivec4.c: raid6altivec.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=4 < raid6altivec.uc > $@
-raid6altivec8.c: raid6altivec.uc ../unroll.pl
- $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
+raid6altivec8.c: raid6altivec.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=8 < raid6altivec.uc > $@
-raid6int1.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 1 < raid6int.uc > $@
+raid6int1.c: raid6int.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=1 < raid6int.uc > $@
-raid6int2.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 2 < raid6int.uc > $@
+raid6int2.c: raid6int.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=2 < raid6int.uc > $@
-raid6int4.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 4 < raid6int.uc > $@
+raid6int4.c: raid6int.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=4 < raid6int.uc > $@
-raid6int8.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 8 < raid6int.uc > $@
+raid6int8.c: raid6int.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=8 < raid6int.uc > $@
-raid6int16.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 16 < raid6int.uc > $@
+raid6int16.c: raid6int.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=16 < raid6int.uc > $@
-raid6int32.c: raid6int.uc ../unroll.pl
- $(PERL) ../unroll.pl 32 < raid6int.uc > $@
+raid6int32.c: raid6int.uc ../unroll.awk
+	$(AWK) -f ../unroll.awk -vN=32 < raid6int.uc > $@
raid6tables.c: mktables
./mktables > raid6tables.c
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
new file mode 100644
index 000000000000..c6aa03631df8
--- /dev/null
+++ b/lib/raid6/unroll.awk
@@ -0,0 +1,20 @@
+
+# This filter requires one command line option of the form -vN=n
+# where n must be a decimal number.
+#
+# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
+# Replace each $# with n, and each $* with a single $.
+
+BEGIN {
+ n = N + 0
+}
+{
+ if (/\$\$/) { rep = n } else { rep = 1 }
+ for (i = 0; i < rep; ++i) {
+ tmp = $0
+ gsub(/\$\$/, i, tmp)
+ gsub(/\$\#/, n, tmp)
+ gsub(/\$\*/, "$", tmp)
+ print tmp
+ }
+}
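
A worked example of the filter: run with -vN=2, the input line

	p$$ ^= q$$;	/* one of $# ways */

expands to

	p0 ^= q0;	/* one of 2 ways */
	p1 ^= q1;	/* one of 2 ways */

while lines without $$ pass through once, and $* emits a literal $.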
diff --git a/lib/raid6/unroll.pl b/lib/raid6/unroll.pl
deleted file mode 100644
index 3acc710a20ea..000000000000
--- a/lib/raid6/unroll.pl
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/perl
-#
-# Take a piece of C code and for each line which contains the sequence $$
-# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
-# by the unrolling factor, and $* with a single $
-#
-
-($n) = @ARGV;
-$n += 0;
-
-while ( defined($line = <STDIN>) ) {
- if ( $line =~ /\$\$/ ) {
- $rep = $n;
- } else {
- $rep = 1;
- }
- for ( $i = 0 ; $i < $rep ; $i++ ) {
- $tmp = $line;
- $tmp =~ s/\$\$/$i/g;
- $tmp =~ s/\$\#/$n/g;
- $tmp =~ s/\$\*/\$/g;
- print $tmp;
- }
-}
diff --git a/lib/random32.c b/lib/random32.c
index 217d5c4b666d..fc3545a32771 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,13 +39,16 @@
#include <linux/jiffies.h>
#include <linux/random.h>
-struct rnd_state {
- u32 s1, s2, s3;
-};
-
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
-static u32 __random32(struct rnd_state *state)
+/**
+ * prandom32 - seeded pseudo-random number generator.
+ * @state: pointer to state structure holding seeded state.
+ *
+ * This is used for pseudo-randomness with no outside seeding.
+ * For more random results, use random32().
+ */
+u32 prandom32(struct rnd_state *state)
{
#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
@@ -55,14 +58,7 @@ static u32 __random32(struct rnd_state *state)
return (state->s1 ^ state->s2 ^ state->s3);
}
-
-/*
- * Handle minimum values for seeds
- */
-static inline u32 __seed(u32 x, u32 m)
-{
- return (x < m) ? x + m : x;
-}
+EXPORT_SYMBOL(prandom32);
/**
* random32 - pseudo random number generator
@@ -75,7 +71,7 @@ u32 random32(void)
{
unsigned long r;
struct rnd_state *state = &get_cpu_var(net_rand_state);
- r = __random32(state);
+ r = prandom32(state);
put_cpu_var(state);
return r;
}
@@ -118,12 +114,12 @@ static int __init random32_init(void)
state->s3 = __seed(LCG(state->s2), 15);
/* "warm it up" */
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
+ prandom32(state);
+ prandom32(state);
+ prandom32(state);
+ prandom32(state);
+ prandom32(state);
+ prandom32(state);
}
return 0;
}
@@ -131,7 +127,7 @@ core_initcall(random32_init);
/*
* Generate better values after random number generator
- * is fully initalized.
+ * is fully initialized.
*/
static int __init random32_reseed(void)
{
@@ -147,7 +143,7 @@ static int __init random32_reseed(void)
state->s3 = __seed(seeds[2], 15);
/* mix it in */
- __random32(state);
+ prandom32(state);
}
return 0;
}
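
With prandom32() exported, users can run a private generator state. A
hedged sketch (assuming struct rnd_state and __seed() are now visible via
<linux/random.h>, as the removal of the local copies above suggests; the
minimum offsets 1/7/15 mirror random32_reseed(); not safe against a
concurrent first call):

#include <linux/random.h>

static u32 private_prandom(void)
{
	static struct rnd_state state;
	static bool seeded;

	if (!seeded) {
		u32 seeds[3];

		get_random_bytes(seeds, sizeof(seeds));
		state.s1 = __seed(seeds[0], 1);
		state.s2 = __seed(seeds[1], 7);
		state.s3 = __seed(seeds[2], 15);
		prandom32(&state);	/* mix it in */
		seeded = true;
	}
	return prandom32(&state);
}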
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 26187edcc7ea..027a03f4c56d 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -7,51 +7,61 @@
* parameter. Now every user can use their own standalone ratelimit_state.
*
* This file is released under the GPLv2.
- *
*/
-#include <linux/kernel.h>
+#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/module.h>
-static DEFINE_SPINLOCK(ratelimit_lock);
-
/*
* __ratelimit - rate limiting
* @rs: ratelimit_state data
+ * @func: name of calling function
+ *
+ * This enforces a rate limit: not more than @rs->burst callbacks
+ * in every @rs->interval
*
- * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
- * in every @rs->ratelimit_jiffies
+ * RETURNS:
+ * 0 means callbacks will be suppressed.
+ * 1 means go ahead and do it.
*/
-int __ratelimit(struct ratelimit_state *rs)
+int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
unsigned long flags;
+ int ret;
if (!rs->interval)
return 1;
- spin_lock_irqsave(&ratelimit_lock, flags);
+ /*
+ * If we contend on this state's lock then almost
+ * by definition we are too busy to print a message,
+ * in addition to the one that will be printed by
+ * the entity that is holding the lock already:
+ */
+ if (!spin_trylock_irqsave(&rs->lock, flags))
+ return 0;
+
if (!rs->begin)
rs->begin = jiffies;
if (time_is_before_jiffies(rs->begin + rs->interval)) {
if (rs->missed)
printk(KERN_WARNING "%s: %d callbacks suppressed\n",
- __func__, rs->missed);
- rs->begin = 0;
+ func, rs->missed);
+ rs->begin = 0;
rs->printed = 0;
- rs->missed = 0;
+ rs->missed = 0;
}
- if (rs->burst && rs->burst > rs->printed)
- goto print;
-
- rs->missed++;
- spin_unlock_irqrestore(&ratelimit_lock, flags);
- return 0;
+ if (rs->burst && rs->burst > rs->printed) {
+ rs->printed++;
+ ret = 1;
+ } else {
+ rs->missed++;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&rs->lock, flags);
-print:
- rs->printed++;
- spin_unlock_irqrestore(&ratelimit_lock, flags);
- return 1;
+ return ret;
}
-EXPORT_SYMBOL(__ratelimit);
+EXPORT_SYMBOL(___ratelimit);
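
Callers stay behind the __ratelimit() name; assuming the header half of
this change maps __ratelimit(rs) onto ___ratelimit(rs, __func__) and that
DEFINE_RATELIMIT_STATE() initializes the new per-state lock, usage looks
like this (a hedged sketch):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* at most 10 messages per 5 seconds, then "callbacks suppressed" */
static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

static void report_error(int err)
{
	if (__ratelimit(&my_rs))
		printk(KERN_WARNING "device error %d\n", err);
}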
diff --git a/lib/rational.c b/lib/rational.c
index b3c099b5478e..3ed247b80662 100644
--- a/lib/rational.c
+++ b/lib/rational.c
@@ -7,6 +7,7 @@
*/
#include <linux/rational.h>
+#include <linux/module.h>
/*
* calculate best rational approximation for a given fraction
diff --git a/lib/rbtree.c b/lib/rbtree.c
index e2aa3be29858..4693f79195d3 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -283,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
}
EXPORT_SYMBOL(rb_erase);
+static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
+{
+ struct rb_node *parent;
+
+up:
+ func(node, data);
+ parent = rb_parent(node);
+ if (!parent)
+ return;
+
+ if (node == parent->rb_left && parent->rb_right)
+ func(parent->rb_right, data);
+ else if (parent->rb_left)
+ func(parent->rb_left, data);
+
+ node = parent;
+ goto up;
+}
+
+/*
+ * after inserting @node into the tree, update the tree to account for
+ * both the new entry and any damage done by rebalance
+ */
+void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
+{
+ if (node->rb_left)
+ node = node->rb_left;
+ else if (node->rb_right)
+ node = node->rb_right;
+
+ rb_augment_path(node, func, data);
+}
+
+/*
+ * before removing the node, find the deepest node on the rebalance path
+ * that will still be there after @node gets removed
+ */
+struct rb_node *rb_augment_erase_begin(struct rb_node *node)
+{
+ struct rb_node *deepest;
+
+ if (!node->rb_right && !node->rb_left)
+ deepest = rb_parent(node);
+ else if (!node->rb_right)
+ deepest = node->rb_left;
+ else if (!node->rb_left)
+ deepest = node->rb_right;
+ else {
+ deepest = rb_next(node);
+ if (deepest->rb_right)
+ deepest = deepest->rb_right;
+ else if (rb_parent(deepest) != node)
+ deepest = rb_parent(deepest);
+ }
+
+ return deepest;
+}
+
+/*
+ * after removal, update the tree to account for the removed entry
+ * and any rebalance damage.
+ */
+void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
+{
+ if (node)
+ rb_augment_path(node, func, data);
+}
+
/*
* This function returns the first node (in sort order) of the tree.
*/
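
The new augment hooks are meant to be wrapped by callers roughly as follows; a sketch assuming rb_augment_f is void (*)(struct rb_node *, void *) and each node caches its subtree size:

#include <linux/rbtree.h>

struct item {
	struct rb_node rb;
	unsigned long subtree_size;	/* augmented per-subtree value */
};

static void update_size(struct rb_node *node, void *data)
{
	struct item *it = rb_entry(node, struct item, rb);
	unsigned long size = 1;

	if (node->rb_left)
		size += rb_entry(node->rb_left, struct item, rb)->subtree_size;
	if (node->rb_right)
		size += rb_entry(node->rb_right, struct item, rb)->subtree_size;
	it->subtree_size = size;
}

static void item_insert(struct rb_root *root, struct item *it,
			struct rb_node *parent, struct rb_node **link)
{
	/* parent/link come from the usual comparison descent (omitted) */
	rb_link_node(&it->rb, parent, link);
	rb_insert_color(&it->rb, root);
	rb_augment_insert(&it->rb, update_size, NULL);
}

static void item_erase(struct rb_root *root, struct item *it)
{
	struct rb_node *deepest = rb_augment_erase_begin(&it->rb);

	rb_erase(&it->rb, root);
	rb_augment_erase_end(deepest, update_size, NULL);
}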
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 9df3ca56db11..ffc9fc7f3b05 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,6 +17,19 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
+int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ int ret = 1;
+ unsigned long flags;
+
+ if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ ret = (sem->activity != 0);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rwsem_is_locked);
+
/*
* initialise the semaphore
*/
@@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
+EXPORT_SYMBOL(__init_rwsem);
/*
* handle the lock release when processes blocked on it that can now run
@@ -129,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
+ unsigned long flags;
- spin_lock_irq(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
- spin_unlock_irq(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -150,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irq(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -195,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
+ unsigned long flags;
- spin_lock_irq(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
- spin_unlock_irq(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -216,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irq(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -305,12 +321,3 @@ void __downgrade_write(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
-EXPORT_SYMBOL(__init_rwsem);
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write_nested);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
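
The trylock in rwsem_is_locked() is deliberate: if sem->wait_lock is contended, someone is manipulating the semaphore, so reporting "locked" is the safe answer. The intended consumers are assertion-style checks; a sketch:

#include <linux/kernel.h>
#include <linux/rwsem.h>

static void assert_sem_held(struct rw_semaphore *sem)
{
	/* can err toward "locked" under wait_lock contention, but never
	 * reports a quiescent, held semaphore as unlocked */
	WARN_ON_ONCE(!rwsem_is_locked(sem));
}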
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 3e3365e5665e..ceba8e28807a 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
out:
return sem;
- /* undo the change to count, but check for a transition 1->0 */
+ /* undo the change to the active count, but check for a transition
+ * 1->0 */
undo:
- if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
+ if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
goto out;
goto try_again;
}
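
The switch from "!= 0" to "& RWSEM_ACTIVE_MASK" is a real bugfix, not a cleanup. A worked example with the typical 32-bit constants (exact values are per-architecture):

/*
 *   RWSEM_ACTIVE_BIAS   0x00000001
 *   RWSEM_ACTIVE_MASK   0x0000ffff
 *   RWSEM_WAITING_BIAS  0xffff0000  (-65536)
 *
 * One sleeping waiter, no active tasks after the undo step:
 *   count = 0xffff0001 - RWSEM_ACTIVE_BIAS = 0xffff0000
 *
 * "count != 0" is true here, so the old code took "goto out" and never
 * retried the wakeup even though nobody was active; masking with
 * RWSEM_ACTIVE_MASK correctly reads 0 active tasks and retries.
 */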
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 0d475d8167bf..9afa25b52a83 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -7,6 +7,7 @@
* Version 2. See the file COPYING for more details.
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 238e72a18ce1..fdc77c82f922 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -15,7 +15,7 @@ void show_mem(void)
unsigned long total = 0, reserved = 0, shared = 0,
nonshared = 0, highmem = 0;
- printk(KERN_INFO "Mem-Info:\n");
+ printk("Mem-Info:\n");
show_free_areas();
for_each_online_pgdat(pgdat) {
@@ -49,15 +49,15 @@ void show_mem(void)
pgdat_resize_unlock(pgdat, &flags);
}
- printk(KERN_INFO "%lu pages RAM\n", total);
+ printk("%lu pages RAM\n", total);
#ifdef CONFIG_HIGHMEM
- printk(KERN_INFO "%lu pages HighMem\n", highmem);
+ printk("%lu pages HighMem\n", highmem);
#endif
- printk(KERN_INFO "%lu pages reserved\n", reserved);
- printk(KERN_INFO "%lu pages shared\n", shared);
- printk(KERN_INFO "%lu pages non-shared\n", nonshared);
+ printk("%lu pages reserved\n", reserved);
+ printk("%lu pages shared\n", shared);
+ printk("%lu pages non-shared\n", nonshared);
#ifdef CONFIG_QUICKLIST
- printk(KERN_INFO "%lu pages in pagetable cache\n",
+ printk("%lu pages in pagetable cache\n",
quicklist_total_size());
#endif
}
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
-void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__raw_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+ SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
lock->owner_cpu = -1;
}
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_spin_trylock(&lock->raw_lock))
+ if (arch_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
}
}
-void _raw_spin_lock(spinlock_t *lock)
+void do_raw_spin_lock(raw_spinlock_t *lock)
{
debug_spin_lock_before(lock);
- if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+ if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
__spin_lock_debug(lock);
debug_spin_lock_after(lock);
}
-int _raw_spin_trylock(spinlock_t *lock)
+int do_raw_spin_trylock(raw_spinlock_t *lock)
{
- int ret = __raw_spin_trylock(&lock->raw_lock);
+ int ret = arch_spin_trylock(&lock->raw_lock);
if (ret)
debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
-void _raw_spin_unlock(spinlock_t *lock)
+void do_raw_spin_unlock(raw_spinlock_t *lock)
{
debug_spin_unlock(lock);
- __raw_spin_unlock(&lock->raw_lock);
+ arch_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_read_trylock(&lock->raw_lock))
+ if (arch_read_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
}
#endif
-void _raw_read_lock(rwlock_t *lock)
+void do_raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_lock(&lock->raw_lock);
+ arch_read_lock(&lock->raw_lock);
}
-int _raw_read_trylock(rwlock_t *lock)
+int do_raw_read_trylock(rwlock_t *lock)
{
- int ret = __raw_read_trylock(&lock->raw_lock);
+ int ret = arch_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
return ret;
}
-void _raw_read_unlock(rwlock_t *lock)
+void do_raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_unlock(&lock->raw_lock);
+ arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_write_trylock(&lock->raw_lock))
+ if (arch_write_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
}
#endif
-void _raw_write_lock(rwlock_t *lock)
+void do_raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
- __raw_write_lock(&lock->raw_lock);
+ arch_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
-int _raw_write_trylock(rwlock_t *lock)
+int do_raw_write_trylock(rwlock_t *lock)
{
- int ret = __raw_write_trylock(&lock->raw_lock);
+ int ret = arch_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
return ret;
}
-void _raw_write_unlock(rwlock_t *lock)
+void do_raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
- __raw_write_unlock(&lock->raw_lock);
+ arch_write_unlock(&lock->raw_lock);
}
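
For orientation, the naming after this rename, assuming the matching type split in <linux/spinlock_types.h>:

/*
 * spinlock_t                generic API object (may become sleeping on -rt)
 *   .rlock: raw_spinlock_t      always-spinning lock; what this file checks
 *     .raw_lock: arch_spinlock_t   per-architecture lock word
 *
 * debug-build call chain, roughly:
 *   spin_lock(&l)                       API layer
 *     -> raw_spin_lock(&l->rlock)
 *       -> do_raw_spin_lock()           this file: magic/owner/CPU checks
 *         -> arch_spin_trylock()        architecture code
 */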
diff --git a/lib/string.c b/lib/string.c
index b19b87af65a3..f71bead1be3e 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len)
/* Yes, Virginia, it had better be unsigned */
unsigned char c1, c2;
- c1 = c2 = 0;
- if (len) {
- do {
- c1 = *s1;
- c2 = *s2;
- s1++;
- s2++;
- if (!c1)
- break;
- if (!c2)
- break;
- if (c1 == c2)
- continue;
- c1 = tolower(c1);
- c2 = tolower(c2);
- if (c1 != c2)
- break;
- } while (--len);
- }
+ if (!len)
+ return 0;
+
+ do {
+ c1 = *s1++;
+ c2 = *s2++;
+ if (!c1 || !c2)
+ break;
+ if (c1 == c2)
+ continue;
+ c1 = tolower(c1);
+ c2 = tolower(c2);
+ if (c1 != c2)
+ break;
+ } while (--len);
return (int)c1 - (int)c2;
}
EXPORT_SYMBOL(strnicmp);
@@ -246,13 +242,17 @@ EXPORT_SYMBOL(strlcat);
#undef strcmp
int strcmp(const char *cs, const char *ct)
{
- signed char __res;
+ unsigned char c1, c2;
while (1) {
- if ((__res = *cs - *ct++) != 0 || !*cs++)
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
break;
}
- return __res;
+ return 0;
}
EXPORT_SYMBOL(strcmp);
#endif
@@ -266,14 +266,18 @@ EXPORT_SYMBOL(strcmp);
*/
int strncmp(const char *cs, const char *ct, size_t count)
{
- signed char __res = 0;
+ unsigned char c1, c2;
while (count) {
- if ((__res = *cs - *ct++) != 0 || !*cs++)
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
break;
count--;
}
- return __res;
+ return 0;
}
EXPORT_SYMBOL(strncmp);
#endif
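
The unsigned rewrite of strcmp()/strncmp() is behavioural, not cosmetic: the old signed-char subtraction can return the wrong sign. A worked example, assuming plain char is signed as on x86:

/*
 * strcmp("\xff", "\x01") must be positive: 0xff > 0x01 as unsigned bytes.
 *
 *   old: __res = *cs - *ct = (-1) - 1 = -2    -> wrongly "less than"
 *   new: c1 = 0xff, c2 = 0x01, c1 > c2        -> returns 1, correct
 *
 * Normalizing to -1/0/1 also keeps the result independent of how far
 * apart the differing bytes happen to be.
 */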
@@ -330,20 +334,34 @@ EXPORT_SYMBOL(strnchr);
#endif
/**
- * strstrip - Removes leading and trailing whitespace from @s.
+ * skip_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ */
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+EXPORT_SYMBOL(skip_spaces);
+
+/**
+ * strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
-char *strstrip(char *s)
+char *strim(char *s)
{
size_t size;
char *end;
+ s = skip_spaces(s);
size = strlen(s);
-
if (!size)
return s;
@@ -352,12 +370,9 @@ char *strstrip(char *s)
end--;
*(end + 1) = '\0';
- while (*s && isspace(*s))
- s++;
-
return s;
}
-EXPORT_SYMBOL(strstrip);
+EXPORT_SYMBOL(strim);
#ifndef __HAVE_ARCH_STRLEN
/**
@@ -648,7 +663,7 @@ EXPORT_SYMBOL(memscan);
*/
char *strstr(const char *s1, const char *s2)
{
- int l1, l2;
+ size_t l1, l2;
l2 = strlen(s2);
if (!l2)
@@ -665,6 +680,31 @@ char *strstr(const char *s1, const char *s2)
EXPORT_SYMBOL(strstr);
#endif
+#ifndef __HAVE_ARCH_STRNSTR
+/**
+ * strnstr - Find the first substring in a length-limited string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ * @len: The maximum number of characters to search
+ */
+char *strnstr(const char *s1, const char *s2, size_t len)
+{
+ size_t l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ while (len >= l2) {
+ len--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strnstr);
+#endif
+
#ifndef __HAVE_ARCH_MEMCHR
/**
* memchr - Find a character in an area of memory.
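
A sketch of the new string.c helpers in use. strim() returns a pointer into the buffer it was given (it only writes a single NUL), so callers must keep the original allocation around; "keyword" below is a placeholder:

#include <linux/types.h>
#include <linux/string.h>

static bool line_has_keyword(char *line, size_t len)
{
	char *s = strim(line);	/* drop leading and trailing whitespace */

	/* bounded substring search, at most len characters examined */
	return strnstr(s, "keyword", len) != NULL;
}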
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ac25cd28e807..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
+#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -49,19 +50,11 @@
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
- SYNC_FOR_CPU = 0,
- SYNC_FOR_DEVICE = 1,
-};
-
int swiotlb_force;
/*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static char *io_tlb_start, *io_tlb_end;
@@ -97,6 +90,8 @@ static phys_addr_t *io_tlb_orig_addr;
*/
static DEFINE_SPINLOCK(io_tlb_lock);
+static int late_alloc;
+
static int __init
setup_io_tlb_npages(char *str)
{
@@ -109,6 +104,7 @@ setup_io_tlb_npages(char *str)
++str;
if (!strcmp(str, "force"))
swiotlb_force = 1;
+
return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +117,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
return phys_to_dma(hwdev, virt_to_phys(address));
}
-static void swiotlb_print_info(unsigned long bytes)
+void swiotlb_print_info(void)
{
+ unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
phys_addr_t pstart, pend;
pstart = virt_to_phys(io_tlb_start);
@@ -135,28 +132,14 @@ static void swiotlb_print_info(unsigned long bytes)
(unsigned long long)pend);
}
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
unsigned long i, bytes;
- if (!io_tlb_nslabs) {
- io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
-
- bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+ bytes = nslabs << IO_TLB_SHIFT;
- /*
- * Get IO TLB memory from the low pages
- */
- io_tlb_start = alloc_bootmem_low_pages(bytes);
- if (!io_tlb_start)
- panic("Cannot allocate SWIOTLB buffer");
+ io_tlb_nslabs = nslabs;
+ io_tlb_start = tlb;
io_tlb_end = io_tlb_start + bytes;
/*
@@ -176,14 +159,40 @@ swiotlb_init_with_default_size(size_t default_size)
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
+ if (verbose)
+ swiotlb_print_info();
+}
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+ unsigned long bytes;
+
+ if (!io_tlb_nslabs) {
+ io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+ }
+
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+ /*
+ * Get IO TLB memory from the low pages
+ */
+ io_tlb_start = alloc_bootmem_low_pages(bytes);
+ if (!io_tlb_start)
+ panic("Cannot allocate SWIOTLB buffer");
- swiotlb_print_info(bytes);
+ swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
}
void __init
-swiotlb_init(void)
+swiotlb_init(int verbose)
{
- swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+ swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */
}
/*
@@ -260,7 +269,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
if (!io_tlb_overflow_buffer)
goto cleanup4;
- swiotlb_print_info(bytes);
+ swiotlb_print_info();
+
+ late_alloc = 1;
return 0;
@@ -281,6 +292,32 @@ cleanup1:
return -ENOMEM;
}
+void __init swiotlb_free(void)
+{
+ if (!io_tlb_overflow_buffer)
+ return;
+
+ if (late_alloc) {
+ free_pages((unsigned long)io_tlb_overflow_buffer,
+ get_order(io_tlb_overflow));
+ free_pages((unsigned long)io_tlb_orig_addr,
+ get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
+ free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+ sizeof(int)));
+ free_pages((unsigned long)io_tlb_start,
+ get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+ } else {
+ free_bootmem_late(__pa(io_tlb_overflow_buffer),
+ io_tlb_overflow);
+ free_bootmem_late(__pa(io_tlb_orig_addr),
+ io_tlb_nslabs * sizeof(phys_addr_t));
+ free_bootmem_late(__pa(io_tlb_list),
+ io_tlb_nslabs * sizeof(int));
+ free_bootmem_late(__pa(io_tlb_start),
+ io_tlb_nslabs << IO_TLB_SHIFT);
+ }
+}
+
static int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= virt_to_phys(io_tlb_start) &&
@@ -290,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
/*
* Bounce: copy the swiotlb buffer back to the original dma location
*/
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
- enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+ enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(phys);
@@ -327,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
memcpy(phys_to_virt(phys), dma_addr, size);
}
}
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+ phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
{
unsigned long flags;
char *dma_addr;
unsigned int nslots, stride, index, wrap;
int i;
- unsigned long start_dma_addr;
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
mask = dma_get_seg_boundary(hwdev);
- start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
- offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ tbl_dma_addr &= mask;
+
+ offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
/*
* Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -433,12 +469,27 @@ found:
return dma_addr;
}
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+ return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
/*
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+ enum dma_data_direction dir)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -453,7 +504,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
/*
* Return the buffer to the free list by setting the corresponding
- * entries to indicate the number of contigous entries available.
+ * entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
@@ -476,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
- int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+ enum dma_data_direction dir,
+ enum dma_sync_target target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t phys = io_tlb_orig_addr[index];
@@ -503,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
BUG();
}
}
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -517,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
+ if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
@@ -526,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
}
if (!ret) {
/*
- * We are either out of memory or the device can't DMA
- * to GFP_DMA memory; fall back on map_single(), which
+ * We are either out of memory or the device can't DMA to
+ * GFP_DMA memory; fall back on map_single(), which
* will grab memory from the lowest available address range.
*/
ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -539,13 +593,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
/* Confirm address can be DMA'd by device */
- if (dev_addr + size > dma_mask) {
+ if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+ swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
return NULL;
}
*dma_handle = dev_addr;
@@ -563,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
if (!is_swiotlb_buffer(paddr))
free_pages((unsigned long)vaddr, get_order(size));
else
- /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+ /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+ swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+ int do_panic)
{
/*
* Ran out of IOMMU space for this operation. This is very bad.
@@ -647,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
* whatever the device wrote there.
*/
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir)
+ size_t size, enum dma_data_direction dir)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
- do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+ swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
return;
}
@@ -690,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
*/
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir, int target)
+ size_t size, enum dma_data_direction dir,
+ enum dma_sync_target target)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
- sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+ swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+ target);
return;
}
@@ -724,37 +781,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
/*
- * Same as above, but for a sub-range of the mapping.
- */
-static void
-swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
- unsigned long offset, size_t size,
- int dir, int target)
-{
- swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
-}
-
-void
-swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
- SYNC_FOR_CPU);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
-
-void
-swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
- SYNC_FOR_DEVICE);
-}
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
-
-/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* This is the scatter-gather version of the above swiotlb_map_page
* interface. Here the scatter gather list elements are each tagged with the
@@ -807,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
- int dir)
+ enum dma_data_direction dir)
{
return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
@@ -834,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
- int dir)
+ enum dma_data_direction dir)
{
return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
@@ -849,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
*/
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
- int nelems, int dir, int target)
+ int nelems, enum dma_data_direction dir,
+ enum dma_sync_target target)
{
struct scatterlist *sg;
int i;
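
Among the mechanical enum/type changes above, note the two "dev_addr + size - 1 > dma_mask" hunks in swiotlb_alloc_coherent(): they fix an off-by-one. A worked example for a device with a 32-bit DMA mask:

/*
 * dma_mask = 0xffffffff, dev_addr = 0xfffff000, size = 0x1000
 *
 *   old test: dev_addr + size     = 0x100000000 > mask  -> rejected
 *   new test: dev_addr + size - 1 = 0xffffffff <= mask  -> accepted
 *
 * The buffer's last byte is exactly addressable, so the old check
 * rejected valid allocations (and the un-decremented sum can even wrap
 * in a 32-bit dma_addr_t).
 */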
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 9fbcb44c554f..d608331b3e47 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -103,6 +103,7 @@
#include <linux/rcupdate.h>
#include <linux/err.h>
#include <linux/textsearch.h>
+#include <linux/slab.h>
static LIST_HEAD(ts_ops);
static DEFINE_SPINLOCK(ts_mod_lock);
diff --git a/lib/uuid.c b/lib/uuid.c
new file mode 100644
index 000000000000..8fadd7cef46c
--- /dev/null
+++ b/lib/uuid.c
@@ -0,0 +1,53 @@
+/*
+ * Unified UUID/GUID definition
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uuid.h>
+#include <linux/random.h>
+
+static void __uuid_gen_common(__u8 b[16])
+{
+ int i;
+ u32 r;
+
+ for (i = 0; i < 4; i++) {
+ r = random32();
+ memcpy(b + i * 4, &r, 4);
+ }
+ /* variant bits: 0b10 (RFC 4122) */
+ b[8] = (b[8] & 0x3F) | 0x80;
+}
+
+void uuid_le_gen(uuid_le *lu)
+{
+ __uuid_gen_common(lu->b);
+ /* version 4 : random generation */
+ lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
+}
+EXPORT_SYMBOL_GPL(uuid_le_gen);
+
+void uuid_be_gen(uuid_be *bu)
+{
+ __uuid_gen_common(bu->b);
+ /* version 4 : random generation */
+ bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
+}
+EXPORT_SYMBOL_GPL(uuid_be_gen);
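
A sketch of the new helpers in use; the %pU printk extension is added by the vsprintf.c changes below, and the version/variant bit positions follow RFC 4122:

#include <linux/kernel.h>
#include <linux/uuid.h>

static void demo_uuid(void)
{
	uuid_be u;

	uuid_be_gen(&u);
	/* now: high nibble of u.b[6] == 0100 (version 4, random),
	 *      top two bits of u.b[8] == 10 (RFC 4122 variant) */
	printk(KERN_INFO "new uuid: %pUb\n", &u);
}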
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b91839e9e892..4ee19d0d3910 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -9,7 +9,7 @@
* Wirzenius wrote this portably, Torvalds fucked it up :-)
*/
-/*
+/*
* Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* - changed to provide snprintf and vsnprintf functions
* So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
@@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp)
}
/**
- * simple_strtoul - convert a string to an unsigned long
+ * simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long result = 0;
+ unsigned long long result = 0;
if (!base)
base = simple_guess_base(cp);
@@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
result = result * base + value;
cp++;
}
-
if (endp)
*endp = (char *)cp;
+
return result;
}
-EXPORT_SYMBOL(simple_strtoul);
+EXPORT_SYMBOL(simple_strtoull);
/**
- * simple_strtol - convert a string to a signed long
+ * simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-long simple_strtol(const char *cp, char **endp, unsigned int base)
+unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
- if(*cp == '-')
- return -simple_strtoul(cp + 1, endp, base);
- return simple_strtoul(cp, endp, base);
+ return simple_strtoull(cp, endp, base);
}
-EXPORT_SYMBOL(simple_strtol);
+EXPORT_SYMBOL(simple_strtoul);
/**
- * simple_strtoull - convert a string to an unsigned long long
+ * simple_strtol - convert a string to a signed long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+long simple_strtol(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result = 0;
-
- if (!base)
- base = simple_guess_base(cp);
-
- if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
- cp += 2;
-
- while (isxdigit(*cp)) {
- unsigned int value;
-
- value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
- if (value >= base)
- break;
- result = result * base + value;
- cp++;
- }
+ if (*cp == '-')
+ return -simple_strtoul(cp + 1, endp, base);
- if (endp)
- *endp = (char *)cp;
- return result;
+ return simple_strtoul(cp, endp, base);
}
-EXPORT_SYMBOL(simple_strtoull);
+EXPORT_SYMBOL(simple_strtol);
/**
* simple_strtoll - convert a string to a signed long long
@@ -132,10 +113,12 @@ EXPORT_SYMBOL(simple_strtoull);
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- if(*cp=='-')
+ if (*cp == '-')
return -simple_strtoull(cp + 1, endp, base);
+
return simple_strtoull(cp, endp, base);
}
+EXPORT_SYMBOL(simple_strtoll);
/**
* strict_strtoul - convert a string to an unsigned long strictly
@@ -173,6 +156,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
val = simple_strtoul(cp, &tail, base);
if (tail == cp)
return -EINVAL;
+
if ((*tail == '\0') ||
((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
*res = val;
@@ -283,12 +267,14 @@ int strict_strtoll(const char *cp, unsigned int base, long long *res)
}
EXPORT_SYMBOL(strict_strtoll);
-static int skip_atoi(const char **s)
+static noinline_for_stack
+int skip_atoi(const char **s)
{
- int i=0;
+ int i = 0;
while (isdigit(**s))
i = i*10 + *((*s)++) - '0';
+
return i;
}
@@ -302,7 +288,8 @@ static int skip_atoi(const char **s)
/* Formats correctly any integer in [0,99999].
* Outputs from one to five digits depending on input.
* On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
-static char* put_dec_trunc(char *buf, unsigned q)
+static noinline_for_stack
+char *put_dec_trunc(char *buf, unsigned q)
{
unsigned d3, d2, d1, d0;
d1 = (q>>4) & 0xf;
@@ -331,14 +318,16 @@ static char* put_dec_trunc(char *buf, unsigned q)
d3 = d3 - 10*q;
*buf++ = d3 + '0'; /* next digit */
if (q != 0)
- *buf++ = q + '0'; /* most sign. digit */
+ *buf++ = q + '0'; /* most sign. digit */
}
}
}
+
return buf;
}
/* Same with if's removed. Always emits five digits */
-static char* put_dec_full(char *buf, unsigned q)
+static noinline_for_stack
+char *put_dec_full(char *buf, unsigned q)
{
/* BTW, if q is in [0,9999], 8-bit ints will be enough, */
/* but anyway, gcc produces better code with full-sized ints */
@@ -347,14 +336,15 @@ static char* put_dec_full(char *buf, unsigned q)
d2 = (q>>8) & 0xf;
d3 = (q>>12);
- /* Possible ways to approx. divide by 10 */
- /* gcc -O2 replaces multiply with shifts and adds */
- // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
- // (x * 0x67) >> 10: 1100111
- // (x * 0x34) >> 9: 110100 - same
- // (x * 0x1a) >> 8: 11010 - same
- // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
-
+ /*
+ * Possible ways to approx. divide by 10
+ * gcc -O2 replaces multiply with shifts and adds
+ * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
+ * (x * 0x67) >> 10: 1100111
+ * (x * 0x34) >> 9: 110100 - same
+ * (x * 0x1a) >> 8: 11010 - same
+ * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
+ */
d0 = 6*(d3 + d2 + d1) + (q & 0xf);
q = (d0 * 0xcd) >> 11;
d0 = d0 - 10*q;
@@ -375,10 +365,12 @@ static char* put_dec_full(char *buf, unsigned q)
d3 = d3 - 10*q;
*buf++ = d3 + '0';
*buf++ = q + '0';
+
return buf;
}
/* No inlining helps gcc to use registers better */
-static noinline char* put_dec(char *buf, unsigned long long num)
+static noinline_for_stack
+char *put_dec(char *buf, unsigned long long num)
{
while (1) {
unsigned rem;
@@ -394,8 +386,8 @@ static noinline char* put_dec(char *buf, unsigned long long num)
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define LEFT 16 /* left justified */
-#define SMALL 32 /* Must be 32 == 0x20 */
-#define SPECIAL 64 /* 0x */
+#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
+#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
enum format_type {
FORMAT_TYPE_NONE, /* Just a string part */
@@ -421,16 +413,17 @@ enum format_type {
};
struct printf_spec {
- enum format_type type;
- int flags; /* flags to number() */
- int field_width; /* width of output field */
- int base;
- int precision; /* # of digits/chars */
- int qualifier;
+ u8 type; /* format_type enum */
+ u8 flags; /* flags to number() */
+ u8 base; /* number base, 8, 10 or 16 only */
+ u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */
+ s16 field_width; /* width of output field */
+ s16 precision; /* # of digits/chars */
};
-static char *number(char *buf, char *end, unsigned long long num,
- struct printf_spec spec)
+static noinline_for_stack
+char *number(char *buf, char *end, unsigned long long num,
+ struct printf_spec spec)
{
/* we are called with base 8, 10 or 16, only, thus don't need "G..." */
static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
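
The struct printf_spec repacking above is a stack-size optimization; rough arithmetic, with exact sizes depending on the ABI:

/*
 * old: 1 enum + 5 int fields        -> 6 * 4 = 24 bytes
 * new: 4 u8 fields + 2 s16 fields   -> 4 + 4  =  8 bytes
 *
 * printf_spec is passed by value through every vsnprintf() helper
 * frame, so shrinking it trims several stack frames at once (which is
 * also why the helpers gain noinline_for_stack).
 */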
@@ -448,9 +441,9 @@ static char *number(char *buf, char *end, unsigned long long num,
spec.flags &= ~ZEROPAD;
sign = 0;
if (spec.flags & SIGN) {
- if ((signed long long) num < 0) {
+ if ((signed long long)num < 0) {
sign = '-';
- num = - (signed long long) num;
+ num = -(signed long long)num;
spec.field_width--;
} else if (spec.flags & PLUS) {
sign = '+';
@@ -478,7 +471,9 @@ static char *number(char *buf, char *end, unsigned long long num,
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
- if (spec.base == 16) shift = 4;
+
+ if (spec.base == 16)
+ shift = 4;
do {
tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
num >>= shift;
@@ -493,7 +488,7 @@ static char *number(char *buf, char *end, unsigned long long num,
/* leading space padding */
spec.field_width -= spec.precision;
if (!(spec.flags & (ZEROPAD+LEFT))) {
- while(--spec.field_width >= 0) {
+ while (--spec.field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
@@ -543,15 +538,17 @@ static char *number(char *buf, char *end, unsigned long long num,
*buf = ' ';
++buf;
}
+
return buf;
}
-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
+static noinline_for_stack
+char *string(char *buf, char *end, const char *s, struct printf_spec spec)
{
int len, i;
if ((unsigned long)s < PAGE_SIZE)
- s = "<NULL>";
+ s = "(null)";
len = strnlen(s, spec.precision);
@@ -572,11 +569,13 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
*buf = ' ';
++buf;
}
+
return buf;
}
-static char *symbol_string(char *buf, char *end, void *ptr,
- struct printf_spec spec, char ext)
+static noinline_for_stack
+char *symbol_string(char *buf, char *end, void *ptr,
+ struct printf_spec spec, char ext)
{
unsigned long value = (unsigned long) ptr;
#ifdef CONFIG_KALLSYMS
@@ -585,75 +584,177 @@ static char *symbol_string(char *buf, char *end, void *ptr,
sprint_symbol(sym, value);
else
kallsyms_lookup(value, NULL, NULL, NULL, sym);
+
return string(buf, end, sym, spec);
#else
- spec.field_width = 2*sizeof(void *);
+ spec.field_width = 2 * sizeof(void *);
spec.flags |= SPECIAL | SMALL | ZEROPAD;
spec.base = 16;
+
return number(buf, end, value, spec);
#endif
}
-static char *resource_string(char *buf, char *end, struct resource *res,
- struct printf_spec spec)
+static noinline_for_stack
+char *resource_string(char *buf, char *end, struct resource *res,
+ struct printf_spec spec, const char *fmt)
{
#ifndef IO_RSRC_PRINTK_SIZE
-#define IO_RSRC_PRINTK_SIZE 4
+#define IO_RSRC_PRINTK_SIZE 6
#endif
#ifndef MEM_RSRC_PRINTK_SIZE
-#define MEM_RSRC_PRINTK_SIZE 8
+#define MEM_RSRC_PRINTK_SIZE 10
#endif
- struct printf_spec num_spec = {
+ static const struct printf_spec io_spec = {
.base = 16,
+ .field_width = IO_RSRC_PRINTK_SIZE,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
- /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
- char sym[4*sizeof(resource_size_t) + 8];
- char *p = sym, *pend = sym + sizeof(sym);
- int size = -1;
+ static const struct printf_spec mem_spec = {
+ .base = 16,
+ .field_width = MEM_RSRC_PRINTK_SIZE,
+ .precision = -1,
+ .flags = SPECIAL | SMALL | ZEROPAD,
+ };
+ static const struct printf_spec bus_spec = {
+ .base = 16,
+ .field_width = 2,
+ .precision = -1,
+ .flags = SMALL | ZEROPAD,
+ };
+ static const struct printf_spec dec_spec = {
+ .base = 10,
+ .precision = -1,
+ .flags = 0,
+ };
+ static const struct printf_spec str_spec = {
+ .field_width = -1,
+ .precision = 10,
+ .flags = LEFT,
+ };
+ static const struct printf_spec flag_spec = {
+ .base = 16,
+ .precision = -1,
+ .flags = SPECIAL | SMALL,
+ };
- if (res->flags & IORESOURCE_IO)
- size = IO_RSRC_PRINTK_SIZE;
- else if (res->flags & IORESOURCE_MEM)
- size = MEM_RSRC_PRINTK_SIZE;
+ /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
+ * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
+#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
+#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
+#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
+#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
+ char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
+ 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
+
+ char *p = sym, *pend = sym + sizeof(sym);
+ int decode = (fmt[0] == 'R') ? 1 : 0;
+ const struct printf_spec *specp;
*p++ = '[';
- num_spec.field_width = size;
- p = number(p, pend, res->start, num_spec);
- *p++ = '-';
- p = number(p, pend, res->end, num_spec);
+ if (res->flags & IORESOURCE_IO) {
+ p = string(p, pend, "io ", str_spec);
+ specp = &io_spec;
+ } else if (res->flags & IORESOURCE_MEM) {
+ p = string(p, pend, "mem ", str_spec);
+ specp = &mem_spec;
+ } else if (res->flags & IORESOURCE_IRQ) {
+ p = string(p, pend, "irq ", str_spec);
+ specp = &dec_spec;
+ } else if (res->flags & IORESOURCE_DMA) {
+ p = string(p, pend, "dma ", str_spec);
+ specp = &dec_spec;
+ } else if (res->flags & IORESOURCE_BUS) {
+ p = string(p, pend, "bus ", str_spec);
+ specp = &bus_spec;
+ } else {
+ p = string(p, pend, "??? ", str_spec);
+ specp = &mem_spec;
+ decode = 0;
+ }
+ p = number(p, pend, res->start, *specp);
+ if (res->start != res->end) {
+ *p++ = '-';
+ p = number(p, pend, res->end, *specp);
+ }
+ if (decode) {
+ if (res->flags & IORESOURCE_MEM_64)
+ p = string(p, pend, " 64bit", str_spec);
+ if (res->flags & IORESOURCE_PREFETCH)
+ p = string(p, pend, " pref", str_spec);
+ if (res->flags & IORESOURCE_WINDOW)
+ p = string(p, pend, " window", str_spec);
+ if (res->flags & IORESOURCE_DISABLED)
+ p = string(p, pend, " disabled", str_spec);
+ } else {
+ p = string(p, pend, " flags ", str_spec);
+ p = number(p, pend, res->flags, flag_spec);
+ }
*p++ = ']';
- *p = 0;
+ *p = '\0';
return string(buf, end, sym, spec);
}
-static char *mac_address_string(char *buf, char *end, u8 *addr,
- struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *mac_address_string(char *buf, char *end, u8 *addr,
+ struct printf_spec spec, const char *fmt)
{
char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
char *p = mac_addr;
int i;
+ char separator;
+
+ if (fmt[1] == 'F') { /* FDDI canonical format */
+ separator = '-';
+ } else {
+ separator = ':';
+ }
for (i = 0; i < 6; i++) {
p = pack_hex_byte(p, addr[i]);
if (fmt[0] == 'M' && i != 5)
- *p++ = ':';
+ *p++ = separator;
}
*p = '\0';
return string(buf, end, mac_addr, spec);
}
-static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
+static noinline_for_stack
+char *ip4_string(char *p, const u8 *addr, const char *fmt)
{
int i;
-
+ bool leading_zeros = (fmt[0] == 'i');
+ int index;
+ int step;
+
+ switch (fmt[2]) {
+ case 'h':
+#ifdef __BIG_ENDIAN
+ index = 0;
+ step = 1;
+#else
+ index = 3;
+ step = -1;
+#endif
+ break;
+ case 'l':
+ index = 3;
+ step = -1;
+ break;
+ case 'n':
+ case 'b':
+ default:
+ index = 0;
+ step = 1;
+ break;
+ }
for (i = 0; i < 4; i++) {
char temp[3]; /* hold each IP quad in reverse order */
- int digits = put_dec_trunc(temp, addr[i]) - temp;
+ int digits = put_dec_trunc(temp, addr[index]) - temp;
if (leading_zeros) {
if (digits < 3)
*p++ = '0';
@@ -665,23 +766,22 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
*p++ = temp[digits];
if (i < 3)
*p++ = '.';
+ index += step;
}
-
*p = '\0';
+
return p;
}
-static char *ip6_compressed_string(char *p, const char *addr)
+static noinline_for_stack
+char *ip6_compressed_string(char *p, const char *addr)
{
- int i;
- int j;
- int range;
+ int i, j, range;
unsigned char zerolength[8];
int longest = 1;
int colonpos = -1;
u16 word;
- u8 hi;
- u8 lo;
+ u8 hi, lo;
bool needcolon = false;
bool useIPv4;
struct in6_addr in6;
@@ -735,8 +835,9 @@ static char *ip6_compressed_string(char *p, const char *addr)
p = pack_hex_byte(p, hi);
else
*p++ = hex_asc_lo(hi);
+ p = pack_hex_byte(p, lo);
}
- if (hi || lo > 0x0f)
+ else if (lo > 0x0f)
p = pack_hex_byte(p, lo);
else
*p++ = hex_asc_lo(lo);
@@ -746,29 +847,32 @@ static char *ip6_compressed_string(char *p, const char *addr)
if (useIPv4) {
if (needcolon)
*p++ = ':';
- p = ip4_string(p, &in6.s6_addr[12], false);
+ p = ip4_string(p, &in6.s6_addr[12], "I4");
}
-
*p = '\0';
+
return p;
}
-static char *ip6_string(char *p, const char *addr, const char *fmt)
+static noinline_for_stack
+char *ip6_string(char *p, const char *addr, const char *fmt)
{
int i;
+
for (i = 0; i < 8; i++) {
p = pack_hex_byte(p, *addr++);
p = pack_hex_byte(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
-
*p = '\0';
+
return p;
}
-static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
- struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
{
char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
@@ -780,16 +884,64 @@ static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
return string(buf, end, ip6_addr, spec);
}
-static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
- struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
{
char ip4_addr[sizeof("255.255.255.255")];
- ip4_string(ip4_addr, addr, fmt[0] == 'i');
+ ip4_string(ip4_addr, addr, fmt);
return string(buf, end, ip4_addr, spec);
}
+static noinline_for_stack
+char *uuid_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
+ char *p = uuid;
+ int i;
+ static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+ static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+ const u8 *index = be;
+ bool uc = false;
+
+ switch (*(++fmt)) {
+ case 'L':
+ uc = true; /* fall-through */
+ case 'l':
+ index = le;
+ break;
+ case 'B':
+ uc = true;
+ break;
+ }
+
+ for (i = 0; i < 16; i++) {
+ p = pack_hex_byte(p, addr[index[i]]);
+ switch (i) {
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ *p++ = '-';
+ break;
+ }
+ }
+
+ *p = 0;
+
+ if (uc) {
+ p = uuid;
+ do {
+ *p = toupper(*p);
+ } while (*(++p));
+ }
+
+ return string(buf, end, uuid, spec);
+}
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -801,25 +953,46 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
* - 'f' For simple symbolic function names without offset
* - 'S' For symbolic direct pointers with offset
* - 's' For symbolic direct pointers without offset
- * - 'R' For a struct resource pointer, it prints the range of
- * addresses (not the name nor the flags)
+ * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+ * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'MF' For a 6-byte MAC FDDI address, it prints the address
+ * with a dash-separated hex notation
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
* - 'i' [46] for 'raw' IPv4/IPv6 addresses
* IPv6 omits the colons (01020304...0f)
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
+ * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I6c' for IPv6 addresses printed as specified by
- * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
+ * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
+ * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
+ * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ * Options for %pU are:
+ * b big endian lower case hex (default)
+ * B big endian UPPER case hex
+ * l little endian lower case hex
+ * L little endian UPPER case hex
+ * big endian output byte order is:
+ * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
+ * little endian output byte order is:
+ * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
+ * - 'V' For a struct va_format which contains a format string * and va_list *,
+ * call vsnprintf(->format, *->va_list).
+ * Implements a "recursive vsnprintf".
+ * Do not use this feature without some mechanism to verify the
+ * correctness of the format string and va_list arguments.
+ *
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
* pointer to the real address.
*/
-static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
- struct printf_spec spec)
+static noinline_for_stack
+char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ struct printf_spec spec)
{
if (!ptr)
return string(buf, end, "(null)", spec);
@@ -828,14 +1001,16 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
case 'f':
ptr = dereference_function_descriptor(ptr);
- case 's':
/* Fallthrough */
case 'S':
+ case 's':
return symbol_string(buf, end, ptr, spec, *fmt);
case 'R':
- return resource_string(buf, end, ptr, spec);
+ case 'r':
+ return resource_string(buf, end, ptr, spec, fmt);
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
+ /* [mM]F (FDDI, bit reversed) */
return mac_address_string(buf, end, ptr, spec, fmt);
case 'I': /* Formatted IP supported
* 4: 1.2.3.4
@@ -853,6 +1028,12 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return ip4_addr_string(buf, end, ptr, spec, fmt);
}
break;
+ case 'U':
+ return uuid_string(buf, end, ptr, spec, fmt);
+ case 'V':
+ return buf + vsnprintf(buf, end - buf,
+ ((struct va_format *)ptr)->fmt,
+ *(((struct va_format *)ptr)->va));
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
@@ -884,7 +1065,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
* @precision: precision of a number
* @qualifier: qualifier of a number (long, size_t, ...)
*/
-static int format_decode(const char *fmt, struct printf_spec *spec)
+static noinline_for_stack
+int format_decode(const char *fmt, struct printf_spec *spec)
{
const char *start = fmt;
@@ -970,8 +1152,8 @@ precision:
qualifier:
/* get the conversion qualifier */
spec->qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
+ if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
+ TOLOWER(*fmt) == 'z' || *fmt == 't') {
spec->qualifier = *fmt++;
if (unlikely(spec->qualifier == *fmt)) {
if (spec->qualifier == 'l') {
@@ -1038,7 +1220,7 @@ qualifier:
spec->type = FORMAT_TYPE_LONG;
else
spec->type = FORMAT_TYPE_ULONG;
- } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') {
+ } else if (TOLOWER(spec->qualifier) == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (spec->qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
@@ -1074,7 +1256,18 @@ qualifier:
* %ps output the name of a text symbol without offset
* %pF output the name of a function pointer with its offset
* %pf output the name of a function pointer without its offset
- * %pR output the address range in a struct resource
+ * %pR output the address range in a struct resource with decoded flags
+ * %pr output the address range in a struct resource with raw flags
+ * %pM output a 6-byte MAC address with colons
+ * %pm output a 6-byte MAC address without colons
+ * %pI4 print an IPv4 address without leading zeros
+ * %pi4 print an IPv4 address with leading zeros
+ * %pI6 print an IPv6 address with colons
+ * %pi6 print an IPv6 address without colons
+ * %pI6c print an IPv6 address as specified by
+ * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
+ * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
+ * case.
* %n is ignored
*
* The return value is the number of characters which would
@@ -1091,8 +1284,7 @@ qualifier:
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
unsigned long long num;
- char *str, *end, c;
- int read;
+ char *str, *end;
struct printf_spec spec = {0};
/* Reject out-of-range values early. Large positive sizes are
@@ -1111,8 +1303,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
while (*fmt) {
const char *old_fmt = fmt;
-
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
@@ -1136,7 +1327,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
spec.precision = va_arg(args, int);
break;
- case FORMAT_TYPE_CHAR:
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
@@ -1155,6 +1348,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
}
break;
+ }
case FORMAT_TYPE_STR:
str = string(str, end, va_arg(args, char *), spec);
@@ -1180,13 +1374,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
break;
case FORMAT_TYPE_NRCHARS: {
- int qualifier = spec.qualifier;
+ u8 qualifier = spec.qualifier;
if (qualifier == 'l') {
long *ip = va_arg(args, long *);
*ip = (str - buf);
- } else if (qualifier == 'Z' ||
- qualifier == 'z') {
+ } else if (TOLOWER(qualifier) == 'z') {
size_t *ip = va_arg(args, size_t *);
*ip = (str - buf);
} else {
@@ -1269,7 +1462,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
- i=vsnprintf(buf,size,fmt,args);
+ i = vsnprintf(buf, size, fmt, args);
+
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(vscnprintf);
@@ -1288,14 +1482,15 @@ EXPORT_SYMBOL(vscnprintf);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
-int snprintf(char * buf, size_t size, const char *fmt, ...)
+int snprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
- i=vsnprintf(buf,size,fmt,args);
+ i = vsnprintf(buf, size, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(snprintf);
@@ -1311,7 +1506,7 @@ EXPORT_SYMBOL(snprintf);
* the trailing '\0'. If @size is <= 0 the function returns 0.
*/
-int scnprintf(char * buf, size_t size, const char *fmt, ...)
+int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
@@ -1319,6 +1514,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...)
va_start(args, fmt);
i = vsnprintf(buf, size, fmt, args);
va_end(args);
+
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(scnprintf);
@@ -1356,14 +1552,15 @@ EXPORT_SYMBOL(vsprintf);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
-int sprintf(char * buf, const char *fmt, ...)
+int sprintf(char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
- i=vsnprintf(buf, INT_MAX, fmt, args);
+ i = vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(sprintf);
@@ -1396,7 +1593,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
{
struct printf_spec spec = {0};
char *str, *end;
- int read;
str = (char *)bin_buf;
end = (char *)(bin_buf + size);
@@ -1421,14 +1617,15 @@ do { \
str += sizeof(type); \
} while (0)
-
while (*fmt) {
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE:
+ case FORMAT_TYPE_INVALID:
+ case FORMAT_TYPE_PERCENT_CHAR:
break;
case FORMAT_TYPE_WIDTH:
@@ -1443,13 +1640,14 @@ do { \
case FORMAT_TYPE_STR: {
const char *save_str = va_arg(args, char *);
size_t len;
+
if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
|| (unsigned long)save_str < PAGE_SIZE)
- save_str = "<NULL>";
- len = strlen(save_str);
- if (str + len + 1 < end)
- memcpy(str, save_str, len + 1);
- str += len + 1;
+ save_str = "(null)";
+ len = strlen(save_str) + 1;
+ if (str + len < end)
+ memcpy(str, save_str, len);
+ str += len;
break;
}
@@ -1460,19 +1658,13 @@ do { \
fmt++;
break;
- case FORMAT_TYPE_PERCENT_CHAR:
- break;
-
- case FORMAT_TYPE_INVALID:
- break;
-
case FORMAT_TYPE_NRCHARS: {
/* skip %n's argument */
- int qualifier = spec.qualifier;
+ u8 qualifier = spec.qualifier;
void *skip_arg;
if (qualifier == 'l')
skip_arg = va_arg(args, long *);
- else if (qualifier == 'Z' || qualifier == 'z')
+ else if (TOLOWER(qualifier) == 'z')
skip_arg = va_arg(args, size_t *);
else
skip_arg = va_arg(args, int *);
@@ -1508,8 +1700,8 @@ do { \
}
}
}
- return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
+ return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
#undef save_arg
}
EXPORT_SYMBOL_GPL(vbin_printf);
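
The string hunk above also tightens the pointer sanity check: anything in the first page (NULL and small offsets) or the last page (the ERR_PTR range) cannot point at a valid string, so "(null)" is packed instead, and the copy length now includes the NUL up front. A hedged sketch of that range test in isolation (the helper name is made up):

/* Sketch only: looks_like_string() is hypothetical, not in this file. */
static bool looks_like_string(const char *p)
{
	unsigned long addr = (unsigned long)p;

	/* Valid candidates lie strictly between the NULL page and the
	 * error-pointer page at the top of the address space. */
	return addr >= PAGE_SIZE && addr <= (unsigned long)-PAGE_SIZE;
}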
@@ -1538,11 +1730,9 @@ EXPORT_SYMBOL_GPL(vbin_printf);
*/
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
{
- unsigned long long num;
- char *str, *end, c;
- const char *args = (const char *)bin_buf;
-
struct printf_spec spec = {0};
+ char *str, *end;
+ const char *args = (const char *)bin_buf;
if (WARN_ON_ONCE((int) size < 0))
return 0;
@@ -1572,10 +1762,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
while (*fmt) {
- int read;
const char *old_fmt = fmt;
-
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
@@ -1599,7 +1787,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
spec.precision = get_arg(int);
break;
- case FORMAT_TYPE_CHAR:
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
@@ -1617,11 +1807,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
++str;
}
break;
+ }
case FORMAT_TYPE_STR: {
const char *str_arg = args;
- size_t len = strlen(str_arg);
- args += len + 1;
+ args += strlen(str_arg) + 1;
str = string(str, end, (char *)str_arg, spec);
break;
}
@@ -1633,11 +1823,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
break;
case FORMAT_TYPE_PERCENT_CHAR:
- if (str < end)
- *str = '%';
- ++str;
- break;
-
case FORMAT_TYPE_INVALID:
if (str < end)
*str = '%';
@@ -1648,15 +1833,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
/* skip */
break;
- default:
+ default: {
+ unsigned long long num;
+
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
num = get_arg(long long);
break;
case FORMAT_TYPE_ULONG:
- num = get_arg(unsigned long);
- break;
case FORMAT_TYPE_LONG:
num = get_arg(unsigned long);
break;
@@ -1686,8 +1871,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
str = number(str, end, num, spec);
- }
- }
+ } /* default: */
+ } /* switch(spec.type) */
+ } /* while(*fmt) */
if (size > 0) {
if (str < end)
@@ -1721,6 +1907,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
va_start(args, fmt);
ret = vbin_printf(bin_buf, size, fmt, args);
va_end(args);
+
return ret;
}
EXPORT_SYMBOL_GPL(bprintf);
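
Taken together with bstr_printf() above, this gives the deferred-formatting pair: pack the arguments cheaply now, render the text later with the same format string. A minimal usage sketch (buffer sizes and the demo function are illustrative; note that vbin_printf()'s size is counted in u32 words):

static void deferred_format_demo(void)
{
	u32 bin[32];
	char out[64];

	/* Hot path: serialize the arguments, no text formatting yet. */
	bprintf(bin, ARRAY_SIZE(bin), "cpu=%d comm=%s", 3, "kworker");

	/* Cold path: expand the packed arguments into readable text. */
	bstr_printf(out, sizeof(out), "cpu=%d comm=%s", bin);
}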
@@ -1733,27 +1920,25 @@ EXPORT_SYMBOL_GPL(bprintf);
* @fmt: format of buffer
* @args: arguments
*/
-int vsscanf(const char * buf, const char * fmt, va_list args)
+int vsscanf(const char *buf, const char *fmt, va_list args)
{
const char *str = buf;
char *next;
char digit;
int num = 0;
- int qualifier;
- int base;
- int field_width;
- int is_sign = 0;
+ u8 qualifier;
+ u8 base;
+ s16 field_width;
+ bool is_sign;
- while(*fmt && *str) {
+ while (*fmt && *str) {
/* skip any white space in format */
/* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
- while (isspace(*fmt))
- ++fmt;
- while (isspace(*str))
- ++str;
+ fmt = skip_spaces(++fmt);
+ str = skip_spaces(str);
}
/* anything that is not a conversion must match exactly */
@@ -1766,12 +1951,12 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
if (!*fmt)
break;
++fmt;
-
+
/* skip this conversion.
* advance both strings to next white space
*/
if (*fmt == '*') {
- while (!isspace(*fmt) && *fmt)
+ while (!isspace(*fmt) && *fmt != '%' && *fmt)
fmt++;
while (!isspace(*str) && *str)
str++;
@@ -1785,8 +1970,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
/* get conversion qualifier */
qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z') {
+ if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
+ TOLOWER(*fmt) == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
@@ -1798,16 +1983,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
}
}
}
- base = 10;
- is_sign = 0;
if (!*fmt || !*str)
break;
- switch(*fmt++) {
+ base = 10;
+ is_sign = 0;
+
+ switch (*fmt++) {
case 'c':
{
- char *s = (char *) va_arg(args,char*);
+ char *s = (char *)va_arg(args, char*);
if (field_width == -1)
field_width = 1;
do {
@@ -1818,17 +2004,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
continue;
case 's':
{
- char *s = (char *) va_arg(args, char *);
- if(field_width == -1)
- field_width = INT_MAX;
+ char *s = (char *)va_arg(args, char *);
+ if (field_width == -1)
+ field_width = SHRT_MAX;
/* first, skip leading white space in buffer */
- while (isspace(*str))
- str++;
+ str = skip_spaces(str);
/* now copy until next white space */
- while (*str && !isspace(*str) && field_width--) {
+ while (*str && !isspace(*str) && field_width--)
*s++ = *str++;
- }
*s = '\0';
num++;
}
@@ -1836,7 +2020,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
case 'n':
/* return number of characters read so far */
{
- int *i = (int *)va_arg(args,int*);
+ int *i = (int *)va_arg(args, int*);
*i = str - buf;
}
continue;
@@ -1848,14 +2032,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
base = 16;
break;
case 'i':
- base = 0;
+ base = 0;
case 'd':
is_sign = 1;
case 'u':
break;
case '%':
/* looking for '%' in str */
- if (*str++ != '%')
+ if (*str++ != '%')
return num;
continue;
default:
@@ -1866,71 +2050,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
/* have some sort of integer conversion.
* first, skip white space in buffer.
*/
- while (isspace(*str))
- str++;
+ str = skip_spaces(str);
digit = *str;
if (is_sign && digit == '-')
digit = *(str + 1);
if (!digit
- || (base == 16 && !isxdigit(digit))
- || (base == 10 && !isdigit(digit))
- || (base == 8 && (!isdigit(digit) || digit > '7'))
- || (base == 0 && !isdigit(digit)))
- break;
+ || (base == 16 && !isxdigit(digit))
+ || (base == 10 && !isdigit(digit))
+ || (base == 8 && (!isdigit(digit) || digit > '7'))
+ || (base == 0 && !isdigit(digit)))
+ break;
- switch(qualifier) {
+ switch (qualifier) {
case 'H': /* that's 'hh' in format */
if (is_sign) {
- signed char *s = (signed char *) va_arg(args,signed char *);
- *s = (signed char) simple_strtol(str,&next,base);
+ signed char *s = (signed char *)va_arg(args, signed char *);
+ *s = (signed char)simple_strtol(str, &next, base);
} else {
- unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
- *s = (unsigned char) simple_strtoul(str, &next, base);
+ unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
+ *s = (unsigned char)simple_strtoul(str, &next, base);
}
break;
case 'h':
if (is_sign) {
- short *s = (short *) va_arg(args,short *);
- *s = (short) simple_strtol(str,&next,base);
+ short *s = (short *)va_arg(args, short *);
+ *s = (short)simple_strtol(str, &next, base);
} else {
- unsigned short *s = (unsigned short *) va_arg(args, unsigned short *);
- *s = (unsigned short) simple_strtoul(str, &next, base);
+ unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
+ *s = (unsigned short)simple_strtoul(str, &next, base);
}
break;
case 'l':
if (is_sign) {
- long *l = (long *) va_arg(args,long *);
- *l = simple_strtol(str,&next,base);
+ long *l = (long *)va_arg(args, long *);
+ *l = simple_strtol(str, &next, base);
} else {
- unsigned long *l = (unsigned long*) va_arg(args,unsigned long*);
- *l = simple_strtoul(str,&next,base);
+ unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
+ *l = simple_strtoul(str, &next, base);
}
break;
case 'L':
if (is_sign) {
- long long *l = (long long*) va_arg(args,long long *);
- *l = simple_strtoll(str,&next,base);
+ long long *l = (long long *)va_arg(args, long long *);
+ *l = simple_strtoll(str, &next, base);
} else {
- unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*);
- *l = simple_strtoull(str,&next,base);
+ unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
+ *l = simple_strtoull(str, &next, base);
}
break;
case 'Z':
case 'z':
{
- size_t *s = (size_t*) va_arg(args,size_t*);
- *s = (size_t) simple_strtoul(str,&next,base);
+ size_t *s = (size_t *)va_arg(args, size_t *);
+ *s = (size_t)simple_strtoul(str, &next, base);
}
break;
default:
if (is_sign) {
- int *i = (int *) va_arg(args, int*);
- *i = (int) simple_strtol(str,&next,base);
+ int *i = (int *)va_arg(args, int *);
+ *i = (int)simple_strtol(str, &next, base);
} else {
- unsigned int *i = (unsigned int*) va_arg(args, unsigned int*);
- *i = (unsigned int) simple_strtoul(str,&next,base);
+ unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
+ *i = (unsigned int)simple_strtoul(str, &next, base);
}
break;
}
@@ -1961,14 +2144,15 @@ EXPORT_SYMBOL(vsscanf);
* @fmt: formatting of buffer
* @...: resulting arguments
*/
-int sscanf(const char * buf, const char * fmt, ...)
+int sscanf(const char *buf, const char *fmt, ...)
{
va_list args;
int i;
- va_start(args,fmt);
- i = vsscanf(buf,fmt,args);
+ va_start(args, fmt);
+ i = vsscanf(buf, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(sscanf);
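
The qualifier block above collapses a doubled 'h' or 'l' into the internal codes 'H' and 'L', which is what makes %hhd fill a signed char and %lld a long long further down, while TOLOWER() folds 'Z' into the size_t case. A small usage sketch (the input string and variable names are made up):

static void scan_qualifier_demo(void)
{
	signed char small;
	long long big;
	size_t count;

	/* 'hh' -> case 'H', 'll' -> case 'L', 'z' -> the size_t case. */
	sscanf("-7 123456789012 99", "%hhd %lld %zu", &small, &big, &count);
}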
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 8550b0c05d00..2c13ecc5bb2c 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -21,12 +21,31 @@
- Pentium III (Anderson)
- M68060 (Nikl)
*/
+union uu {
+ unsigned short us;
+ unsigned char b[2];
+};
+
+/* Endian independent version */
+static inline unsigned short
+get_unaligned16(const unsigned short *p)
+{
+ union uu mm;
+ unsigned char *b = (unsigned char *)p;
+
+ mm.b[0] = b[0];
+ mm.b[1] = b[1];
+ return mm.us;
+}
+
#ifdef POSTINC
# define OFF 0
# define PUP(a) *(a)++
+# define UP_UNALIGNED(a) get_unaligned16((a)++)
#else
# define OFF 1
# define PUP(a) *++(a)
+# define UP_UNALIGNED(a) get_unaligned16(++(a))
#endif
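
UP_UNALIGNED mirrors PUP for 16-bit loads: the same two walking styles (post-increment with OFF 0, pre-increment from one-before with OFF 1), but routed through get_unaligned16() so the load stays safe on machines without efficient unaligned access. A sketch of why the two styles are equivalent (illustrative only; the one-before pointer mirrors what the OFF 1 code does):

static unsigned char pup_walk_demo(unsigned char *b)
{
	unsigned char *post = b;     /* OFF 0 style: PUP(a) is *(a)++ */
	unsigned char *pre = b - 1;  /* OFF 1 style: PUP(a) is *++(a) */
	unsigned char x = *post++;   /* reads b[0], cursor now past it */
	unsigned char y = *++pre;    /* also reads b[0], cursor on it  */

	return x == y;               /* always true: both visited b[0] */
}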
/*
@@ -239,18 +258,50 @@ void inflate_fast(z_streamp strm, unsigned start)
}
}
else {
+ unsigned short *sout;
+ unsigned long loops;
+
from = out - dist; /* copy direct from output */
- do { /* minimum length is three */
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- len -= 3;
- } while (len > 2);
- if (len) {
- PUP(out) = PUP(from);
- if (len > 1)
- PUP(out) = PUP(from);
- }
+ /* minimum length is three */
+ /* Align out addr */
+ if (!((long)(out - 1 + OFF) & 1)) {
+ PUP(out) = PUP(from);
+ len--;
+ }
+ sout = (unsigned short *)(out - OFF);
+ if (dist > 2) {
+ unsigned short *sfrom;
+
+ sfrom = (unsigned short *)(from - OFF);
+ loops = len >> 1;
+ do
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ PUP(sout) = PUP(sfrom);
+#else
+ PUP(sout) = UP_UNALIGNED(sfrom);
+#endif
+ while (--loops);
+ out = (unsigned char *)sout + OFF;
+ from = (unsigned char *)sfrom + OFF;
+ } else { /* dist == 1 or dist == 2 */
+ unsigned short pat16;
+
+ pat16 = *(sout-1+OFF);
+ if (dist == 1) {
+ union uu mm;
+ /* copy one char pattern to both bytes */
+ mm.us = pat16;
+ mm.b[0] = mm.b[1];
+ pat16 = mm.us;
+ }
+ loops = len >> 1;
+ do
+ PUP(sout) = pat16;
+ while (--loops);
+ out = (unsigned char *)sout + OFF;
+ }
+ if (len & 1)
+ PUP(out) = PUP(from);
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
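
For dist == 1 or dist == 2 the source overlaps bytes just written, so the 16-bit copy loop cannot read ahead of the writes; instead the last halfword is turned into a self-repeating pattern (for dist == 1 the single byte is duplicated into both halves via union uu) and stored two bytes per iteration. A standalone sketch of the dist == 1 case, assuming the output pointer is already 2-byte aligned as the alignment step above guarantees:

/* Sketch: replicate one byte, writing two output bytes per store. */
static void fill_byte_pattern(unsigned short *sout, unsigned char c,
			      unsigned long len)
{
	union uu mm;

	mm.b[0] = c;                 /* copy the char into both halves */
	mm.b[1] = c;
	while (len >= 2) {
		*sout++ = mm.us;     /* one aligned 16-bit store */
		len -= 2;
	}
	if (len)                     /* odd trailing byte */
		*(unsigned char *)sout = c;
}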