Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                     |  46
-rw-r--r--  lib/Kconfig.debug               |  70
-rw-r--r--  lib/Makefile                    |   5
-rw-r--r--  lib/assoc_array.c               |   6
-rw-r--r--  lib/atomic64.c                  |  83
-rw-r--r--  lib/bitmap.c                    | 115
-rw-r--r--  lib/cmdline.c                   |  15
-rw-r--r--  lib/crc32.c                     | 153
-rw-r--r--  lib/decompress.c                |   2
-rw-r--r--  lib/decompress_bunzip2.c        |  26
-rw-r--r--  lib/decompress_inflate.c        |  12
-rw-r--r--  lib/decompress_unlz4.c          |  83
-rw-r--r--  lib/decompress_unlzma.c         |  28
-rw-r--r--  lib/decompress_unlzo.c          |  12
-rw-r--r--  lib/decompress_unxz.c           |  10
-rw-r--r--  lib/devres.c                    |  30
-rw-r--r--  lib/dma-debug.c                 |   2
-rw-r--r--  lib/dynamic_debug.c             |  48
-rw-r--r--  lib/flex_proportions.c          |   8
-rw-r--r--  lib/genalloc.c                  |  50
-rw-r--r--  lib/glob.c                      | 287
-rw-r--r--  lib/hexdump.c                   |  16
-rw-r--r--  lib/hweight.c                   |   4
-rw-r--r--  lib/idr.c                       |  27
-rw-r--r--  lib/iovec.c                     |   4
-rw-r--r--  lib/kfifo.c                     |   6
-rw-r--r--  lib/klist.c                     |   6
-rw-r--r--  lib/list_sort.c                 |  71
-rw-r--r--  lib/lru_cache.c                 |  23
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c | 103
-rw-r--r--  lib/net_utils.c                 |  10
-rw-r--r--  lib/percpu-refcount.c           | 299
-rw-r--r--  lib/percpu_counter.c            |  20
-rw-r--r--  lib/proportions.c               |  10
-rw-r--r--  lib/random32.c                  |  86
-rw-r--r--  lib/rbtree.c                    |   2
-rw-r--r--  lib/rhashtable.c                | 794
-rw-r--r--  lib/scatterlist.c               |  29
-rw-r--r--  lib/string.c                    |   4
-rw-r--r--  lib/string_helpers.c            |  15
-rw-r--r--  lib/test-kstrtox.c              |   2
-rw-r--r--  lib/test_bpf.c                  |  89
-rw-r--r--  lib/test_firmware.c             | 117
-rw-r--r--  lib/vsprintf.c                  |   2
-rw-r--r--  lib/zlib_deflate/deflate.c      | 143
-rw-r--r--  lib/zlib_inflate/inflate.c      | 132
46 files changed, 2210 insertions, 895 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 334f7722a999..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
config ARCH_USE_CMPXCHG_LOCKREF
bool
+config ARCH_HAS_FAST_MULTIPLIER
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -396,6 +399,39 @@ config CPU_RMAP
config DQL
bool
+config GLOB
+ bool
+# This actually supports modular compilation, but the module overhead
+# is ridiculous for the amount of code involved. Until an out-of-tree
+# driver asks for it, we'll just link it directly into the kernel
+# when required. Since we're ignoring out-of-tree users, there's also
+# no need to bother prompting for a manual decision:
+# prompt "glob_match() function"
+ help
+ This option provides a glob_match function for performing
+ simple text pattern matching. It originated in the ATA code
+ to blacklist particular drive models, but other device drivers
+ may need similar functionality.
+
+ All drivers in the Linux kernel tree that require this function
+ should automatically select this option. Say N unless you
+ are compiling an out-of-tree driver which tells you that it
+ depends on this.
+
+config GLOB_SELFTEST
+ bool "glob self-test on init"
+ default n
+ depends on GLOB
+ help
+ This option enables a simple self-test of the glob_match
+ function on startup. It is primarily useful for people
+ working on the code to ensure they haven't introduced any
+ regressions.
+
+ It only adds a little bit of code and slows kernel boot (or
+ module load) by a small amount, so you're welcome to play with
+ it, but you probably don't need it.
+
#
# Netlink attribute parsing support is select'ed if needed
#
@@ -451,7 +487,8 @@ config MPILIB
config SIGNATURE
tristate
- depends on KEYS && CRYPTO
+ depends on KEYS
+ select CRYPTO
select CRYPTO_SHA1
select MPILIB
help
@@ -474,4 +511,11 @@ config UCS2_STRING
source "lib/fonts/Kconfig"
+#
+# sg chaining option
+#
+
+config ARCH_HAS_SG_CHAIN
+ def_bool n
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9b94a063e26c..49d5fb754e88 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -15,7 +15,7 @@ config PRINTK_TIME
The behavior is also controlled by the kernel command line
parameter printk.time=1. See Documentation/kernel-parameters.txt
-config DEFAULT_MESSAGE_LOGLEVEL
+config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
default "4"
@@ -143,6 +143,30 @@ config DEBUG_INFO_REDUCED
DEBUG_INFO build and compile times are reduced too.
Only works with newer gcc versions.
+config DEBUG_INFO_SPLIT
+ bool "Produce split debuginfo in .dwo files"
+ depends on DEBUG_INFO
+ help
+ Generate debug info into separate .dwo files. This significantly
+ reduces the build directory size for builds with DEBUG_INFO,
+ because it stores the information only once on disk in .dwo
+ files instead of multiple times in object files and executables.
+ In addition the debug information is also compressed.
+
+ Requires recent gcc (4.7+) and recent gdb/binutils.
+ Any tool that packages or reads debug information would need
+ to know about the .dwo files and include them.
+ Incompatible with older versions of ccache.
+
+config DEBUG_INFO_DWARF4
+ bool "Generate dwarf4 debuginfo"
+ depends on DEBUG_INFO
+ help
+ Generate dwarf4 debug info. This requires recent versions
+ of gcc and gdb. It makes the debug information larger.
+ But it significantly improves the success of resolving
+ variables in gdb on optimized code.
+
config ENABLE_WARN_DEPRECATED
bool "Enable __deprecated logic"
default y
@@ -868,6 +892,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
will test all possible w/w mutex interface abuse with the
exception of simply not acquiring all the required locks.
+ Note that this feature can introduce significant overhead, so
+ it really should not be enabled in a production or distro kernel,
+ even a debug kernel. If you are a driver writer, enable it. If
+ you are a distro, do not.
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
@@ -1008,8 +1036,13 @@ config TRACE_IRQFLAGS
either tracing or lock debugging.
config STACKTRACE
- bool
+ bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
+ help
+ This option causes the kernel to create a /proc/pid/stack for
+ every process, showing its current stack trace.
+ It is also used by various kernel debugging features that require
+ stack trace generation.
config DEBUG_KOBJECT
bool "kobject debugging"
@@ -1536,6 +1569,14 @@ config TEST_STRING_HELPERS
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
+config TEST_RHASHTABLE
+ bool "Perform selftest on resizable hash table"
+ default n
+ help
+ Enable this option to test the rhashtable functions at boot.
+
+ If unsure, say N.
+
endmenu # runtime tests
config PROVIDE_OHCI1394_DMA_INIT
@@ -1631,7 +1672,30 @@ config TEST_BPF
against the BPF interpreter or BPF JIT compiler depending on the
current setting. This is in particular useful for BPF JIT compiler
development, but also to run regression tests against changes in
- the interpreter code.
+ the interpreter code. It also enables test stubs for eBPF maps and
+ verifier used by the user space verifier testsuite.
+
+ If unsure, say N.
+
+config TEST_FIRMWARE
+ tristate "Test firmware loading via userspace interface"
+ default n
+ depends on FW_LOADER
+ help
+ This builds the "test_firmware" module that creates a userspace
+ interface for testing firmware loading. This can be used to
+ control the triggering of firmware loading without needing an
+ actual firmware-using device. The contents can be rechecked by
+ userspace.
+
+ If unsure, say N.
+
+config TEST_UDELAY
+ tristate "udelay test driver"
+ default n
+ help
+ This builds the "udelay_test" module that helps to make sure
+ that udelay() is working properly.
If unsure, say N.
diff --git a/lib/Makefile b/lib/Makefile
index ba967a19edba..d6b4bc496408 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o hash.o
+ percpu-refcount.o percpu_ida.o hash.o rhashtable.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
@@ -34,6 +34,7 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_MODULE) += test_module.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
+obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -136,6 +137,8 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+obj-$(CONFIG_GLOB) += glob.o
+
obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_SIGNATURE) += digsig.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c0b1007011e1..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
- BUG_ON(!ptr);
+ BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+ array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1298c05ef528 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
}
EXPORT_SYMBOL(atomic64_set);
-void atomic64_add(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
-
- raw_spin_lock_irqsave(lock, flags);
- v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
-
- raw_spin_lock_irqsave(lock, flags);
- v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
+#define ATOMIC64_OP(op, c_op) \
+void atomic64_##op(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ v->counter c_op a; \
+ raw_spin_unlock_irqrestore(lock, flags); \
+} \
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+long long atomic64_##op##_return(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ long long val; \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ val = (v->counter c_op a); \
+ raw_spin_unlock_irqrestore(lock, flags); \
+ return val; \
+} \
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
long long atomic64_dec_if_positive(atomic64_t *v)
{
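For reference, the removed open-coded helpers are reproduced exactly by the new macros; ATOMIC64_OP(add, +=) expands to the same function that was deleted above:

    void atomic64_add(long long a, atomic64_t *v)
    {
            unsigned long flags;
            raw_spinlock_t *lock = lock_addr(v);

            raw_spin_lock_irqsave(lock, flags);
            v->counter += a;
            raw_spin_unlock_irqrestore(lock, flags);
    }
    EXPORT_SYMBOL(atomic64_add);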
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06f7e4fe8d2d..cd250a2e14cb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -40,9 +40,9 @@
* for the best explanations of this ordering.
*/
-int __bitmap_empty(const unsigned long *bitmap, int bits)
+int __bitmap_empty(const unsigned long *bitmap, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
@@ -55,9 +55,9 @@ int __bitmap_empty(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_empty);
-int __bitmap_full(const unsigned long *bitmap, int bits)
+int __bitmap_full(const unsigned long *bitmap, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
@@ -71,9 +71,9 @@ int __bitmap_full(const unsigned long *bitmap, int bits)
EXPORT_SYMBOL(__bitmap_full);
int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return 0;
@@ -86,14 +86,14 @@ int __bitmap_equal(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_equal);
-void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
+void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
if (bits % BITS_PER_LONG)
- dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
+ dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
@@ -182,23 +182,26 @@ void __bitmap_shift_left(unsigned long *dst,
EXPORT_SYMBOL(__bitmap_shift_left);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
- for (k = 0; k < nr; k++)
+ for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+ if (bits % BITS_PER_LONG)
+ result |= (dst[k] = bitmap1[k] & bitmap2[k] &
+ BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
@@ -206,10 +209,10 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
@@ -217,22 +220,25 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_xor);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
- for (k = 0; k < nr; k++)
+ for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+ if (bits % BITS_PER_LONG)
+ result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
+ BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);
int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return 1;
@@ -245,9 +251,9 @@ int __bitmap_intersects(const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_intersects);
int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return 0;
@@ -259,9 +265,10 @@ int __bitmap_subset(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_subset);
-int __bitmap_weight(const unsigned long *bitmap, int bits)
+int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
- int k, w = 0, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ int w = 0;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@@ -273,42 +280,42 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
-void bitmap_set(unsigned long *map, int start, int nr)
+void bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned int size = start + len;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_set >= 0) {
+ while (len - bits_to_set >= 0) {
*p |= mask_to_set;
- nr -= bits_to_set;
+ len -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
- if (nr) {
+ if (len) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(bitmap_set);
-void bitmap_clear(unsigned long *map, int start, int nr)
+void bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned int size = start + len;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_clear >= 0) {
+ while (len - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
- nr -= bits_to_clear;
+ len -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
- if (nr) {
+ if (len) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
@@ -664,13 +671,8 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
- char *nl = strchr(bp, '\n');
- int len;
-
- if (nl)
- len = nl - bp;
- else
- len = strlen(bp);
+ char *nl = strchrnul(bp, '\n');
+ int len = nl - bp;
return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
}
@@ -716,7 +718,7 @@ EXPORT_SYMBOL(bitmap_parselist_user);
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
- * and other @pos values will get mapped to 0. When @pos value 7
+ * and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
@@ -882,7 +884,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
- * using the the map { <n, m> | the n-th bit of @relmap is the
+ * using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
@@ -930,7 +932,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
*
* Further lets say we use the following code, invoking
* bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possitility of an empty @dst result:
+ * avoid the possibility of an empty @dst result:
*
* unsigned long *tmp; // a temporary bitmap's bits
*
@@ -1046,7 +1048,7 @@ enum {
REG_OP_RELEASE, /* clear all bits in region */
};
-static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
{
int nbits_reg; /* number of bits in region */
int index; /* index first long of region in bitmap */
@@ -1112,11 +1114,11 @@ done:
* Return the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
-int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
- int pos, end; /* scans bitmap by regions of size order */
+ unsigned int pos, end; /* scans bitmap by regions of size order */
- for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
+ for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
continue;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
@@ -1137,7 +1139,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
*
* No return value.
*/
-void bitmap_release_region(unsigned long *bitmap, int pos, int order)
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
__reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
@@ -1154,12 +1156,11 @@ EXPORT_SYMBOL(bitmap_release_region);
* Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
-int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
return -EBUSY;
- __reg_op(bitmap, pos, order, REG_OP_ALLOC);
- return 0;
+ return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
}
EXPORT_SYMBOL(bitmap_allocate_region);
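A minimal usage sketch for the re-typed bitmap_set()/bitmap_clear() above (the map size and offsets here are made up for illustration):

    DECLARE_BITMAP(map, 128);

    bitmap_zero(map, 128);
    bitmap_set(map, 10, 20);        /* set bits 10..29 */
    bitmap_clear(map, 15, 5);       /* clear bits 15..19 again */
    /* bitmap_weight(map, 128) now returns 15 */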
diff --git a/lib/cmdline.c b/lib/cmdline.c
index d4932f745e92..76a712e6e20e 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -121,11 +121,7 @@ EXPORT_SYMBOL(get_options);
* @retptr: (output) Optional pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
- * potentially suffixed with %K (for kilobytes, or 1024 bytes),
- * %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
- * 1073741824). If the number is suffixed with K, M, or G, then
- * the return value is the number multiplied by one kilobyte, one
- * megabyte, or one gigabyte, respectively.
+ * potentially suffixed with K, M, G, T, P, E.
*/
unsigned long long memparse(const char *ptr, char **retptr)
@@ -135,6 +131,15 @@ unsigned long long memparse(const char *ptr, char **retptr)
unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
switch (*endptr) {
+ case 'E':
+ case 'e':
+ ret <<= 10;
+ case 'P':
+ case 'p':
+ ret <<= 10;
+ case 'T':
+ case 't':
+ ret <<= 10;
case 'G':
case 'g':
ret <<= 10;
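With the extra fall-through cases above, memparse() now also accepts T, P and E suffixes. A small sketch of the resulting values (the input strings are illustrative):

    char *end;
    unsigned long long bytes;

    bytes = memparse("16M", &end);  /* 16 << 20 */
    bytes = memparse("2G", &end);   /* 2ULL << 30 */
    bytes = memparse("1T", &end);   /* 1ULL << 40, newly accepted */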
diff --git a/lib/crc32.c b/lib/crc32.c
index 21a7b2135af6..9a907d489d95 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
-#define GF2_DIM 32
-
-static u32 gf2_matrix_times(u32 *mat, u32 vec)
-{
- u32 sum = 0;
-
- while (vec) {
- if (vec & 1)
- sum ^= *mat;
- vec >>= 1;
- mat++;
- }
-
- return sum;
-}
-
-static void gf2_matrix_square(u32 *square, u32 *mat)
-{
- int i;
-
- for (i = 0; i < GF2_DIM; i++)
- square[i] = gf2_matrix_times(mat, mat[i]);
-}
-
#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
/* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32
+static inline u32 __pure
crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
{
# ifdef __LITTLE_ENDIAN
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
}
#endif
-/* For conditions of distribution and use, see copyright notice in zlib.h */
-static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
- u32 polynomial)
-{
- u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
- u32 odd[GF2_DIM]; /* Odd-power-of-two zeros operator */
- u32 row;
- int i;
-
- if (len2 <= 0)
- return crc1;
-
- /* Put operator for one zero bit in odd */
- odd[0] = polynomial;
- row = 1;
- for (i = 1; i < GF2_DIM; i++) {
- odd[i] = row;
- row <<= 1;
- }
-
- gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
- gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
-
- /* Apply len2 zeros to crc1 (first square will put the operator for one
- * zero byte, eight zero bits, in even).
- */
- do {
- /* Apply zeros operator for this bit of len2 */
- gf2_matrix_square(even, odd);
- if (len2 & 1)
- crc1 = gf2_matrix_times(even, crc1);
- len2 >>= 1;
- /* If no more bits set, then done */
- if (len2 == 0)
- break;
- /* Another iteration of the loop with odd and even swapped */
- gf2_matrix_square(odd, even);
- if (len2 & 1)
- crc1 = gf2_matrix_times(odd, crc1);
- len2 >>= 1;
- } while (len2 != 0);
-
- crc1 ^= crc2;
- return crc1;
-}
/**
* crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
-u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(__crc32c_le);
+
+/*
+ * This multiplies the polynomials x and y modulo the given modulus.
+ * This follows the "little-endian" CRC convention that the lsbit
+ * represents the highest power of x, and the msbit represents x^0.
+ */
+static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
{
- return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
+ u32 product = x & 1 ? y : 0;
+ int i;
+
+ for (i = 0; i < 31; i++) {
+ product = (product >> 1) ^ (product & 1 ? modulus : 0);
+ x >>= 1;
+ product ^= x & 1 ? y : 0;
+ }
+
+ return product;
}
-u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+/**
+ * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
+ * @len: The number of bytes. @crc is multiplied by x^(8*@len)
+ * @polynomial: The modulus used to reduce the result to 32 bits.
+ *
+ * It's possible to parallelize CRC computations by computing a CRC
+ * over separate ranges of a buffer, then summing them.
+ * This shifts the given CRC by 8*len bits (i.e. produces the same effect
+ * as appending len bytes of zero to the data), in time proportional
+ * to log(len).
+ */
+static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+ u32 polynomial)
{
- return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
+ u32 power = polynomial; /* CRC of x^32 */
+ int i;
+
+ /* Shift up to 32 bits in the simple linear way */
+ for (i = 0; i < 8 * (int)(len & 3); i++)
+ crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
+
+ len >>= 2;
+ if (!len)
+ return crc;
+
+ for (;;) {
+ /* "power" is x^(2^i), modulo the polynomial */
+ if (len & 1)
+ crc = gf2_multiply(crc, power, polynomial);
+
+ len >>= 1;
+ if (!len)
+ break;
+
+ /* Square power, advancing to x^(2^(i+1)) */
+ power = gf2_multiply(power, power, polynomial);
+ }
+
+ return crc;
}
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(crc32_le_combine);
-EXPORT_SYMBOL(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_combine);
+
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRCPOLY_LE);
+}
+
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
+}
+EXPORT_SYMBOL(crc32_le_shift);
+EXPORT_SYMBOL(__crc32c_le_shift);
/**
* crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be);
#ifdef CONFIG_CRC32_SELFTEST
/* 4096 random bytes */
-static u8 __attribute__((__aligned__(8))) test_buf[] =
+static u8 const __aligned(8) test_buf[] __initconst =
{
0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
@@ -875,7 +868,7 @@ static struct crc_test {
u32 crc_le; /* expected crc32_le result */
u32 crc_be; /* expected crc32_be result */
u32 crc32c_le; /* expected crc32c_le result */
-} test[] =
+} const test[] __initconst =
{
{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
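The crc32_le_shift()/__crc32c_le_shift() primitives above replace the matrix-based combine helpers. A combine operation for two independently computed CRCs can still be built on top of the shift; a hedged sketch (the helper name is made up, and it assumes the CRC of the second range was started from 0):

    /* CRC of buf1||buf2 from per-range CRCs; crc2 computed with init 0 */
    static u32 combine_le_crcs(u32 crc1, u32 crc2, size_t len2)
    {
            return crc32_le_shift(crc1, len2) ^ crc2;
    }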
diff --git a/lib/decompress.c b/lib/decompress.c
index 86069d74c062..37f3c786348f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -54,7 +54,7 @@ static const struct compress_format compressed_formats[] __initconst = {
{ {0, 0}, NULL, NULL }
};
-decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
const char **name)
{
const struct compress_format *cf;
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 31c5f7675fbf..8290e0bef7ea 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -92,8 +92,8 @@ struct bunzip_data {
/* State for interrupting output loop */
int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
/* I/O tracking data (file handles, buffers, positions, etc.) */
- int (*fill)(void*, unsigned int);
- int inbufCount, inbufPos /*, outbufPos*/;
+ long (*fill)(void*, unsigned long);
+ long inbufCount, inbufPos /*, outbufPos*/;
unsigned char *inbuf /*,*outbuf*/;
unsigned int inbufBitCount, inbufBits;
/* The CRC values stored in the block header and calculated from the
@@ -617,7 +617,7 @@ decode_next_byte:
goto decode_next_byte;
}
-static int INIT nofill(void *buf, unsigned int len)
+static long INIT nofill(void *buf, unsigned long len)
{
return -1;
}
@@ -625,8 +625,8 @@ static int INIT nofill(void *buf, unsigned int len)
/* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain
a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
ignored, and data is read from file handle into temporary buffer. */
-static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
- int (*fill)(void*, unsigned int))
+static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
+ long (*fill)(void*, unsigned long))
{
struct bunzip_data *bd;
unsigned int i, j, c;
@@ -675,11 +675,11 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data,
not end of file.) */
-STATIC int INIT bunzip2(unsigned char *buf, int len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT bunzip2(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *outbuf,
- int *pos,
+ long *pos,
void(*error)(char *x))
{
struct bunzip_data *bd;
@@ -743,11 +743,11 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, int len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *outbuf,
- int *pos,
+ long *pos,
void(*error)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
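The bunzip2()/decompress() prototypes above (and the matching changes in the other decompressors below) switch the fill/flush callbacks from int to long so large byte counts are not truncated. A caller-side sketch of a fill routine matching the new signature, feeding from an in-memory image (my_image/my_image_len are hypothetical):

    static const u8 *my_image;              /* assumed, set up elsewhere */
    static unsigned long my_image_len;      /* assumed */

    static long example_fill(void *buf, unsigned long len)
    {
            static unsigned long pos;
            unsigned long n = min(len, my_image_len - pos);

            memcpy(buf, my_image + pos, n);
            pos += n;
            return n;       /* a return shorter than len is treated as
                               end of data or corruption by the callers */
    }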
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 0edfd742a154..d4c7891635ec 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -27,17 +27,17 @@
#define GZIP_IOBUF_SIZE (16*1024)
-static int INIT nofill(void *buffer, unsigned int len)
+static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
/* Included from initramfs et al code */
-STATIC int INIT gunzip(unsigned char *buf, int len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT gunzip(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *out_buf,
- int *pos,
+ long *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
@@ -142,7 +142,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
/* Write any data generated */
if (flush && strm->next_out > out_buf) {
- int l = strm->next_out - out_buf;
+ long l = strm->next_out - out_buf;
if (l != flush(out_buf, l)) {
rc = -1;
error("write error");
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 7d1e83caf8ad..40f66ebe57b7 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -31,10 +31,10 @@
#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20)
#define ARCHIVE_MAGICNUMBER 0x184C2102
-STATIC inline int INIT unlz4(u8 *input, int in_len,
- int (*fill) (void *, unsigned int),
- int (*flush) (void *, unsigned int),
- u8 *output, int *posp,
+STATIC inline int INIT unlz4(u8 *input, long in_len,
+ long (*fill)(void *, unsigned long),
+ long (*flush)(void *, unsigned long),
+ u8 *output, long *posp,
void (*error) (char *x))
{
int ret = -1;
@@ -43,7 +43,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
u8 *inp;
u8 *inp_start;
u8 *outp;
- int size = in_len;
+ long size = in_len;
#ifdef PREBOOT
size_t out_len = get_unaligned_le32(input + in_len);
#endif
@@ -83,13 +83,20 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
if (posp)
*posp = 0;
- if (fill)
- fill(inp, 4);
+ if (fill) {
+ size = fill(inp, 4);
+ if (size < 4) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ }
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
- inp += 4;
- size -= 4;
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ }
} else {
error("invalid header");
goto exit_2;
@@ -100,29 +107,44 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
for (;;) {
- if (fill)
- fill(inp, 4);
+ if (fill) {
+ size = fill(inp, 4);
+ if (size == 0)
+ break;
+ if (size < 4) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ }
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
- inp += 4;
- size -= 4;
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ }
if (posp)
*posp += 4;
continue;
}
- inp += 4;
- size -= 4;
+
if (posp)
*posp += 4;
- if (fill) {
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ } else {
if (chunksize > lz4_compressbound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
- fill(inp, chunksize);
+ size = fill(inp, chunksize);
+ if (size < chunksize) {
+ error("data corrupted");
+ goto exit_2;
+ }
}
#ifdef PREBOOT
if (out_len >= uncomp_chunksize) {
@@ -149,18 +171,17 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
if (posp)
*posp += chunksize;
- size -= chunksize;
+ if (!fill) {
+ size -= chunksize;
- if (size == 0)
- break;
- else if (size < 0) {
- error("data corrupted");
- goto exit_2;
+ if (size == 0)
+ break;
+ else if (size < 0) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ inp += chunksize;
}
-
- inp += chunksize;
- if (fill)
- inp = inp_start;
}
ret = 0;
@@ -175,11 +196,11 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, int in_len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT decompress(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *output,
- int *posp,
+ long *posp,
void(*error)(char *x)
)
{
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 32adb73a9038..0be83af62b88 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -65,11 +65,11 @@ static long long INIT read_int(unsigned char *ptr, int size)
#define LZMA_IOBUF_SIZE 0x10000
struct rc {
- int (*fill)(void*, unsigned int);
+ long (*fill)(void*, unsigned long);
uint8_t *ptr;
uint8_t *buffer;
uint8_t *buffer_end;
- int buffer_size;
+ long buffer_size;
uint32_t code;
uint32_t range;
uint32_t bound;
@@ -82,7 +82,7 @@ struct rc {
#define RC_MODEL_TOTAL_BITS 11
-static int INIT nofill(void *buffer, unsigned int len)
+static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
@@ -99,8 +99,8 @@ static void INIT rc_read(struct rc *rc)
/* Called once */
static inline void INIT rc_init(struct rc *rc,
- int (*fill)(void*, unsigned int),
- char *buffer, int buffer_size)
+ long (*fill)(void*, unsigned long),
+ char *buffer, long buffer_size)
{
if (fill)
rc->fill = fill;
@@ -280,7 +280,7 @@ struct writer {
size_t buffer_pos;
int bufsize;
size_t global_pos;
- int(*flush)(void*, unsigned int);
+ long (*flush)(void*, unsigned long);
struct lzma_header *header;
};
@@ -534,11 +534,11 @@ static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
-STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC inline int INIT unlzma(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *output,
- int *posp,
+ long *posp,
void(*error)(char *x)
)
{
@@ -667,11 +667,11 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, int in_len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT decompress(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *output,
- int *posp,
+ long *posp,
void(*error)(char *x)
)
{
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 960183d4258f..b94a31bdd87d 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -51,7 +51,7 @@ static const unsigned char lzop_magic[] = {
#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4)
#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
-STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
+STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
{
int l;
u8 *parse = input;
@@ -108,14 +108,14 @@ STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
return 1;
}
-STATIC inline int INIT unlzo(u8 *input, int in_len,
- int (*fill) (void *, unsigned int),
- int (*flush) (void *, unsigned int),
- u8 *output, int *posp,
+STATIC int INIT unlzo(u8 *input, long in_len,
+ long (*fill)(void *, unsigned long),
+ long (*flush)(void *, unsigned long),
+ u8 *output, long *posp,
void (*error) (char *x))
{
u8 r = 0;
- int skip = 0;
+ long skip = 0;
u32 src_len, dst_len;
size_t tmp;
u8 *in_buf, *in_buf_save, *out_buf;
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 9f34eb56854d..b07a78340e9d 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -248,10 +248,10 @@ void *memmove(void *dest, const void *src, size_t size)
* both input and output buffers are available as a single chunk, i.e. when
* fill() and flush() won't be used.
*/
-STATIC int INIT unxz(unsigned char *in, int in_size,
- int (*fill)(void *dest, unsigned int size),
- int (*flush)(void *src, unsigned int size),
- unsigned char *out, int *in_used,
+STATIC int INIT unxz(unsigned char *in, long in_size,
+ long (*fill)(void *dest, unsigned long size),
+ long (*flush)(void *src, unsigned long size),
+ unsigned char *out, long *in_used,
void (*error)(char *x))
{
struct xz_buf b;
@@ -329,7 +329,7 @@ STATIC int INIT unxz(unsigned char *in, int in_size,
* returned by xz_dec_run(), but probably
* it's not too bad.
*/
- if (flush(b.out, b.out_pos) != (int)b.out_pos)
+ if (flush(b.out, b.out_pos) != (long)b.out_pos)
ret = XZ_BUF_ERROR;
b.out_pos = 0;
diff --git a/lib/devres.c b/lib/devres.c
index f562bf6ff71d..f4a195a6efe4 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -86,8 +86,6 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
}
EXPORT_SYMBOL(devm_iounmap);
-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
-
/**
* devm_ioremap_resource() - check, request region, and ioremap resource
* @dev: generic device to handle the resource for
@@ -142,34 +140,6 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
EXPORT_SYMBOL(devm_ioremap_resource);
-/**
- * devm_request_and_ioremap() - Check, request region, and ioremap resource
- * @dev: Generic device to handle the resource for
- * @res: resource to be handled
- *
- * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
- * everything is undone on driver detach. Checks arguments, so you can feed
- * it the result from e.g. platform_get_resource() directly. Returns the
- * remapped pointer or NULL on error. Usage example:
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_request_and_ioremap(&pdev->dev, res);
- * if (!base)
- * return -EADDRNOTAVAIL;
- */
-void __iomem *devm_request_and_ioremap(struct device *dev,
- struct resource *res)
-{
- void __iomem *dest_ptr;
-
- dest_ptr = devm_ioremap_resource(dev, res);
- if (IS_ERR(dest_ptr))
- return NULL;
-
- return dest_ptr;
-}
-EXPORT_SYMBOL(devm_request_and_ioremap);
-
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
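With devm_request_and_ioremap() removed above, the usage example from its deleted kerneldoc maps onto devm_ioremap_resource() roughly as follows (errors are now reported as ERR_PTR values rather than NULL):

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
            return PTR_ERR(base);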
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 98f2d7e91a91..add80cc02dbe 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1149,7 +1149,7 @@ static void check_unmap(struct dma_debug_entry *ref)
static void check_for_stack(struct device *dev, void *addr)
{
if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from "
"stack [addr=%p]\n", addr);
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7288e38e1757..31fe79e31ab8 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -537,10 +537,9 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
-int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
- int res;
struct va_format vaf;
char buf[PREFIX_SIZE];
@@ -552,21 +551,17 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- res = printk(KERN_DEBUG "%s%pV",
- dynamic_emit_prefix(descriptor, buf), &vaf);
+ printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
-int __dynamic_dev_dbg(struct _ddebug *descriptor,
+void __dynamic_dev_dbg(struct _ddebug *descriptor,
const struct device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -577,30 +572,27 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (!dev) {
- res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev, "%s%s %s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev), dev_name(dev),
- &vaf);
+ dev_printk_emit(7, dev, "%s%s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev), dev_name(dev),
+ &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_dev_dbg);
#ifdef CONFIG_NET
-int __dynamic_netdev_dbg(struct _ddebug *descriptor,
- const struct net_device *dev, const char *fmt, ...)
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -613,21 +605,21 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
if (dev && dev->dev.parent) {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev->dev.parent,
- "%s%s %s %s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev->dev.parent),
- dev_name(dev->dev.parent),
- netdev_name(dev), &vaf);
+ dev_printk_emit(7, dev->dev.parent,
+ "%s%s %s %s%s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
} else if (dev) {
- res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+ printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
} else {
- res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_netdev_dbg);
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index ebf3bac460b0..8f25652f40d4 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -34,13 +34,13 @@
*/
#include <linux/flex_proportions.h>
-int fprop_global_init(struct fprop_global *p)
+int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
int err;
p->period = 0;
/* Use 1 to avoid dealing with periods with 0 events... */
- err = percpu_counter_init(&p->events, 1);
+ err = percpu_counter_init(&p->events, 1, gfp);
if (err)
return err;
seqcount_init(&p->sequence);
@@ -168,11 +168,11 @@ void fprop_fraction_single(struct fprop_global *p,
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int fprop_local_init_percpu(struct fprop_local_percpu *pl)
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
int err;
- err = percpu_counter_init(&pl->events, 0);
+ err = percpu_counter_init(&pl->events, 0, gfp);
if (err)
return err;
pl->period = 0;
diff --git a/lib/genalloc.c b/lib/genalloc.c
index bdb9a456bcbb..cce4dd68c40d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -403,6 +403,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
+ * addr_in_gen_pool - checks if an address falls within the range of a pool
+ * @pool: the generic memory pool
+ * @start: start address
+ * @size: size of the region
+ *
+ * Check if the range of addresses falls within the specified pool. Returns
+ * true if the entire range is contained in the pool and false otherwise.
+ */
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+ size_t size)
+{
+ bool found = false;
+ unsigned long end = start + size;
+ struct gen_pool_chunk *chunk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
+ if (start >= chunk->start_addr && start <= chunk->end_addr) {
+ if (end <= chunk->end_addr) {
+ found = true;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+ return found;
+}
+
+/**
* gen_pool_avail - get available free space of the pool
* @pool: pool to get available free space
*
@@ -481,6 +510,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
EXPORT_SYMBOL(gen_pool_first_fit);
/**
+ * gen_pool_first_fit_order_align - find the first available region
+ * of memory matching the size requirement. The region will be aligned
+ * to the order of the size specified.
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+ unsigned long size, unsigned long start,
+ unsigned int nr, void *data)
+{
+ unsigned long align_mask = roundup_pow_of_two(nr) - 1;
+
+ return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_order_align);
+
+/**
* gen_pool_best_fit - find the best fitting region of memory
* matching the size requirement (no alignment constraint)
* @map: The address to base the search on
@@ -588,6 +637,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
if (!np_pool)
return NULL;
pdev = of_find_device_by_node(np_pool);
+ of_node_put(np_pool);
if (!pdev)
return NULL;
return dev_get_gen_pool(&pdev->dev);
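A brief sketch of how the two genalloc additions above might be used together by a caller (the pool parameters and the vaddr/size variables are illustrative):

    struct gen_pool *pool = gen_pool_create(ilog2(256), -1);

    /* hand out naturally aligned, power-of-two sized regions */
    gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

    ...

    if (addr_in_gen_pool(pool, vaddr, size))
            gen_pool_free(pool, vaddr, size);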
diff --git a/lib/glob.c b/lib/glob.c
new file mode 100644
index 000000000000..500fc80d23e1
--- /dev/null
+++ b/lib/glob.c
@@ -0,0 +1,287 @@
+#include <linux/module.h>
+#include <linux/glob.h>
+
+/*
+ * The only reason this code can be compiled as a module is because the
+ * ATA code that depends on it can be as well. In practice, they're
+ * both usually compiled in and the module overhead goes away.
+ */
+MODULE_DESCRIPTION("glob(7) matching");
+MODULE_LICENSE("Dual MIT/GPL");
+
+/**
+ * glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
+ * @pat: Shell-style pattern to match, e.g. "*.[ch]".
+ * @str: String to match. The pattern must match the entire string.
+ *
+ * Perform shell-style glob matching, returning true (1) if the match
+ * succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
+ *
+ * Pattern metacharacters are ?, *, [ and \.
+ * (And, inside character classes, !, - and ].)
+ *
+ * This is a small and simple implementation intended for device blacklists
+ * where a string is matched against a number of patterns. Thus, it
+ * does not preprocess the patterns. It is non-recursive, and run-time
+ * is at most quadratic: strlen(@str)*strlen(@pat).
+ *
+ * An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
+ * it takes 6 passes over the pattern before matching the string.
+ *
+ * Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
+ * treat / or leading . specially; it isn't actually used for pathnames.
+ *
+ * Note that according to glob(7) (and unlike bash), character classes
+ * are complemented by a leading !; this does not support the regex-style
+ * [^a-z] syntax.
+ *
+ * An opening bracket without a matching close is matched literally.
+ */
+bool __pure glob_match(char const *pat, char const *str)
+{
+ /*
+ * Backtrack to previous * on mismatch and retry starting one
+ * character later in the string. Because * matches all characters
+ * (no exception for /), it can be easily proved that there's
+ * never a need to backtrack multiple levels.
+ */
+ char const *back_pat = NULL, *back_str = back_str;
+
+ /*
+ * Loop over each token (character or class) in pat, matching
+ * it against the remaining unmatched tail of str. Return false
+ * on mismatch, or true after matching the trailing nul bytes.
+ */
+ for (;;) {
+ unsigned char c = *str++;
+ unsigned char d = *pat++;
+
+ switch (d) {
+ case '?': /* Wildcard: anything but nul */
+ if (c == '\0')
+ return false;
+ break;
+ case '*': /* Any-length wildcard */
+ if (*pat == '\0') /* Optimize trailing * case */
+ return true;
+ back_pat = pat;
+ back_str = --str; /* Allow zero-length match */
+ break;
+ case '[': { /* Character class */
+ bool match = false, inverted = (*pat == '!');
+ char const *class = pat + inverted;
+ unsigned char a = *class++;
+
+ /*
+ * Iterate over each span in the character class.
+ * A span is either a single character a, or a
+ * range a-b. The first span may begin with ']'.
+ */
+ do {
+ unsigned char b = a;
+
+ if (a == '\0') /* Malformed */
+ goto literal;
+
+ if (class[0] == '-' && class[1] != ']') {
+ b = class[1];
+
+ if (b == '\0')
+ goto literal;
+
+ class += 2;
+ /* Any special action if a > b? */
+ }
+ match |= (a <= c && c <= b);
+ } while ((a = *class++) != ']');
+
+ if (match == inverted)
+ goto backtrack;
+ pat = class;
+ }
+ break;
+ case '\\':
+ d = *pat++;
+ /*FALLTHROUGH*/
+ default: /* Literal character */
+literal:
+ if (c == d) {
+ if (d == '\0')
+ return true;
+ break;
+ }
+backtrack:
+ if (c == '\0' || !back_pat)
+ return false; /* No point continuing */
+ /* Try again from last *, one character later in str. */
+ pat = back_pat;
+ str = ++back_str;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(glob_match);
+
+
+#ifdef CONFIG_GLOB_SELFTEST
+
+#include <linux/printk.h>
+#include <linux/moduleparam.h>
+
+/* Boot with "glob.verbose=1" to show successful tests, too */
+static bool verbose = false;
+module_param(verbose, bool, 0);
+
+struct glob_test {
+ char const *pat, *str;
+ bool expected;
+};
+
+static bool __pure __init test(char const *pat, char const *str, bool expected)
+{
+ bool match = glob_match(pat, str);
+ bool success = match == expected;
+
+ /* Can't get string literals into a particular section, so... */
+ static char const msg_error[] __initconst =
+ KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
+ static char const msg_ok[] __initconst =
+ KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
+ static char const mismatch[] __initconst = "mismatch";
+ char const *message;
+
+ if (!success)
+ message = msg_error;
+ else if (verbose)
+ message = msg_ok;
+ else
+ return success;
+
+ printk(message, pat, str, mismatch + 3*match);
+ return success;
+}
+
+/*
+ * The tests are all jammed together in one array to make it simpler
+ * to place that array in the .init.rodata section. The obvious
+ * "array of structures containing char *" has no way to force the
+ * pointed-to strings to be in a particular section.
+ *
+ * Anyway, a test consists of:
+ * 1. Expected glob_match result: '1' or '0'.
+ * 2. Pattern to match: null-terminated string
+ * 3. String to match against: null-terminated string
+ *
+ * The list of tests is terminated with a final '\0' instead of
+ * a glob_match result character.
+ */
+static char const glob_tests[] __initconst =
+ /* Some basic tests */
+ "1" "a\0" "a\0"
+ "0" "a\0" "b\0"
+ "0" "a\0" "aa\0"
+ "0" "a\0" "\0"
+ "1" "\0" "\0"
+ "0" "\0" "a\0"
+ /* Simple character class tests */
+ "1" "[a]\0" "a\0"
+ "0" "[a]\0" "b\0"
+ "0" "[!a]\0" "a\0"
+ "1" "[!a]\0" "b\0"
+ "1" "[ab]\0" "a\0"
+ "1" "[ab]\0" "b\0"
+ "0" "[ab]\0" "c\0"
+ "1" "[!ab]\0" "c\0"
+ "1" "[a-c]\0" "b\0"
+ "0" "[a-c]\0" "d\0"
+ /* Corner cases in character class parsing */
+ "1" "[a-c-e-g]\0" "-\0"
+ "0" "[a-c-e-g]\0" "d\0"
+ "1" "[a-c-e-g]\0" "f\0"
+ "1" "[]a-ceg-ik[]\0" "a\0"
+ "1" "[]a-ceg-ik[]\0" "]\0"
+ "1" "[]a-ceg-ik[]\0" "[\0"
+ "1" "[]a-ceg-ik[]\0" "h\0"
+ "0" "[]a-ceg-ik[]\0" "f\0"
+ "0" "[!]a-ceg-ik[]\0" "h\0"
+ "0" "[!]a-ceg-ik[]\0" "]\0"
+ "1" "[!]a-ceg-ik[]\0" "f\0"
+ /* Simple wild cards */
+ "1" "?\0" "a\0"
+ "0" "?\0" "aa\0"
+ "0" "??\0" "a\0"
+ "1" "?x?\0" "axb\0"
+ "0" "?x?\0" "abx\0"
+ "0" "?x?\0" "xab\0"
+ /* Asterisk wild cards (backtracking) */
+ "0" "*??\0" "a\0"
+ "1" "*??\0" "ab\0"
+ "1" "*??\0" "abc\0"
+ "1" "*??\0" "abcd\0"
+ "0" "??*\0" "a\0"
+ "1" "??*\0" "ab\0"
+ "1" "??*\0" "abc\0"
+ "1" "??*\0" "abcd\0"
+ "0" "?*?\0" "a\0"
+ "1" "?*?\0" "ab\0"
+ "1" "?*?\0" "abc\0"
+ "1" "?*?\0" "abcd\0"
+ "1" "*b\0" "b\0"
+ "1" "*b\0" "ab\0"
+ "0" "*b\0" "ba\0"
+ "1" "*b\0" "bb\0"
+ "1" "*b\0" "abb\0"
+ "1" "*b\0" "bab\0"
+ "1" "*bc\0" "abbc\0"
+ "1" "*bc\0" "bc\0"
+ "1" "*bc\0" "bbc\0"
+ "1" "*bc\0" "bcbc\0"
+ /* Multiple asterisks (complex backtracking) */
+ "1" "*ac*\0" "abacadaeafag\0"
+ "1" "*ac*ae*ag*\0" "abacadaeafag\0"
+ "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
+ "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
+ "1" "*abcd*\0" "abcabcabcabcdefg\0"
+ "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
+ "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
+ "0" "*abcd*\0" "abcabcabcabcefg\0"
+ "0" "*ab*cd*\0" "abcabcabcabcefg\0";
+
+static int __init glob_init(void)
+{
+ unsigned successes = 0;
+ unsigned n = 0;
+ char const *p = glob_tests;
+ static char const message[] __initconst =
+ KERN_INFO "glob: %u self-tests passed, %u failed\n";
+
+ /*
+ * Tests are jammed together in a string. The first byte is '1'
+ * or '0' to indicate the expected outcome, or '\0' to indicate the
+ * end of the tests. Then come two null-terminated strings: the
+ * pattern and the string to match it against.
+ */
+ while (*p) {
+ bool expected = *p++ & 1;
+ char const *pat = p;
+
+ p += strlen(p) + 1;
+ successes += test(pat, p, expected);
+ p += strlen(p) + 1;
+ n++;
+ }
+
+ n -= successes;
+ printk(message, successes, n);
+
+ /* What's the errno for "kernel bug detected"? Guess... */
+ return n ? -ECANCELED : 0;
+}
+
+/* We need a dummy exit function to allow unload */
+static void __exit glob_fini(void) { }
+
+module_init(glob_init);
+module_exit(glob_fini);
+
+#endif /* CONFIG_GLOB_SELFTEST */
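A minimal caller-side sketch of the new glob_match() API (the patterns and model strings below are made up, not taken from any real blacklist):

    static const char * const blacklist[] = {
            "EXAMPLE DISK??00-*",
            "OTHER MODEL 123",
    };

    static bool model_is_blacklisted(const char *model)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(blacklist); i++)
                    if (glob_match(blacklist[i], model))
                            return true;
            return false;
    }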
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 8499c810909a..270773b91923 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -59,6 +59,22 @@ int hex2bin(u8 *dst, const char *src, size_t count)
EXPORT_SYMBOL(hex2bin);
/**
+ * bin2hex - convert binary data to an ascii hexadecimal string
+ * @dst: ascii hexadecimal result
+ * @src: binary data
+ * @count: binary data length
+ */
+char *bin2hex(char *dst, const void *src, size_t count)
+{
+ const unsigned char *_src = src;
+
+ while (count--)
+ dst = hex_byte_pack(dst, *_src++);
+ return dst;
+}
+EXPORT_SYMBOL(bin2hex);
+
+/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
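The new bin2hex() above is the inverse of hex2bin(); a short sketch (note that it advances and returns the output pointer without NUL-terminating):

    u8 mac[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };
    char buf[2 * sizeof(mac) + 1];
    char *p;

    p = bin2hex(buf, mac, sizeof(mac));
    *p = '\0';
    /* buf now holds "001b21abcdef" */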
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d1..9a5c1f221558 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
unsigned int __sw_hweight32(unsigned int w)
{
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
diff --git a/lib/idr.c b/lib/idr.c
index 39158abebad1..e654aebd5f80 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -590,26 +590,27 @@ static void __idr_remove_all(struct idr *idp)
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = idp->top;
+ *paa = idp->top;
RCU_INIT_POINTER(idp->top, NULL);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
+ p = *paa;
while (n > IDR_BITS && p) {
n -= IDR_BITS;
- *paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
+ *++paa = p;
}
bt_mask = id;
id += 1 << n;
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
- if (p)
- free_layer(idp, p);
+ if (*paa)
+ free_layer(idp, *paa);
n += IDR_BITS;
- p = *--paa;
+ --paa;
}
}
idp->layers = 0;
@@ -625,7 +626,7 @@ static void __idr_remove_all(struct idr *idp)
* idr_destroy().
*
* A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free up the id mappings and cached idr_layers.
*/
void idr_destroy(struct idr *idp)
@@ -692,15 +693,16 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = rcu_dereference_raw(idp->top);
+ *paa = rcu_dereference_raw(idp->top);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
+ p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
- *paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+ *++paa = p;
}
if (p) {
@@ -712,7 +714,7 @@ int idr_for_each(struct idr *idp,
id += 1 << n;
while (n < fls(id)) {
n += IDR_BITS;
- p = *--paa;
+ --paa;
}
}
@@ -740,17 +742,18 @@ void *idr_get_next(struct idr *idp, int *nextidp)
int n, max;
/* find first ent */
- p = rcu_dereference_raw(idp->top);
+ p = *paa = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer + 1) * IDR_BITS;
max = idr_max(p->layer + 1);
while (id >= 0 && id <= max) {
+ p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
- *paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+ *++paa = p;
}
if (p) {
@@ -768,7 +771,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
- p = *--paa;
+ --paa;
}
}
return NULL;
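The clean-up sequence described in the idr_destroy() comment above looks roughly like this; the callback and wrapper names are illustrative:

static int free_entry(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void my_idr_teardown(struct idr *idr)
{
	idr_for_each(idr, free_entry, NULL);
	idr_destroy(idr);
}
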
diff --git a/lib/iovec.c b/lib/iovec.c
index 7a7c2da4cddf..df3abd1eaa4a 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len)
{
+ /* No data? Done! */
+ if (len == 0)
+ return 0;
+
/* Skip over the finished iovecs */
while (offset >= iov->iov_len) {
offset -= iov->iov_len;
diff --git a/lib/kfifo.c b/lib/kfifo.c
index d79b9d222065..90ba1eb1df06 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -561,8 +561,7 @@ EXPORT_SYMBOL(__kfifo_to_user_r);
unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
- if (!nents)
- BUG();
+ BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);
@@ -585,8 +584,7 @@ EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
- if (!nents)
- BUG();
+ BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);
diff --git a/lib/klist.c b/lib/klist.c
index 358a368a2947..89b485a2a58d 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -140,11 +140,11 @@ void klist_add_tail(struct klist_node *n, struct klist *k)
EXPORT_SYMBOL_GPL(klist_add_tail);
/**
- * klist_add_after - Init a klist_node and add it after an existing node
+ * klist_add_behind - Init a klist_node and add it after an existing node
* @n: node we're adding.
* @pos: node to put @n after
*/
-void klist_add_after(struct klist_node *n, struct klist_node *pos)
+void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
struct klist *k = knode_klist(pos);
@@ -153,7 +153,7 @@ void klist_add_after(struct klist_node *n, struct klist_node *pos)
list_add(&n->n_node, &pos->n_node);
spin_unlock(&k->k_lock);
}
-EXPORT_SYMBOL_GPL(klist_add_after);
+EXPORT_SYMBOL_GPL(klist_add_behind);
/**
* klist_add_before - Init a klist_node and add it before an existing node
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 1183fa70a44d..12bcba1c8612 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -1,3 +1,6 @@
+
+#define pr_fmt(fmt) "list_sort_test: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
@@ -47,6 +50,7 @@ static void merge_and_restore_back_links(void *priv,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
+ u8 count = 0;
while (a && b) {
/* if equal, take 'a' -- important for sort stability */
@@ -70,7 +74,8 @@ static void merge_and_restore_back_links(void *priv,
* element comparison is needed, so the client's cmp()
* routine can invoke cond_resched() periodically.
*/
- (*cmp)(priv, tail->next, tail->next);
+ if (unlikely(!(++count)))
+ (*cmp)(priv, tail->next, tail->next);
tail->next->prev = tail;
tail = tail->next;
@@ -123,9 +128,7 @@ void list_sort(void *priv, struct list_head *head,
}
if (lev > max_lev) {
if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
- printk_once(KERN_DEBUG "list passed to"
- " list_sort() too long for"
- " efficiency\n");
+ printk_once(KERN_DEBUG "list too long for efficiency\n");
lev--;
}
max_lev = lev;
@@ -168,27 +171,25 @@ static struct debug_el **elts __initdata;
static int __init check(struct debug_el *ela, struct debug_el *elb)
{
if (ela->serial >= TEST_LIST_LEN) {
- printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
- ela->serial);
+ pr_err("error: incorrect serial %d\n", ela->serial);
return -EINVAL;
}
if (elb->serial >= TEST_LIST_LEN) {
- printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
- elb->serial);
+ pr_err("error: incorrect serial %d\n", elb->serial);
return -EINVAL;
}
if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
- printk(KERN_ERR "list_sort_test: error: phantom element\n");
+ pr_err("error: phantom element\n");
return -EINVAL;
}
if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
- printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
- ela->poison1, ela->poison2);
+ pr_err("error: bad poison: %#x/%#x\n",
+ ela->poison1, ela->poison2);
return -EINVAL;
}
if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
- printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
- elb->poison1, elb->poison2);
+ pr_err("error: bad poison: %#x/%#x\n",
+ elb->poison1, elb->poison2);
return -EINVAL;
}
return 0;
@@ -207,25 +208,23 @@ static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
static int __init list_sort_test(void)
{
- int i, count = 1, err = -EINVAL;
+ int i, count = 1, err = -ENOMEM;
struct debug_el *el;
- struct list_head *cur, *tmp;
+ struct list_head *cur;
LIST_HEAD(head);
- printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
+ pr_debug("start testing list_sort()\n");
- elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+ elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
if (!elts) {
- printk(KERN_ERR "list_sort_test: error: cannot allocate "
- "memory\n");
- goto exit;
+ pr_err("error: cannot allocate memory\n");
+ return err;
}
for (i = 0; i < TEST_LIST_LEN; i++) {
el = kmalloc(sizeof(*el), GFP_KERNEL);
if (!el) {
- printk(KERN_ERR "list_sort_test: error: cannot "
- "allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto exit;
}
/* force some equivalencies */
@@ -239,52 +238,52 @@ static int __init list_sort_test(void)
list_sort(NULL, &head, cmp);
+ err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
if (cur->next->prev != cur) {
- printk(KERN_ERR "list_sort_test: error: list is "
- "corrupted\n");
+ pr_err("error: list is corrupted\n");
goto exit;
}
cmp_result = cmp(NULL, cur, cur->next);
if (cmp_result > 0) {
- printk(KERN_ERR "list_sort_test: error: list is not "
- "sorted\n");
+ pr_err("error: list is not sorted\n");
goto exit;
}
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
if (cmp_result == 0 && el->serial >= el1->serial) {
- printk(KERN_ERR "list_sort_test: error: order of "
- "equivalent elements not preserved\n");
+ pr_err("error: order of equivalent elements not "
+ "preserved\n");
goto exit;
}
if (check(el, el1)) {
- printk(KERN_ERR "list_sort_test: error: element check "
- "failed\n");
+ pr_err("error: element check failed\n");
goto exit;
}
count++;
}
+ if (head.prev != cur) {
+ pr_err("error: list is corrupted\n");
+ goto exit;
+ }
+
if (count != TEST_LIST_LEN) {
- printk(KERN_ERR "list_sort_test: error: bad list length %d",
- count);
+ pr_err("error: bad list length %d", count);
goto exit;
}
err = 0;
exit:
+ for (i = 0; i < TEST_LIST_LEN; i++)
+ kfree(elts[i]);
kfree(elts);
- list_for_each_safe(cur, tmp, &head) {
- list_del(cur);
- kfree(container_of(cur, struct debug_el, list));
- }
return err;
}
module_init(list_sort_test);
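With the byte counter added above, cmp() is now called with identical arguments roughly once every 256 merged elements rather than on every element, which is still frequent enough for a client to keep long sorts preemptible. A sketch of such a callback (struct and field names are illustrative):

struct my_item {
	struct list_head list;
	int key;
};

static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	const struct my_item *ia = container_of(a, struct my_item, list);
	const struct my_item *ib = container_of(b, struct my_item, list);

	cond_resched();		/* also reached via the periodic dummy call */
	return (ia->key > ib->key) - (ia->key < ib->key);
}
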
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 4a83ecd03650..852c81e3ba9a 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -169,7 +169,7 @@ out_fail:
return NULL;
}
-void lc_free_by_index(struct lru_cache *lc, unsigned i)
+static void lc_free_by_index(struct lru_cache *lc, unsigned i)
{
void *p = lc->lc_element[i];
WARN_ON(!p);
@@ -643,9 +643,10 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index)
* lc_dump - Dump a complete LRU cache to seq in textual form.
* @lc: the lru cache to operate on
* @seq: the &struct seq_file pointer to seq_printf into
- * @utext: user supplied "heading" or other info
+ * @utext: user supplied additional "heading" or other info
* @detail: function pointer the user may provide to dump further details
- * of the object the lc_element is embedded in.
+ * of the object the lc_element is embedded in. May be NULL.
+ * Note: a leading space ' ' and trailing newline '\n' are implied.

*/
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
void (*detail) (struct seq_file *, struct lc_element *))
@@ -654,16 +655,18 @@ void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext
struct lc_element *e;
int i;
- seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
+ seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext);
for (i = 0; i < nr_elements; i++) {
e = lc_element_by_index(lc, i);
- if (e->lc_number == LC_FREE) {
- seq_printf(seq, "\t%2d: FREE\n", i);
- } else {
- seq_printf(seq, "\t%2d: %4u %4u ", i,
- e->lc_number, e->refcnt);
+ if (e->lc_number != e->lc_new_number)
+ seq_printf(seq, "\t%5d: %6d %8d %6d ",
+ i, e->lc_number, e->lc_new_number, e->refcnt);
+ else
+ seq_printf(seq, "\t%5d: %6d %-8s %6d ",
+ i, e->lc_number, "-\"-", e->refcnt);
+ if (detail)
detail(seq, e);
- }
+ seq_putc(seq, '\n');
}
}
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 8563081e8da3..a1c387f6afba 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,31 +19,21 @@
#include <linux/lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(t, x) \
- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
-#define HAVE_OP(t, x) \
- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
-
-#define NEED_IP(t, x) \
- do { \
- if (!HAVE_IP(t, x)) \
- goto input_overrun; \
- } while (0)
-
-#define NEED_OP(t, x) \
- do { \
- if (!HAVE_OP(t, x)) \
- goto output_overrun; \
- } while (0)
-
-#define TEST_LB(m_pos) \
- do { \
- if ((m_pos) < out) \
- goto lookbehind_overrun; \
- } while (0)
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 15 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
@@ -101,8 +98,8 @@ copy_literal_run:
} else
#endif
{
- NEED_OP(t, 0);
- NEED_IP(t, 3);
+ NEED_OP(t);
+ NEED_IP(t + 3);
do {
*op++ = *ip++;
} while (--t > 0);
@@ -115,7 +112,7 @@ copy_literal_run:
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2, 0);
+ NEED_OP(2);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -136,13 +133,20 @@ copy_literal_run:
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 31 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 31 + *ip++;
+ NEED_IP(2);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
@@ -154,13 +158,20 @@ copy_literal_run:
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 7 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
+ NEED_IP(2);
}
next = get_unaligned_le16(ip);
ip += 2;
@@ -174,7 +185,7 @@ copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t, 15))) {
+ if (likely(HAVE_OP(t + 15))) {
do {
COPY8(op, m_pos);
op += 8;
@@ -184,7 +195,7 @@ copy_literal_run:
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6, 0)) {
+ if (HAVE_IP(6)) {
state = next;
COPY4(op, ip);
op += next;
@@ -192,7 +203,7 @@ copy_literal_run:
continue;
}
} else {
- NEED_OP(t, 0);
+ NEED_OP(t);
do {
*op++ = *m_pos++;
} while (op < oe);
@@ -201,7 +212,7 @@ copy_literal_run:
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t, 0);
+ NEED_OP(t);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -214,15 +225,15 @@ match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t, 3);
- NEED_OP(t, 0);
+ NEED_IP(t + 3);
+ NEED_OP(t);
while (t > 0) {
*op++ = *ip++;
t--;
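A stand-alone sketch of the overflow guard introduced above (names are illustrative): a run of n zero bytes contributes n * 255 to the count, computed as (n << 8) - n, and runs longer than MAX_255_COUNT are rejected so the size_t sum cannot wrap.

#include <stddef.h>

#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)

static int fold_zero_run(size_t zero_bytes, size_t base, size_t *count)
{
	if (zero_bytes > MAX_255_COUNT)
		return -1;				/* would overflow */
	*count = base + (zero_bytes << 8) - zero_bytes;	/* base + n * 255 */
	return 0;
}
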
diff --git a/lib/net_utils.c b/lib/net_utils.c
index 2e3c52c8d050..148fc6e99ef6 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -3,24 +3,24 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
-int mac_pton(const char *s, u8 *mac)
+bool mac_pton(const char *s, u8 *mac)
{
int i;
/* XX:XX:XX:XX:XX:XX */
if (strlen(s) < 3 * ETH_ALEN - 1)
- return 0;
+ return false;
/* Don't dirty result unless string is valid MAC. */
for (i = 0; i < ETH_ALEN; i++) {
if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
- return 0;
+ return false;
if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
- return 0;
+ return false;
}
for (i = 0; i < ETH_ALEN; i++) {
mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
}
- return 1;
+ return true;
}
EXPORT_SYMBOL(mac_pton);
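Usage sketch for the bool return (the address string is illustrative); @mac is only written when the whole string parses as a valid address:

	u8 mac[ETH_ALEN];

	if (!mac_pton("00:1a:2b:3c:4d:5e", mac))
		pr_warn("invalid MAC address string\n");
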
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe5a3342e960..6111bcb28376 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -1,6 +1,8 @@
#define pr_fmt(fmt) "%s: " fmt "\n", __func__
#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/percpu-refcount.h>
/*
@@ -11,8 +13,8 @@
* percpu counters will all sum to the correct value
*
* (More precisely: because moduler arithmatic is commutative the sum of all the
- * pcpu_count vars will be equal to what it would have been if all the gets and
- * puts were done to a single integer, even if some of the percpu integers
+ * percpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
* overflow or underflow).
*
* The real trick to implementing percpu refcounts is shutdown. We can't detect
@@ -25,75 +27,64 @@
* works.
*
* Converting to non percpu mode is done with some RCUish stuff in
- * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
- * can't hit 0 before we've added up all the percpu refs.
+ * percpu_ref_kill. Additionally, we need a bias value so that the
+ * atomic_long_t can't hit 0 before we've added up all the percpu refs.
*/
-#define PCPU_COUNT_BIAS (1U << 31)
+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
-static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
+
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
- return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+ return (unsigned long __percpu *)
+ (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
* @release: function which will be called when refcount hits 0
+ * @flags: PERCPU_REF_INIT_* flags
+ * @gfp: allocation mask to use
*
- * Initializes the refcount in single atomic counter mode with a refcount of 1;
- * analagous to atomic_set(ref, 1).
+ * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
+ * refcount of 1; analogous to atomic_long_set(ref, 1). See the
+ * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
*/
-int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
+int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
+ unsigned int flags, gfp_t gfp)
{
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
+ __alignof__(unsigned long));
+ unsigned long start_count = 0;
- ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
- if (!ref->pcpu_count_ptr)
+ ref->percpu_count_ptr = (unsigned long)
+ __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
+ if (!ref->percpu_count_ptr)
return -ENOMEM;
- ref->release = release;
- return 0;
-}
-EXPORT_SYMBOL_GPL(percpu_ref_init);
+ ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
-/**
- * percpu_ref_reinit - re-initialize a percpu refcount
- * @ref: perpcu_ref to re-initialize
- *
- * Re-initialize @ref so that it's in the same state as when it finished
- * percpu_ref_init(). @ref must have been initialized successfully, killed
- * and reached 0 but not exited.
- *
- * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
- * this function is in progress.
- */
-void percpu_ref_reinit(struct percpu_ref *ref)
-{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
- int cpu;
-
- BUG_ON(!pcpu_count);
- WARN_ON(!percpu_ref_is_zero(ref));
+ if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+ else
+ start_count += PERCPU_COUNT_BIAS;
- atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+ if (flags & PERCPU_REF_INIT_DEAD)
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ else
+ start_count++;
- /*
- * Restore per-cpu operation. smp_store_release() is paired with
- * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
- * that the zeroing is visible to all percpu accesses which can see
- * the following PCPU_REF_DEAD clearing.
- */
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(pcpu_count, cpu) = 0;
+ atomic_long_set(&ref->count, start_count);
- smp_store_release(&ref->pcpu_count_ptr,
- ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+ ref->release = release;
+ return 0;
}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
* percpu_ref_exit - undo percpu_ref_init()
@@ -107,26 +98,39 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
- if (pcpu_count) {
- free_percpu(pcpu_count);
- ref->pcpu_count_ptr = PCPU_REF_DEAD;
+ if (percpu_count) {
+ free_percpu(percpu_count);
+ ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
-static void percpu_ref_kill_rcu(struct rcu_head *rcu)
+static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
+{
+ struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+
+ ref->confirm_switch(ref);
+ ref->confirm_switch = NULL;
+ wake_up_all(&percpu_ref_switch_waitq);
+
+ /* drop ref from percpu_ref_switch_to_atomic() */
+ percpu_ref_put(ref);
+}
+
+static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
- unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
- unsigned count = 0;
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ unsigned long count = 0;
int cpu;
for_each_possible_cpu(cpu)
- count += *per_cpu_ptr(pcpu_count, cpu);
+ count += *per_cpu_ptr(percpu_count, cpu);
- pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+ pr_debug("global %ld percpu %ld",
+ atomic_long_read(&ref->count), (long)count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
@@ -140,21 +144,137 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
* reaching 0 before we add the percpu counts. But doing it at the same
* time is equivalent and saves us atomic operations:
*/
+ atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
+
+ WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+ "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
+ ref->release, atomic_long_read(&ref->count));
+
+ /* @ref is viewed as dead on all CPUs, send out switch confirmation */
+ percpu_ref_call_confirm_rcu(rcu);
+}
+
+static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
+{
+}
+
+static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
+ /* switching from percpu to atomic */
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+ /*
+ * Non-NULL ->confirm_switch is used to indicate that
+ * switching is in progress. Use noop one if unspecified.
+ */
+ WARN_ON_ONCE(ref->confirm_switch);
+ ref->confirm_switch =
+ confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+ } else if (confirm_switch) {
+ /*
+ * Somebody already set ATOMIC. Switching may still be in
+ * progress. @confirm_switch must be invoked after the
+ * switching is complete and a full sched RCU grace period
+ * has passed. Wait synchronously for the previous
+ * switching and schedule @confirm_switch invocation.
+ */
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+ ref->confirm_switch = confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+ }
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_switch is specified and @ref is already in
+ * the process of switching to atomic mode. In such cases, @confirm_switch
+ * will be invoked after the switching is complete.
+ *
+ * Due to the way percpu_ref is implemented, @confirm_switch will be called
+ * after at least one full sched RCU grace period has passed but this is an
+ * implementation detail and must not be depended upon.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ ref->force_atomic = true;
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+}
- atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ int cpu;
- WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
- atomic_read(&ref->count));
+ BUG_ON(!percpu_count);
- /* @ref is viewed as dead on all CPUs, send out kill confirmation */
- if (ref->confirm_kill)
- ref->confirm_kill(ref);
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
+ return;
+
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+ atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
- * Now we're in single atomic_t mode with a consistent refcount, so it's
- * safe to drop our initial ref:
+ * Restore per-cpu operation. smp_store_release() is paired with
+ * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+ * that the zeroing is visible to all percpu accesses which can see
+ * the following __PERCPU_REF_ATOMIC clearing.
*/
- percpu_ref_put(ref);
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(percpu_count, cpu) = 0;
+
+ smp_store_release(&ref->percpu_count_ptr,
+ ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+}
+
+/**
+ * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
+ * @ref: percpu_ref to switch to percpu mode
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * To re-use an expired ref, use percpu_ref_reinit().
+ *
+ * Switch @ref to percpu mode. This function may be invoked concurrently
+ * with all the get/put operations and can safely be mixed with kill and
+ * reinit operations. This function reverses the sticky atomic state set
+ * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
+ * dying or dead, the actual switching takes place on the following
+ * percpu_ref_reinit().
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @ref is in the process of switching to atomic mode
+ * by percpu_ref_switch_atomic().
+ */
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ ref->force_atomic = false;
+
+ /* a dying or dead ref can't be switched to percpu mode w/o reinit */
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ __percpu_ref_switch_to_percpu(ref);
}
/**
@@ -164,23 +284,48 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
*
* Equivalent to percpu_ref_kill() but also schedules kill confirmation if
* @confirm_kill is not NULL. @confirm_kill, which may not block, will be
- * called after @ref is seen as dead from all CPUs - all further
- * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
- * for more details.
+ * called after @ref is seen as dead from all CPUs at which point all
+ * further invocations of percpu_ref_tryget_live() will fail. See
+ * percpu_ref_tryget_live() for details.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_kill is specified and @ref is in the
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
*
- * Due to the way percpu_ref is implemented, @confirm_kill will be called
- * after at least one full RCU grace period has passed but this is an
- * implementation detail and callers must not depend on it.
+ * Due to the way percpu_ref is implemented, @confirm_kill will be called
+ * after at least one full sched RCU grace period has passed but this is an
+ * implementation detail and must not be depended upon.
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
- WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
- "percpu_ref_kill() called more than once!\n");
+ WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+ "%s called more than once on %pf!", __func__, ref->release);
- ref->pcpu_count_ptr |= PCPU_REF_DEAD;
- ref->confirm_kill = confirm_kill;
-
- call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ __percpu_ref_switch_to_atomic(ref, confirm_kill);
+ percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/**
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
+ *
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
+ * initialized successfully and reached 0 but not exited.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_reinit(struct percpu_ref *ref)
+{
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+ ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
+ percpu_ref_get(ref);
+ if (!ref->force_atomic)
+ __percpu_ref_switch_to_percpu(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
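A minimal usage sketch of the reworked init API, assuming a caller-defined release callback; a flags value of 0 starts the ref in percpu mode with a count of 1:

static void my_release(struct percpu_ref *ref)
{
	/* last reference dropped; must not sleep */
}

static int my_setup(struct percpu_ref *ref)
{
	return percpu_ref_init(ref, my_release, 0, GFP_KERNEL);
}
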
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577b905..48144cdae819 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -112,13 +112,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(__percpu_counter_sum);
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
+ unsigned long flags __maybe_unused;
+
raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
- fbc->counters = alloc_percpu(s32);
+ fbc->counters = alloc_percpu_gfp(s32, gfp);
if (!fbc->counters)
return -ENOMEM;
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc->list);
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_add(&fbc->list, &percpu_counters);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
void percpu_counter_destroy(struct percpu_counter *fbc)
{
+ unsigned long flags __maybe_unused;
+
if (!fbc->counters)
return;
debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
- spin_lock(&percpu_counters_lock);
+ spin_lock_irqsave(&percpu_counters_lock, flags);
list_del(&fbc->list);
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
free_percpu(fbc->counters);
fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
- spin_lock(&percpu_counters_lock);
+ spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
*pcount = 0;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
- spin_unlock(&percpu_counters_lock);
+ spin_unlock_irq(&percpu_counters_lock);
#endif
return NOTIFY_OK;
}
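Callers now choose the allocation context for the per-cpu storage; a sketch with an illustrative counter:

static struct percpu_counter nr_widgets;

static int widgets_init(void)
{
	return percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
}

static void widgets_exit(void)
{
	percpu_counter_destroy(&nr_widgets);
}
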
diff --git a/lib/proportions.c b/lib/proportions.c
index 05df84801b56..6f724298f67a 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,7 +73,7 @@
#include <linux/proportions.h>
#include <linux/rcupdate.h>
-int prop_descriptor_init(struct prop_descriptor *pd, int shift)
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
{
int err;
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
pd->index = 0;
pd->pg[0].shift = shift;
mutex_init(&pd->mutex);
- err = percpu_counter_init(&pd->pg[0].events, 0);
+ err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
if (err)
goto out;
- err = percpu_counter_init(&pd->pg[1].events, 0);
+ err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
if (err)
percpu_counter_destroy(&pd->pg[0].events);
@@ -188,12 +188,12 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-int prop_local_init_percpu(struct prop_local_percpu *pl)
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
{
raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
- return percpu_counter_init(&pl->events, 0);
+ return percpu_counter_init(&pl->events, 0, gfp);
}
void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/lib/random32.c b/lib/random32.c
index fa5da61ce7ad..0bee183fa18f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -37,9 +37,14 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
+#else
+static inline void prandom_state_selftest(void)
+{
+}
#endif
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -53,8 +58,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
*/
u32 prandom_u32_state(struct rnd_state *state)
{
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
+#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
@@ -93,27 +97,23 @@ EXPORT_SYMBOL(prandom_u32);
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use prandom_bytes().
*/
-void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
- unsigned char *p = buf;
- int i;
+ u8 *ptr = buf;
- for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
- u32 random = prandom_u32_state(state);
- int j;
-
- for (j = 0; j < sizeof(u32); j++) {
- p[i + j] = random;
- random >>= BITS_PER_BYTE;
- }
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(prandom_u32_state(state), (u32 *) ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
}
- if (i < bytes) {
- u32 random = prandom_u32_state(state);
- for (; i < bytes; i++) {
- p[i] = random;
- random >>= BITS_PER_BYTE;
- }
+ if (bytes > 0) {
+ u32 rem = prandom_u32_state(state);
+ do {
+ *ptr++ = (u8) rem;
+ bytes--;
+ rem >>= BITS_PER_BYTE;
+ } while (bytes > 0);
}
}
EXPORT_SYMBOL(prandom_bytes_state);
@@ -123,7 +123,7 @@ EXPORT_SYMBOL(prandom_bytes_state);
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*/
-void prandom_bytes(void *buf, int bytes)
+void prandom_bytes(void *buf, size_t bytes)
{
struct rnd_state *state = &get_cpu_var(net_rand_state);
@@ -134,7 +134,7 @@ EXPORT_SYMBOL(prandom_bytes);
static void prandom_warmup(struct rnd_state *state)
{
- /* Calling RNG ten times to satify recurrence condition */
+ /* Calling RNG ten times to satisfy recurrence condition */
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
@@ -147,21 +147,25 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
+static u32 __extract_hwseed(void)
{
- /* Note: This sort of seeding is ONLY used in test cases and
- * during boot at the time from core_initcall until late_initcall
- * as we don't have a stronger entropy source available yet.
- * After late_initcall, we reseed entire state, we have to (!),
- * otherwise an attacker just needs to search 32 bit space to
- * probe for our internal 128 bit state if he knows a couple
- * of prandom32 outputs!
- */
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
- state->s1 = __seed(LCG(seed), 2U);
- state->s2 = __seed(LCG(state->s1), 8U);
- state->s3 = __seed(LCG(state->s2), 16U);
- state->s4 = __seed(LCG(state->s3), 128U);
+ unsigned int val = 0;
+
+ (void)(arch_get_random_seed_int(&val) ||
+ arch_get_random_int(&val));
+
+ return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+ bool mix_with_hwseed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
+ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
}
/**
@@ -194,14 +198,13 @@ static int __init prandom_init(void)
{
int i;
-#ifdef CONFIG_RANDOM32_SELFTEST
prandom_state_selftest();
-#endif
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
+ u32 weak_seed = (i + jiffies) ^ random_get_entropy();
- prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
+ prandom_seed_early(state, weak_seed, true);
prandom_warmup(state);
}
@@ -210,6 +213,7 @@ static int __init prandom_init(void)
core_initcall(prandom_init);
static void __prandom_timer(unsigned long dontcare);
+
static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
static void __prandom_timer(unsigned long dontcare)
@@ -221,7 +225,7 @@ static void __prandom_timer(unsigned long dontcare)
prandom_seed(entropy);
/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + (prandom_u32() % 40);
+ expires = 40 + prandom_u32_max(40);
seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
add_timer(&seed_timer);
@@ -419,7 +423,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
- prandom_seed_very_weak(&state, test1[i].seed);
+ prandom_seed_early(&state, test1[i].seed, false);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
@@ -434,7 +438,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
- prandom_seed_very_weak(&state, test2[i].seed);
+ prandom_seed_early(&state, test2[i].seed, false);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
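The reseed-interval change above uses prandom_u32_max() rather than an open-coded modulo; conceptually the helper maps a 32-bit random value onto [0, ep_ro) with a multiply and shift, avoiding the division, roughly as follows (a sketch, not necessarily the in-tree definition):

static u32 bounded_prandom(u32 ep_ro)
{
	return (u32)(((u64)prandom_u32() * ep_ro) >> 32);
}
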
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 65f4effd117f..c16c81a3d430 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -101,7 +101,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* / \ / \
* p u --> P U
* / /
- * n N
+ * n n
*
* However, since g's parent might be red, and
* 4) does not allow this, we need to recurse
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644
index 000000000000..081be3ba9ea8
--- /dev/null
+++ b/lib/rhashtable.c
@@ -0,0 +1,794 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on the following paper:
+ * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
+ *
+ * Code partially derived from nft_hash
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include <linux/rhashtable.h>
+
+#define HASH_DEFAULT_SIZE 64UL
+#define HASH_MIN_SIZE 4UL
+
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+{
+ return ht->p.mutex_is_held();
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+#endif
+
+static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
+{
+ return (void *) he - ht->p.head_offset;
+}
+
+static u32 __hashfn(const struct rhashtable *ht, const void *key,
+ u32 len, u32 hsize)
+{
+ u32 h;
+
+ h = ht->p.hashfn(key, len, ht->p.hash_rnd);
+
+ return h & (hsize - 1);
+}
+
+/**
+ * rhashtable_hashfn - compute hash for key of given length
+ * @ht: hash table to compute for
+ * @key: pointer to key
+ * @len: length of key
+ *
+ * Computes the hash value using the hash function provided in the 'hashfn'
+ * of struct rhashtable_params. The returned value is guaranteed to be
+ * smaller than the number of buckets in the hash table.
+ */
+u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
+{
+ struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ return __hashfn(ht, key, len, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_hashfn);
+
+static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
+{
+ if (unlikely(!ht->p.key_len)) {
+ u32 h;
+
+ h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+
+ return h & (hsize - 1);
+ }
+
+ return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
+}
+
+/**
+ * rhashtable_obj_hashfn - compute hash for hashed object
+ * @ht: hash table to compute for
+ * @ptr: pointer to hashed object
+ *
+ * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
+ * on whether the hash table is set up to work with a fixed length key.
+ * The returned value is guaranteed to be smaller than the number of
+ * buckets in the hash table.
+ */
+u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
+{
+ struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ return obj_hashfn(ht, ptr, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
+
+static u32 head_hashfn(const struct rhashtable *ht,
+ const struct rhash_head *he, u32 hsize)
+{
+ return obj_hashfn(ht, rht_obj(ht, he), hsize);
+}
+
+static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+{
+ struct bucket_table *tbl;
+ size_t size;
+
+ size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
+ tbl = kzalloc(size, flags);
+ if (tbl == NULL)
+ tbl = vzalloc(size);
+
+ if (tbl == NULL)
+ return NULL;
+
+ tbl->size = nbuckets;
+
+ return tbl;
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+ kvfree(tbl);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @new_size: new table size
+ */
+bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+{
+ /* Expand table when exceeding 75% load */
+ return ht->nelems > (new_size / 4 * 3);
+}
+EXPORT_SYMBOL_GPL(rht_grow_above_75);
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @new_size: new table size
+ */
+bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+{
+ /* Shrink table beneath 30% load */
+ return ht->nelems < (new_size * 3 / 10);
+}
+EXPORT_SYMBOL_GPL(rht_shrink_below_30);
+
+static void hashtable_chain_unzip(const struct rhashtable *ht,
+ const struct bucket_table *new_tbl,
+ struct bucket_table *old_tbl, size_t n)
+{
+ struct rhash_head *he, *p, *next;
+ unsigned int h;
+
+ /* Old bucket empty, no work needed. */
+ p = rht_dereference(old_tbl->buckets[n], ht);
+ if (!p)
+ return;
+
+ /* Advance the old bucket pointer one or more times until it
+ * reaches a node that doesn't hash to the same new-table bucket
+ * as its predecessor; call that predecessor p.
+ */
+ h = head_hashfn(ht, p, new_tbl->size);
+ rht_for_each(he, p->next, ht) {
+ if (head_hashfn(ht, he, new_tbl->size) != h)
+ break;
+ p = he;
+ }
+ RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
+
+ /* Find the subsequent node which does hash to the same
+ * bucket as node P, or NULL if no such node exists.
+ */
+ next = NULL;
+ if (he) {
+ rht_for_each(he, he->next, ht) {
+ if (head_hashfn(ht, he, new_tbl->size) == h) {
+ next = he;
+ break;
+ }
+ }
+ }
+
+ /* Set p's next pointer to that subsequent node pointer,
+ * bypassing the nodes which do not hash to p's bucket
+ */
+ RCU_INIT_POINTER(p->next, next);
+}
+
+/**
+ * rhashtable_expand - Expand hash table while allowing concurrent lookups
+ * @ht: the hash table to expand
+ * @flags: allocation flags
+ *
+ * A secondary bucket array is allocated and the hash entries are migrated
+ * while keeping them on both lists until the end of the RCU grace period.
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+{
+ struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head *he;
+ unsigned int i, h;
+ bool complete;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
+ return 0;
+
+ new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+ if (new_tbl == NULL)
+ return -ENOMEM;
+
+ ht->shift++;
+
+ /* For each new bucket, search the corresponding old bucket
+ * for the first entry that hashes to the new bucket, and
+ * link the new bucket to that entry. Since all the entries
+ * which will end up in the new bucket appear in the same
+ * old bucket, this constructs an entirely valid new hash
+ * table, but with multiple buckets "zipped" together into a
+ * single imprecise chain.
+ */
+ for (i = 0; i < new_tbl->size; i++) {
+ h = i & (old_tbl->size - 1);
+ rht_for_each(he, old_tbl->buckets[h], ht) {
+ if (head_hashfn(ht, he, new_tbl->size) == i) {
+ RCU_INIT_POINTER(new_tbl->buckets[i], he);
+ break;
+ }
+ }
+ }
+
+ /* Publish the new table pointer. Lookups may now traverse
+ * the new table, but they will not benefit from any
+ * additional efficiency until later steps unzip the buckets.
+ */
+ rcu_assign_pointer(ht->tbl, new_tbl);
+
+ /* Unzip interleaved hash chains */
+ do {
+ /* Wait for readers. All new readers will see the new
+ * table, and thus no references to the old table will
+ * remain.
+ */
+ synchronize_rcu();
+
+ /* For each bucket in the old table (each of which
+ * contains items from multiple buckets of the new
+ * table): ...
+ */
+ complete = true;
+ for (i = 0; i < old_tbl->size; i++) {
+ hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
+ if (old_tbl->buckets[i] != NULL)
+ complete = false;
+ }
+ } while (!complete);
+
+ bucket_table_free(old_tbl);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_expand);
+
+/**
+ * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
+ * @ht: the hash table to shrink
+ * @flags: allocation flags
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+{
+ struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head __rcu **pprev;
+ unsigned int i;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ if (ht->shift <= ht->p.min_shift)
+ return 0;
+
+ ntbl = bucket_table_alloc(tbl->size / 2, flags);
+ if (ntbl == NULL)
+ return -ENOMEM;
+
+ ht->shift--;
+
+ /* Link each bucket in the new table to the first bucket
+ * in the old table that contains entries which will hash
+ * to the new bucket.
+ */
+ for (i = 0; i < ntbl->size; i++) {
+ ntbl->buckets[i] = tbl->buckets[i];
+
+ /* Walk to the end of the chain just linked, then append the
+ * old bucket i + ntbl->size, whose entries also hash to new
+ * bucket i.
+ */
+ for (pprev = &ntbl->buckets[i]; *pprev != NULL;
+ pprev = &rht_dereference(*pprev, ht)->next)
+ ;
+ RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+ }
+
+ /* Publish the new, valid hash table */
+ rcu_assign_pointer(ht->tbl, ntbl);
+
+ /* Wait for readers. No new readers will have references to the
+ * old hash table.
+ */
+ synchronize_rcu();
+
+ bucket_table_free(tbl);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_shrink);
+
+/**
+ * rhashtable_insert - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @flags: allocation flags (table expansion)
+ *
+ * Will automatically grow the table via rhashtable_expand() if the
+ * grow_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+ gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ u32 hash;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ hash = head_hashfn(ht, obj, tbl->size);
+ RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+ ht->nelems++;
+
+ if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+ rhashtable_expand(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_insert);
+
+/**
+ * rhashtable_remove_pprev - remove object from hash table given previous element
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @pprev: pointer to previous element
+ * @flags: allocation flags (table expansion)
+ *
+ * Identical to rhashtable_remove() but the caller is already aware of the
+ * element in front of the element to be deleted. This is particularly
+ * useful for deletion when combined with walking or lookup.
+ */
+void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
+ struct rhash_head __rcu **pprev, gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+
+ ASSERT_RHT_MUTEX(ht);
+
+ RCU_INIT_POINTER(*pprev, obj->next);
+ ht->nelems--;
+
+ if (ht->p.shrink_decision &&
+ ht->p.shrink_decision(ht, tbl->size))
+ rhashtable_shrink(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
+
+/**
+ * rhashtable_remove - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @flags: allocation flags (table expansion)
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_shrink() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
+ gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ u32 h;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ h = head_hashfn(ht, obj, tbl->size);
+
+ pprev = &tbl->buckets[h];
+ rht_for_each(he, tbl->buckets[h], ht) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rhashtable_remove_pprev(ht, he, pprev, flags);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove);
+
+/**
+ * rhashtable_lookup - lookup key in hash table
+ * @ht: hash table
+ * @key: pointer to key
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This lookup function may only be used for fixed-key hash tables (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ */
+void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
+{
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ struct rhash_head *he;
+ u32 h;
+
+ BUG_ON(!ht->p.key_len);
+
+ h = __hashfn(ht, key, ht->p.key_len, tbl->size);
+ rht_for_each_rcu(he, tbl->buckets[h], ht) {
+ if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
+ ht->p.key_len))
+ continue;
+ return (void *) he - ht->p.head_offset;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup);
+
+/**
+ * rhashtable_lookup_compare - search hash table with compare function
+ * @ht: hash table
+ * @hash: hash value of desired entry
+ * @compare: compare function, must return true on match
+ * @arg: argument passed on to compare function
+ *
+ * Traverses the bucket chain behind the provided hash value and calls the
+ * specified compare function for each entry.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+ bool (*compare)(void *, void *), void *arg)
+{
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ struct rhash_head *he;
+
+ if (unlikely(hash >= tbl->size))
+ return NULL;
+
+ rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+ if (!compare(rht_obj(ht, he), arg))
+ continue;
+ return (void *) he - ht->p.head_offset;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
+
+static size_t rounded_hashtable_size(struct rhashtable_params *params)
+{
+ return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ 1UL << params->min_shift);
+}
+
+/**
+ * rhashtable_init - initialize a new hash table
+ * @ht: hash table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash table based on the provided configuration
+ * parameters. A table can be configured either with a variable or
+ * fixed length key:
+ *
+ * Configuration Example 1: Fixed length keys
+ * struct test_obj {
+ * int key;
+ * void * my_member;
+ * struct rhash_head node;
+ * };
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .key_offset = offsetof(struct test_obj, key),
+ * .key_len = sizeof(int),
+ * .hashfn = arch_fast_hash,
+ * .mutex_is_held = &my_mutex_is_held,
+ * };
+ *
+ * Configuration Example 2: Variable length keys
+ * struct test_obj {
+ * [...]
+ * struct rhash_head node;
+ * };
+ *
+ * u32 my_hash_fn(const void *data, u32 seed)
+ * {
+ * struct test_obj *obj = data;
+ *
+ * return [... hash ...];
+ * }
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .hashfn = arch_fast_hash,
+ * .obj_hashfn = my_hash_fn,
+ * .mutex_is_held = &my_mutex_is_held,
+ * };
+ */
+int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
+{
+ struct bucket_table *tbl;
+ size_t size;
+
+ size = HASH_DEFAULT_SIZE;
+
+ if ((params->key_len && !params->hashfn) ||
+ (!params->key_len && !params->obj_hashfn))
+ return -EINVAL;
+
+ params->min_shift = max_t(size_t, params->min_shift,
+ ilog2(HASH_MIN_SIZE));
+
+ if (params->nelem_hint)
+ size = rounded_hashtable_size(params);
+
+ tbl = bucket_table_alloc(size, GFP_KERNEL);
+ if (tbl == NULL)
+ return -ENOMEM;
+
+ memset(ht, 0, sizeof(*ht));
+ ht->shift = ilog2(tbl->size);
+ memcpy(&ht->p, params, sizeof(*params));
+ RCU_INIT_POINTER(ht->tbl, tbl);
+
+ if (!ht->p.hash_rnd)
+ get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_init);
+
+/**
+ * rhashtable_destroy - destroy hash table
+ * @ht: the hash table to destroy
+ *
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
+ */
+void rhashtable_destroy(const struct rhashtable *ht)
+{
+ bucket_table_free(ht->tbl);
+}
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+/**************************************************************************
+ * Self Test
+ **************************************************************************/
+
+#ifdef CONFIG_TEST_RHASHTABLE
+
+#define TEST_HT_SIZE 8
+#define TEST_ENTRIES 2048
+#define TEST_PTR ((void *) 0xdeadbeef)
+#define TEST_NEXPANDS 4
+
+static int test_mutex_is_held(void)
+{
+ return 1;
+}
+
+struct test_obj {
+ void *ptr;
+ int value;
+ struct rhash_head node;
+};
+
+static int __init test_rht_lookup(struct rhashtable *ht)
+{
+ unsigned int i;
+
+ for (i = 0; i < TEST_ENTRIES * 2; i++) {
+ struct test_obj *obj;
+ bool expected = !(i % 2);
+ u32 key = i;
+
+ obj = rhashtable_lookup(ht, &key);
+
+ if (expected && !obj) {
+ pr_warn("Test failed: Could not find key %u\n", key);
+ return -ENOENT;
+ } else if (!expected && obj) {
+ pr_warn("Test failed: Unexpected entry found for key %u\n",
+ key);
+ return -EEXIST;
+ } else if (expected && obj) {
+ if (obj->ptr != TEST_PTR || obj->value != i) {
+ pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
+ obj->ptr, TEST_PTR, obj->value, i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void test_bucket_stats(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ bool quiet)
+{
+ unsigned int cnt, i, total = 0;
+ struct test_obj *obj;
+
+ for (i = 0; i < tbl->size; i++) {
+ cnt = 0;
+
+ if (!quiet)
+ pr_info(" [%#4x/%zu]", i, tbl->size);
+
+ rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
+ cnt++;
+ total++;
+ if (!quiet)
+ pr_cont(" [%p],", obj);
+ }
+
+ if (!quiet)
+ pr_cont("\n [%#x] first element: %p, chain length: %u\n",
+ i, tbl->buckets[i], cnt);
+ }
+
+ pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
+ total, ht->nelems, TEST_ENTRIES);
+}
+
+static int __init test_rhashtable(struct rhashtable *ht)
+{
+ struct bucket_table *tbl;
+ struct test_obj *obj, *next;
+ int err;
+ unsigned int i;
+
+ /*
+ * Insertion Test:
+ * Insert TEST_ENTRIES entries into the table, using only even keys.
+ */
+ pr_info(" Adding %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ struct test_obj *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ obj->ptr = TEST_PTR;
+ obj->value = i * 2;
+
+ rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ test_bucket_stats(ht, tbl, true);
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table expansion iteration %u...\n", i);
+ rhashtable_expand(ht, GFP_KERNEL);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table shrinkage iteration %u...\n", i);
+ rhashtable_shrink(ht, GFP_KERNEL);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ pr_info(" Deleting %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ u32 key = i * 2;
+
+ obj = rhashtable_lookup(ht, &key);
+ BUG_ON(!obj);
+
+ rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+ kfree(obj);
+ }
+
+ return 0;
+
+error:
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
+ kfree(obj);
+
+ return err;
+}
+
+static int __init test_rht_init(void)
+{
+ struct rhashtable ht;
+ struct rhashtable_params params = {
+ .nelem_hint = TEST_HT_SIZE,
+ .head_offset = offsetof(struct test_obj, node),
+ .key_offset = offsetof(struct test_obj, value),
+ .key_len = sizeof(int),
+ .hashfn = arch_fast_hash,
+ .mutex_is_held = &test_mutex_is_held,
+ .grow_decision = rht_grow_above_75,
+ .shrink_decision = rht_shrink_below_30,
+ };
+ int err;
+
+ pr_info("Running resizable hashtable tests...\n");
+
+ err = rhashtable_init(&ht, &params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ return err;
+ }
+
+ err = test_rhashtable(&ht);
+
+ rhashtable_destroy(&ht);
+
+ return err;
+}
+
+subsys_initcall(test_rht_init);
+
+#endif /* CONFIG_TEST_RHASHTABLE */
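
The self-test above uses the stock rht_grow_above_75/rht_shrink_below_30 policies, which trigger resizes at roughly 75% and 30% load. A custom policy plugs into the same slots; the sketch below is illustrative and assumes the callback shape used by those helpers (table pointer plus a table size):

    /* Hypothetical policy: expand once there are more entries than buckets. */
    static bool my_grow_above_100(const struct rhashtable *ht, size_t new_size)
    {
    	return ht->nelems > new_size;
    }

    /* Hypothetical policy: shrink when the table is less than 1/8 full. */
    static bool my_shrink_below_12(const struct rhashtable *ht, size_t new_size)
    {
    	return ht->nelems < (new_size / 8);
    }
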
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 3a8e8e8fb2a5..9cdf62f8accd 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(sg_nents);
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
-#ifndef ARCH_HAS_SG_CHAIN
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
@@ -165,6 +165,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
+ * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
* @free_fn: Free function
*
* Description:
@@ -174,7 +175,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- sg_free_fn *free_fn)
+ bool skip_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
@@ -202,7 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
}
table->orig_nents -= sg_size;
- free_fn(sgl, alloc_size);
+ if (skip_first_chunk)
+ skip_first_chunk = false;
+ else
+ free_fn(sgl, alloc_size);
sgl = next;
}
@@ -217,7 +221,7 @@ EXPORT_SYMBOL(__sg_free_table);
**/
void sg_free_table(struct sg_table *table)
{
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
@@ -241,8 +245,8 @@ EXPORT_SYMBOL(sg_free_table);
*
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
- unsigned int max_ents, gfp_t gfp_mask,
- sg_alloc_fn *alloc_fn)
+ unsigned int max_ents, struct scatterlist *first_chunk,
+ gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
@@ -251,7 +255,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (nents == 0)
return -EINVAL;
-#ifndef ARCH_HAS_SG_CHAIN
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
if (WARN_ON_ONCE(nents > max_ents))
return -EINVAL;
#endif
@@ -269,7 +273,12 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
left -= sg_size;
- sg = alloc_fn(alloc_size, gfp_mask);
+ if (first_chunk) {
+ sg = first_chunk;
+ first_chunk = NULL;
+ } else {
+ sg = alloc_fn(alloc_size, gfp_mask);
+ }
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
@@ -324,9 +333,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
- gfp_mask, sg_kmalloc);
+ NULL, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
return ret;
}
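
With the new first_chunk/skip_first_chunk parameters a caller can embed the first scatterlist chunk in its own allocation and only fall back to a dynamic allocator for overflow chunks (chaining, i.e. CONFIG_ARCH_HAS_SG_CHAIN, is needed once nents can exceed the embedded size). A rough sketch; the request structure, wrapper names and allocator callbacks are assumptions for illustration, not part of this patch:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    #define MY_INLINE_ENTS	8

    struct my_request {
    	struct sg_table table;
    	struct scatterlist inline_sg[MY_INLINE_ENTS];	/* first chunk */
    };

    static struct scatterlist *my_sg_alloc(unsigned int nents, gfp_t gfp_mask)
    {
    	return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
    }

    static void my_sg_free(struct scatterlist *sg, unsigned int nents)
    {
    	kfree(sg);
    }

    static int my_request_map(struct my_request *req, unsigned int nents,
    			  gfp_t gfp_mask)
    {
    	int ret;

    	/* The embedded array is used first; extra chunks come from my_sg_alloc(). */
    	ret = __sg_alloc_table(&req->table, nents, MY_INLINE_ENTS,
    			       req->inline_sg, gfp_mask, my_sg_alloc);
    	if (ret)
    		__sg_free_table(&req->table, MY_INLINE_ENTS, true, my_sg_free);
    	return ret;
    }

    static void my_request_unmap(struct my_request *req)
    {
    	/* Free only the chained chunks, never the embedded first one. */
    	__sg_free_table(&req->table, MY_INLINE_ENTS, true, my_sg_free);
    }
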
diff --git a/lib/string.c b/lib/string.c
index 992bf30af759..f3c6ff596414 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
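
For context, ARCH_HAS_FAST_MULTIPLIER gates the usual byte-broadcast trick used here: multiplying a byte by 0x01010101 (or its 64-bit counterpart) replicates it into every byte lane so memchr_inv() can compare a word at a time. A small standalone illustration, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint8_t c = 0x5a;
    	uint64_t pattern = (uint64_t)c * 0x0101010101010101ULL;

    	/* Prints 5a5a5a5a5a5a5a5a: the byte repeated in all eight lanes. */
    	printf("%016llx\n", (unsigned long long)pattern);
    	return 0;
    }
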
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index ed5c1454dd62..29033f319aea 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -25,12 +25,15 @@
int string_get_size(u64 size, const enum string_size_units units,
char *buf, int len)
{
- static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
- "EB", "ZB", "YB", NULL};
- static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
- "EiB", "ZiB", "YiB", NULL };
- static const char **units_str[] = {
- [STRING_UNITS_10] = units_10,
+ static const char *const units_10[] = {
+ "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL
+ };
+ static const char *const units_2[] = {
+ "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
+ NULL
+ };
+ static const char *const *const units_str[] = {
+ [STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
static const unsigned int divisor[] = {
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index bea3f3fa3f02..4137bca5f8e8 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -3,7 +3,7 @@
#include <linux/module.h>
#define for_each_test(i, test) \
- for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(test); i++)
struct test_fail {
const char *str;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c579e0f58818..23e070bcf72d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -66,7 +66,7 @@ struct bpf_test {
const char *descr;
union {
struct sock_filter insns[MAX_INSNS];
- struct sock_filter_int insns_int[MAX_INSNS];
+ struct bpf_insn insns_int[MAX_INSNS];
} u;
__u8 aux;
__u8 data[MAX_DATA];
@@ -1342,6 +1342,44 @@ static struct bpf_test tests[] = {
{ { 0, -1 } }
},
{
+ "INT: shifts by register",
+ .u.insns_int = {
+ BPF_MOV64_IMM(R0, -1234),
+ BPF_MOV64_IMM(R1, 1),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R0, R2),
+ BPF_MOV32_IMM(R4, -1234),
+ BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_AND, R4, 63),
+ BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <<= 46 */
+ BPF_MOV64_IMM(R3, 47),
+ BPF_ALU64_REG(BPF_ARSH, R0, R3),
+ BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 4),
+ BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 5),
+ BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
"INT: DIV + ABS",
.u.insns_int = {
BPF_ALU64_REG(BPF_MOV, R6, R1),
@@ -1697,6 +1735,27 @@ static struct bpf_test tests[] = {
{ },
{ { 1, 0 } },
},
+ {
+ "load 64-bit immediate",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x567800001234LL),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_IMM(BPF_LSH, R3, 32),
+ BPF_ALU64_IMM(BPF_RSH, R3, 32),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
};
static struct net_device dev;
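
In the "load 64-bit immediate" case added above, both halves of the constant are checked: 0x567800001234 shifted right by 32 leaves 0x5678, while shifting left and then right by 32 masks off the upper half and leaves 0x1234, so both JEQ branches must be taken for R0 to end up as 1.
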
@@ -1761,9 +1820,9 @@ static int probe_filter_length(struct sock_filter *fp)
return len + 1;
}
-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
{
- struct sk_filter *fp;
+ struct bpf_prog *fp;
struct sock_fprog_kern fprog;
unsigned int flen = probe_filter_length(tests[which].u.insns);
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1832,7 @@ static struct sk_filter *generate_filter(int which, int *err)
fprog.filter = tests[which].u.insns;
fprog.len = flen;
- *err = sk_unattached_filter_create(&fp, &fprog);
+ *err = bpf_prog_create(&fp, &fprog);
if (tests[which].aux & FLAG_EXPECTED_FAIL) {
if (*err == -EINVAL) {
pr_cont("PASS\n");
@@ -1798,7 +1857,7 @@ static struct sk_filter *generate_filter(int which, int *err)
break;
case INTERNAL:
- fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
if (fp == NULL) {
pr_cont("UNEXPECTED_FAIL no memory left\n");
*err = -ENOMEM;
@@ -1807,9 +1866,9 @@ static struct sk_filter *generate_filter(int which, int *err)
fp->len = flen;
memcpy(fp->insnsi, tests[which].u.insns_int,
- fp->len * sizeof(struct sock_filter_int));
+ fp->len * sizeof(struct bpf_insn));
- sk_filter_select_runtime(fp);
+ bpf_prog_select_runtime(fp);
break;
}
@@ -1817,30 +1876,30 @@ static struct sk_filter *generate_filter(int which, int *err)
return fp;
}
-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
{
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
switch (test_type) {
case CLASSIC:
- sk_unattached_filter_destroy(fp);
+ bpf_prog_destroy(fp);
break;
case INTERNAL:
- sk_filter_free(fp);
+ bpf_prog_free(fp);
break;
}
}
-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
int runs, u64 *duration)
{
u64 start, finish;
- int ret, i;
+ int ret = 0, i;
start = ktime_to_us(ktime_get());
for (i = 0; i < runs; i++)
- ret = SK_RUN_FILTER(fp, data);
+ ret = BPF_PROG_RUN(fp, data);
finish = ktime_to_us(ktime_get());
@@ -1850,7 +1909,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
return ret;
}
-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{
int err_cnt = 0, i, runs = MAX_TESTRUNS;
@@ -1884,7 +1943,7 @@ static __init int test_bpf(void)
int i, err_cnt = 0, pass_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- struct sk_filter *fp;
+ struct bpf_prog *fp;
int err;
pr_info("#%d %s ", i, tests[i].descr);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
new file mode 100644
index 000000000000..86374c1c49a4
--- /dev/null
+++ b/lib/test_firmware.c
@@ -0,0 +1,117 @@
+/*
+ * This module provides an interface to trigger and test firmware loading.
+ *
+ * It is designed to be used for basic evaluation of the firmware loading
+ * subsystem (for example when validating firmware verification). It lacks
+ * any extra dependencies, and will not normally be loaded by the system
+ * unless explicitly requested by name.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static DEFINE_MUTEX(test_fw_mutex);
+static const struct firmware *test_firmware;
+
+static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ ssize_t rc = 0;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_firmware)
+ rc = simple_read_from_buffer(buf, size, offset,
+ test_firmware->data,
+ test_firmware->size);
+ mutex_unlock(&test_fw_mutex);
+ return rc;
+}
+
+static const struct file_operations test_fw_fops = {
+ .owner = THIS_MODULE,
+ .read = test_fw_misc_read,
+};
+
+static struct miscdevice test_fw_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "test_firmware",
+ .fops = &test_fw_fops,
+};
+
+static ssize_t trigger_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char *name;
+
+ name = kzalloc(count + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ memcpy(name, buf, count);
+
+ pr_info("loading '%s'\n", name);
+
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ test_firmware = NULL;
+ rc = request_firmware(&test_firmware, name, dev);
+ if (rc)
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
+ mutex_unlock(&test_fw_mutex);
+
+ kfree(name);
+
+ return count;
+}
+static DEVICE_ATTR_WO(trigger_request);
+
+static int __init test_firmware_init(void)
+{
+ int rc;
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
+ pr_err("could not register misc device: %d\n", rc);
+ return rc;
+ }
+ rc = device_create_file(test_fw_misc_device.this_device,
+ &dev_attr_trigger_request);
+ if (rc) {
+ pr_err("could not create sysfs interface: %d\n", rc);
+ goto dereg;
+ }
+
+ pr_warn("interface ready\n");
+
+ return 0;
+dereg:
+ misc_deregister(&test_fw_misc_device);
+ return rc;
+}
+
+module_init(test_firmware_init);
+
+static void __exit test_firmware_exit(void)
+{
+ release_firmware(test_firmware);
+ device_remove_file(test_fw_misc_device.this_device,
+ &dev_attr_trigger_request);
+ misc_deregister(&test_fw_misc_device);
+ pr_warn("removed interface\n");
+}
+
+module_exit(test_firmware_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
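
From userspace, the module is exercised by writing a firmware name to its trigger_request attribute and reading the loaded blob back through the misc device. A rough sketch; the sysfs path is inferred from the misc device name and the firmware name is a placeholder:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	const char *name = "test-firmware.bin";	/* must exist in the firmware search path */
    	char buf[4096];
    	ssize_t n;
    	int fd;

    	/* Ask the module to call request_firmware() with this name. */
    	fd = open("/sys/devices/virtual/misc/test_firmware/trigger_request",
    		  O_WRONLY);
    	if (fd < 0)
    		return 1;
    	if (write(fd, name, strlen(name)) < 0) {
    		close(fd);
    		return 1;
    	}
    	close(fd);

    	/* Dump whatever the firmware loader handed to the module. */
    	fd = open("/dev/test_firmware", O_RDONLY);
    	if (fd < 0)
    		return 1;
    	while ((n = read(fd, buf, sizeof(buf))) > 0)
    		fwrite(buf, 1, n, stdout);
    	close(fd);

    	return 0;
    }
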
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6fe2c84eb055..ba3cd0a35640 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1937,7 +1937,7 @@ EXPORT_SYMBOL(sprintf);
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
- * is skiped.
+ * is skipped.
*
* The return value is the number of words(32bits) which would be generated for
* the given input.
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index d63381e8e333..d20ef458f137 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -250,52 +250,6 @@ int zlib_deflateInit2(
}
/* ========================================================================= */
-#if 0
-int zlib_deflateSetDictionary(
- z_streamp strm,
- const Byte *dictionary,
- uInt dictLength
-)
-{
- deflate_state *s;
- uInt length = dictLength;
- uInt n;
- IPos hash_head = 0;
-
- if (strm == NULL || strm->state == NULL || dictionary == NULL)
- return Z_STREAM_ERROR;
-
- s = (deflate_state *) strm->state;
- if (s->status != INIT_STATE) return Z_STREAM_ERROR;
-
- strm->adler = zlib_adler32(strm->adler, dictionary, dictLength);
-
- if (length < MIN_MATCH) return Z_OK;
- if (length > MAX_DIST(s)) {
- length = MAX_DIST(s);
-#ifndef USE_DICT_HEAD
- dictionary += dictLength - length; /* use the tail of the dictionary */
-#endif
- }
- memcpy((char *)s->window, dictionary, length);
- s->strstart = length;
- s->block_start = (long)length;
-
- /* Insert all strings in the hash table (except for the last two bytes).
- * s->lookahead stays null, so s->ins_h will be recomputed at the next
- * call of fill_window.
- */
- s->ins_h = s->window[0];
- UPDATE_HASH(s, s->ins_h, s->window[1]);
- for (n = 0; n <= length - MIN_MATCH; n++) {
- INSERT_STRING(s, n, hash_head);
- }
- if (hash_head) hash_head = 0; /* to make compiler happy */
- return Z_OK;
-}
-#endif /* 0 */
-
-/* ========================================================================= */
int zlib_deflateReset(
z_streamp strm
)
@@ -326,45 +280,6 @@ int zlib_deflateReset(
return Z_OK;
}
-/* ========================================================================= */
-#if 0
-int zlib_deflateParams(
- z_streamp strm,
- int level,
- int strategy
-)
-{
- deflate_state *s;
- compress_func func;
- int err = Z_OK;
-
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- if (level == Z_DEFAULT_COMPRESSION) {
- level = 6;
- }
- if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- func = configuration_table[s->level].func;
-
- if (func != configuration_table[level].func && strm->total_in != 0) {
- /* Flush the last buffer: */
- err = zlib_deflate(strm, Z_PARTIAL_FLUSH);
- }
- if (s->level != level) {
- s->level = level;
- s->max_lazy_match = configuration_table[level].max_lazy;
- s->good_match = configuration_table[level].good_length;
- s->nice_match = configuration_table[level].nice_length;
- s->max_chain_length = configuration_table[level].max_chain;
- }
- s->strategy = strategy;
- return err;
-}
-#endif /* 0 */
-
/* =========================================================================
* Put a short in the pending buffer. The 16-bit value is put in MSB order.
* IN assertion: the stream state is correct and there is enough room in
@@ -568,64 +483,6 @@ int zlib_deflateEnd(
return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}
-/* =========================================================================
- * Copy the source state to the destination state.
- */
-#if 0
-int zlib_deflateCopy (
- z_streamp dest,
- z_streamp source
-)
-{
-#ifdef MAXSEG_64K
- return Z_STREAM_ERROR;
-#else
- deflate_state *ds;
- deflate_state *ss;
- ush *overlay;
- deflate_workspace *mem;
-
-
- if (source == NULL || dest == NULL || source->state == NULL) {
- return Z_STREAM_ERROR;
- }
-
- ss = (deflate_state *) source->state;
-
- *dest = *source;
-
- mem = (deflate_workspace *) dest->workspace;
-
- ds = &(mem->deflate_memory);
-
- dest->state = (struct internal_state *) ds;
- *ds = *ss;
- ds->strm = dest;
-
- ds->window = (Byte *) mem->window_memory;
- ds->prev = (Pos *) mem->prev_memory;
- ds->head = (Pos *) mem->head_memory;
- overlay = (ush *) mem->overlay_memory;
- ds->pending_buf = (uch *) overlay;
-
- memcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
- memcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
- memcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
- memcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
-
- ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
-
- ds->l_desc.dyn_tree = ds->dyn_ltree;
- ds->d_desc.dyn_tree = ds->dyn_dtree;
- ds->bl_desc.dyn_tree = ds->bl_tree;
-
- return Z_OK;
-#endif
-}
-#endif /* 0 */
-
/* ===========================================================================
* Read a new buffer from the current input stream, update the adler32
* and total number of bytes read. All deflate() input goes through
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index f5ce87b0800e..58a733b10387 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -45,21 +45,6 @@ int zlib_inflateReset(z_streamp strm)
return Z_OK;
}
-#if 0
-int zlib_inflatePrime(z_streamp strm, int bits, int value)
-{
- struct inflate_state *state;
-
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
- if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
- value &= (1L << bits) - 1;
- state->hold += value << state->bits;
- state->bits += bits;
- return Z_OK;
-}
-#endif
-
int zlib_inflateInit2(z_streamp strm, int windowBits)
{
struct inflate_state *state;
@@ -761,123 +746,6 @@ int zlib_inflateEnd(z_streamp strm)
return Z_OK;
}
-#if 0
-int zlib_inflateSetDictionary(z_streamp strm, const Byte *dictionary,
- uInt dictLength)
-{
- struct inflate_state *state;
- unsigned long id;
-
- /* check state */
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
- if (state->wrap != 0 && state->mode != DICT)
- return Z_STREAM_ERROR;
-
- /* check for correct dictionary id */
- if (state->mode == DICT) {
- id = zlib_adler32(0L, NULL, 0);
- id = zlib_adler32(id, dictionary, dictLength);
- if (id != state->check)
- return Z_DATA_ERROR;
- }
-
- /* copy dictionary to window */
- zlib_updatewindow(strm, strm->avail_out);
-
- if (dictLength > state->wsize) {
- memcpy(state->window, dictionary + dictLength - state->wsize,
- state->wsize);
- state->whave = state->wsize;
- }
- else {
- memcpy(state->window + state->wsize - dictLength, dictionary,
- dictLength);
- state->whave = dictLength;
- }
- state->havedict = 1;
- return Z_OK;
-}
-#endif
-
-#if 0
-/*
- Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
- or when out of input. When called, *have is the number of pattern bytes
- found in order so far, in 0..3. On return *have is updated to the new
- state. If on return *have equals four, then the pattern was found and the
- return value is how many bytes were read including the last byte of the
- pattern. If *have is less than four, then the pattern has not been found
- yet and the return value is len. In the latter case, zlib_syncsearch() can be
- called again with more data and the *have state. *have is initialized to
- zero for the first call.
- */
-static unsigned zlib_syncsearch(unsigned *have, unsigned char *buf,
- unsigned len)
-{
- unsigned got;
- unsigned next;
-
- got = *have;
- next = 0;
- while (next < len && got < 4) {
- if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
- got++;
- else if (buf[next])
- got = 0;
- else
- got = 4 - got;
- next++;
- }
- *have = got;
- return next;
-}
-#endif
-
-#if 0
-int zlib_inflateSync(z_streamp strm)
-{
- unsigned len; /* number of bytes to look at or looked at */
- unsigned long in, out; /* temporary to save total_in and total_out */
- unsigned char buf[4]; /* to restore bit buffer to byte string */
- struct inflate_state *state;
-
- /* check parameters */
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
- if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
-
- /* if first time, start search in bit buffer */
- if (state->mode != SYNC) {
- state->mode = SYNC;
- state->hold <<= state->bits & 7;
- state->bits -= state->bits & 7;
- len = 0;
- while (state->bits >= 8) {
- buf[len++] = (unsigned char)(state->hold);
- state->hold >>= 8;
- state->bits -= 8;
- }
- state->have = 0;
- zlib_syncsearch(&(state->have), buf, len);
- }
-
- /* search available input */
- len = zlib_syncsearch(&(state->have), strm->next_in, strm->avail_in);
- strm->avail_in -= len;
- strm->next_in += len;
- strm->total_in += len;
-
- /* return no joy or set up to restart inflate() on a new block */
- if (state->have != 4) return Z_DATA_ERROR;
- in = strm->total_in; out = strm->total_out;
- zlib_inflateReset(strm);
- strm->total_in = in; strm->total_out = out;
- state->mode = TYPE;
- return Z_OK;
-}
-#endif
-
/*
* This subroutine adds the data at next_in/avail_in to the output history
* without performing any output. The output buffer must be "caught up";