Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug              147
-rw-r--r--  lib/Makefile                     2
-rw-r--r--  lib/assoc_array.c                2
-rw-r--r--  lib/atomic64_test.c              7
-rw-r--r--  lib/bitmap.c                     8
-rw-r--r--  lib/bsearch.c                   22
-rw-r--r--  lib/debugobjects.c               3
-rw-r--r--  lib/extable.c                   41
-rw-r--r--  lib/fault-inject.c              12
-rw-r--r--  lib/interval_tree_test.c        93
-rw-r--r--  lib/iov_iter.c                  98
-rw-r--r--  lib/kobject_uevent.c             2
-rw-r--r--  lib/kstrtox.c                   12
-rw-r--r--  lib/locking-selftest.c         123
-rw-r--r--  lib/mpi/mpicoder.c               4
-rw-r--r--  lib/percpu_counter.c             7
-rw-r--r--  lib/radix-tree.c                 1
-rw-r--r--  lib/raid6/Makefile               4
-rw-r--r--  lib/raid6/algos.c                3
-rw-r--r--  lib/raid6/mktables.c            20
-rw-r--r--  lib/raid6/neon.uc               33
-rw-r--r--  lib/raid6/recov_neon.c         110
-rw-r--r--  lib/raid6/recov_neon_inner.c   117
-rw-r--r--  lib/rhashtable.c                 9
-rw-r--r--  lib/string.c                     7
-rw-r--r--  lib/swiotlb.c                   57
-rw-r--r--  lib/test_bitmap.c               29
-rw-r--r--  lib/test_firmware.c            710
-rw-r--r--  lib/test_kmod.c               1246
-rw-r--r--  lib/test_rhashtable.c           57
-rw-r--r--  lib/test_sysctl.c              148
-rw-r--r--  lib/test_uuid.c                  2
-rw-r--r--  lib/usercopy.c                  10
33 files changed, 2980 insertions, 166 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ca9460f049b8..7396f5044397 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -374,6 +374,9 @@ config STACK_VALIDATION
pointers (if CONFIG_FRAME_POINTER is enabled). This helps ensure
that runtime stack traces are more reliable.
+ This is also a prerequisite for generation of ORC unwind data, which
+ is needed for CONFIG_ORC_UNWINDER.
+
For more information, see
tools/objtool/Documentation/stack-validation.txt.
@@ -778,34 +781,52 @@ config DEBUG_SHIRQ
menu "Debug Lockups and Hangs"
config LOCKUP_DETECTOR
- bool "Detect Hard and Soft Lockups"
+ bool
+
+config SOFTLOCKUP_DETECTOR
+ bool "Detect Soft Lockups"
depends on DEBUG_KERNEL && !S390
+ select LOCKUP_DETECTOR
help
Say Y here to enable the kernel to act as a watchdog to detect
- hard and soft lockups.
+ soft lockups.
Softlockups are bugs that cause the kernel to loop in kernel
mode for more than 20 seconds, without giving other tasks a
chance to run. The current stack trace is displayed upon
detection and the system will stay locked up.
+config HARDLOCKUP_DETECTOR_PERF
+ bool
+ select SOFTLOCKUP_DETECTOR
+
+#
+# Enables a timestamp-based low-pass filter to compensate for the perf-based
+# hard lockup detector running too fast due to turbo modes.
+#
+config HARDLOCKUP_CHECK_TIMESTAMP
+ bool
+
+#
+# arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide its own hard
+# lockup detector rather than the perf-based detector.
+#
+config HARDLOCKUP_DETECTOR
+ bool "Detect Hard Lockups"
+ depends on DEBUG_KERNEL && !S390
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select LOCKUP_DETECTOR
+ select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
+ select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
+ help
+ Say Y here to enable the kernel to act as a watchdog to detect
+ hard lockups.
+
Hardlockups are bugs that cause the CPU to loop in kernel mode
for more than 10 seconds, without letting other interrupts have a
chance to run. The current stack trace is displayed upon detection
and the system will stay locked up.
- The overhead should be minimal. A periodic hrtimer runs to
- generate interrupts and kick the watchdog task every 4 seconds.
- An NMI is generated every 10 seconds or so to check for hardlockups.
-
- The frequency of hrtimer and NMI events and the soft and hard lockup
- thresholds can be controlled through the sysctl watchdog_thresh.
-
-config HARDLOCKUP_DETECTOR
- def_bool y
- depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
- depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
-
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
depends on HARDLOCKUP_DETECTOR
@@ -826,7 +847,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
- depends on LOCKUP_DETECTOR
+ depends on SOFTLOCKUP_DETECTOR
help
Say Y here to enable the kernel to panic on "soft lockups",
which are bugs that cause the kernel to loop in kernel
@@ -843,7 +864,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
int
- depends on LOCKUP_DETECTOR
+ depends on SOFTLOCKUP_DETECTOR
range 0 1
default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
@@ -851,7 +872,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
- default LOCKUP_DETECTOR
+ default SOFTLOCKUP_DETECTOR
help
Say Y here to enable the kernel to detect "hung tasks",
which are bugs that cause the task to be stuck in
@@ -1070,6 +1091,8 @@ config PROVE_LOCKING
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
+ select LOCKDEP_CROSSRELEASE
+ select LOCKDEP_COMPLETIONS
select TRACE_IRQFLAGS
default n
help
@@ -1110,7 +1133,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
+ select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
select KALLSYMS
select KALLSYMS_ALL
@@ -1139,6 +1162,22 @@ config LOCK_STAT
CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
(CONFIG_LOCKDEP defines "acquire" and "release" events.)
+config LOCKDEP_CROSSRELEASE
+ bool
+ help
+ This makes lockdep work for crosslocks, which are locks allowed to
+ be released in a different context from the acquisition context.
+ Normally a lock must be released in the context that acquired it.
+ However, relaxing this constraint lets synchronization primitives
+ such as page locks or completions use the lock correctness
+ detector, lockdep.
+
+config LOCKDEP_COMPLETIONS
+ bool
+ help
+ A deadlock caused by wait_for_completion() and complete() can be
+ detected by lockdep using the crossrelease feature.
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
@@ -1212,6 +1251,34 @@ config STACKTRACE
It is also used by various kernel debugging features that require
stack trace generation.
+config WARN_ALL_UNSEEDED_RANDOM
+ bool "Warn for all uses of unseeded randomness"
+ default n
+ help
+ Some parts of the kernel contain bugs relating to their use of
+ cryptographically secure random numbers before it's actually possible
+ to generate those numbers securely. This setting ensures that these
+ flaws don't go unnoticed, by enabling a message, should this ever
+ occur. This will allow people with obscure setups to know when things
+ are going wrong, so that they might contact developers about fixing
+ it.
+
+ Unfortunately, on some models of some architectures getting
+ a fully seeded CRNG is extremely difficult, and so this can
+ result in dmesg getting spammed for a surprisingly long
+ time. This is really bad from a security perspective, and
+ so architecture maintainers really need to do what they can
+ to get the CRNG seeded sooner after the system is booted.
+ However, since users cannot do anything actionable to
+ address this, by default the kernel will issue only a single
+ warning for the first use of unseeded randomness.
+
+ Say Y here if you want to receive warnings for all uses of
+ unseeded randomness. This will be of use primarily for
+ those developers interested in improving the security of
+ Linux kernels running on their architecture (or
+ subarchitecture).
+
config DEBUG_KOBJECT
bool "kobject debugging"
depends on DEBUG_KERNEL
@@ -1501,7 +1568,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86
help
Provide stacktrace filter for fault-injection capabilities
@@ -1510,7 +1577,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -1594,7 +1661,7 @@ config RBTREE_TEST
config INTERVAL_TREE_TEST
tristate "Interval tree test"
- depends on m && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
select INTERVAL_TREE
help
A benchmark measuring the performance of the interval tree library
@@ -1785,6 +1852,17 @@ config TEST_FIRMWARE
If unsure, say N.
+config TEST_SYSCTL
+ tristate "sysctl test driver"
+ default n
+ depends on PROC_SYSCTL
+ help
+ This builds the "test_sysctl" module. This driver enables testing of
+ the proc sysctl interfaces available to drivers, safely and without
+ affecting production knobs which might alter system functionality.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
default n
@@ -1825,6 +1903,33 @@ config BUG_ON_DATA_CORRUPTION
If unsure, say N.
+config TEST_KMOD
+ tristate "kmod stress tester"
+ default n
+ depends on m
+ depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
+ depends on NETDEVICES && NET_CORE && INET # for TUN
+ select TEST_LKM
+ select XFS_FS
+ select TUN
+ select BTRFS_FS
+ help
+ Test the kernel's module loading mechanism: kmod. kmod implements
+ support for loading modules using the Linux kernel's usermode helper.
+ This test provides a series of tests against kmod.
+
+ Although technically test_kmod can be built either as a module or
+ into the kernel, we disallow building it into the kernel: it stress
+ tests request_module(), which would very likely take over the limited
+ threads available to other module load requests and could ultimately
+ be fatal.
+
+ To run tests run:
+
+ tools/testing/selftests/kmod/kmod.sh --help
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 5a008329324e..40c18372b301 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
@@ -60,6 +61,7 @@ obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
+obj-$(CONFIG_TEST_KMOD) += test_kmod.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0b119c..155c55d8db5f 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1,6 +1,6 @@
/* Generic associative array implementation.
*
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index fd70c0e0e673..62ab629f51ca 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -153,8 +153,10 @@ static __init void test_atomic64(void)
long long v0 = 0xaaa31337c001d00dLL;
long long v1 = 0xdeadbeefdeafcafeLL;
long long v2 = 0xfaceabadf00df001LL;
+ long long v3 = 0x8000000000000000LL;
long long onestwos = 0x1111111122222222LL;
long long one = 1LL;
+ int r_int;
atomic64_t v = ATOMIC64_INIT(v0);
long long r = v0;
@@ -240,6 +242,11 @@ static __init void test_atomic64(void)
BUG_ON(!atomic64_inc_not_zero(&v));
r += one;
BUG_ON(v.counter != r);
+
+ /* Confirm the return value fits in an int, even if the value doesn't */
+ INIT(v3);
+ r_int = atomic64_inc_not_zero(&v);
+ BUG_ON(!r_int);
}
static __init int test_atomics_init(void)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 08c6ef3a2b6f..9a532805364b 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -251,7 +251,7 @@ int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
-void bitmap_set(unsigned long *map, unsigned int start, int len)
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned int size = start + len;
@@ -270,9 +270,9 @@ void bitmap_set(unsigned long *map, unsigned int start, int len)
*p |= mask_to_set;
}
}
-EXPORT_SYMBOL(bitmap_set);
+EXPORT_SYMBOL(__bitmap_set);
-void bitmap_clear(unsigned long *map, unsigned int start, int len)
+void __bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned int size = start + len;
@@ -291,7 +291,7 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len)
*p &= ~mask_to_clear;
}
}
-EXPORT_SYMBOL(bitmap_clear);
+EXPORT_SYMBOL(__bitmap_clear);
/**
* bitmap_find_next_zero_area_off - find a contiguous aligned zero area
diff --git a/lib/bsearch.c b/lib/bsearch.c
index e33c179089db..18b445b010c3 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -33,19 +33,21 @@
void *bsearch(const void *key, const void *base, size_t num, size_t size,
int (*cmp)(const void *key, const void *elt))
{
- size_t start = 0, end = num;
+ const char *pivot;
int result;
- while (start < end) {
- size_t mid = start + (end - start) / 2;
+ while (num > 0) {
+ pivot = base + (num >> 1) * size;
+ result = cmp(key, pivot);
- result = cmp(key, base + mid * size);
- if (result < 0)
- end = mid;
- else if (result > 0)
- start = mid + 1;
- else
- return (void *)base + mid * size;
+ if (result == 0)
+ return (void *)pivot;
+
+ if (result > 0) {
+ base = pivot + size;
+ num--;
+ }
+ num >>= 1;
}
return NULL;
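For reference, a minimal caller of the reworked bsearch() might look like the sketch below; the array, key and comparator are hypothetical and not part of this patch:

#include <linux/bsearch.h>
#include <linux/kernel.h>

/* Compare an int key against an int array element, as bsearch() expects. */
static int cmp_int(const void *key, const void *elt)
{
	int k = *(const int *)key;
	int e = *(const int *)elt;

	if (k < e)
		return -1;
	if (k > e)
		return 1;
	return 0;
}

static void bsearch_example(void)
{
	static const int sorted[] = { 1, 3, 5, 7, 11 };
	int key = 7;
	const int *found;

	found = bsearch(&key, sorted, ARRAY_SIZE(sorted),
			sizeof(sorted[0]), cmp_int);
	if (found)
		pr_info("found %d\n", *found);
}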
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 17afb0430161..2f5349c6e81a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -18,6 +18,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
+#include <linux/kmemleak.h>
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
@@ -110,6 +111,7 @@ static void fill_pool(void)
if (!new)
return;
+ kmemleak_ignore(new);
raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
debug_objects_allocated++;
@@ -1080,6 +1082,7 @@ static int __init debug_objects_replace_static_objects(void)
obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
if (!obj)
goto free;
+ kmemleak_ignore(obj);
hlist_add_head(&obj->node, &objects);
}
diff --git a/lib/extable.c b/lib/extable.c
index 62968daa66a9..f54996fdd0b8 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
@@ -51,7 +52,7 @@ static void swap_ex(void *a, void *b, int size)
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*/
-static int cmp_ex(const void *a, const void *b)
+static int cmp_ex_sort(const void *a, const void *b)
{
const struct exception_table_entry *x = a, *y = b;
@@ -67,7 +68,7 @@ void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
- cmp_ex, swap_ex);
+ cmp_ex_sort, swap_ex);
}
#ifdef CONFIG_MODULES
@@ -93,6 +94,20 @@ void trim_init_extable(struct module *m)
#endif /* !ARCH_HAS_SORT_EXTABLE */
#ifndef ARCH_HAS_SEARCH_EXTABLE
+
+static int cmp_ex_search(const void *key, const void *elt)
+{
+ const struct exception_table_entry *_elt = elt;
+ unsigned long _key = *(unsigned long *)key;
+
+ /* avoid overflow */
+ if (_key > ex_to_insn(_elt))
+ return 1;
+ if (_key < ex_to_insn(_elt))
+ return -1;
+ return 0;
+}
+
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
@@ -101,25 +116,11 @@ void trim_init_extable(struct module *m)
* already sorted.
*/
const struct exception_table_entry *
-search_extable(const struct exception_table_entry *first,
- const struct exception_table_entry *last,
+search_extable(const struct exception_table_entry *base,
+ const size_t num,
unsigned long value)
{
- while (first <= last) {
- const struct exception_table_entry *mid;
-
- mid = ((last - first) >> 1) + first;
- /*
- * careful, the distance between value and insn
- * can be larger than MAX_LONG:
- */
- if (ex_to_insn(mid) < value)
- first = mid + 1;
- else if (ex_to_insn(mid) > value)
- last = mid - 1;
- else
- return mid;
- }
- return NULL;
+ return bsearch(&value, base, num,
+ sizeof(struct exception_table_entry), cmp_ex_search);
}
#endif
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 4ff157159a0d..cf7b129b0b2b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -107,6 +107,17 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
bool should_fail(struct fault_attr *attr, ssize_t size)
{
+ if (in_task()) {
+ unsigned int fail_nth = READ_ONCE(current->fail_nth);
+
+ if (fail_nth) {
+ if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+ goto fail;
+
+ return false;
+ }
+ }
+
/* No need to check any other properties if the probability is 0 */
if (attr->probability == 0)
return false;
@@ -134,6 +145,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
if (!fail_stacktrace(attr))
return false;
+fail:
fail_dump(attr);
if (atomic_read(&attr->times) != -1)
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 245900b98c8e..df495fe81421 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -1,27 +1,38 @@
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/interval_tree.h>
#include <linux/random.h>
+#include <linux/slab.h>
#include <asm/timex.h>
-#define NODES 100
-#define PERF_LOOPS 100000
-#define SEARCHES 100
-#define SEARCH_LOOPS 10000
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the interval tree");
+__param(int, perf_loops, 100000, "Number of iterations modifying the tree");
+
+__param(int, nsearches, 100, "Number of searches to the interval tree");
+__param(int, search_loops, 10000, "Number of iterations searching the tree");
+__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
+
+__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
static struct rb_root root = RB_ROOT;
-static struct interval_tree_node nodes[NODES];
-static u32 queries[SEARCHES];
+static struct interval_tree_node *nodes = NULL;
+static u32 *queries = NULL;
static struct rnd_state rnd;
static inline unsigned long
-search(unsigned long query, struct rb_root *root)
+search(struct rb_root *root, unsigned long start, unsigned long last)
{
struct interval_tree_node *node;
unsigned long results = 0;
- for (node = interval_tree_iter_first(root, query, query); node;
- node = interval_tree_iter_next(node, query, query))
+ for (node = interval_tree_iter_first(root, start, last); node;
+ node = interval_tree_iter_next(node, start, last))
results++;
return results;
}
@@ -29,19 +40,22 @@ search(unsigned long query, struct rb_root *root)
static void init(void)
{
int i;
- for (i = 0; i < NODES; i++) {
- u32 a = prandom_u32_state(&rnd);
- u32 b = prandom_u32_state(&rnd);
- if (a <= b) {
- nodes[i].start = a;
- nodes[i].last = b;
- } else {
- nodes[i].start = b;
- nodes[i].last = a;
- }
+
+ for (i = 0; i < nnodes; i++) {
+ u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ u32 a = (prandom_u32_state(&rnd) >> 4) % b;
+
+ nodes[i].start = a;
+ nodes[i].last = b;
}
- for (i = 0; i < SEARCHES; i++)
- queries[i] = prandom_u32_state(&rnd);
+
+ /*
+ * Limit the search scope to what the user defined.
+ * Otherwise we are merely measuring empty walks,
+ * which is pointless.
+ */
+ for (i = 0; i < nsearches; i++)
+ queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
}
static int interval_tree_test_init(void)
@@ -50,6 +64,16 @@ static int interval_tree_test_init(void)
unsigned long results;
cycles_t time1, time2, time;
+ nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
+ if (!queries) {
+ kfree(nodes);
+ return -ENOMEM;
+ }
+
printk(KERN_ALERT "interval tree insert/remove");
prandom_seed_state(&rnd, 3141592653589793238ULL);
@@ -57,39 +81,46 @@ static int interval_tree_test_init(void)
time1 = get_cycles();
- for (i = 0; i < PERF_LOOPS; i++) {
- for (j = 0; j < NODES; j++)
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
interval_tree_insert(nodes + j, &root);
- for (j = 0; j < NODES; j++)
+ for (j = 0; j < nnodes; j++)
interval_tree_remove(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
- time = div_u64(time, PERF_LOOPS);
+ time = div_u64(time, perf_loops);
printk(" -> %llu cycles\n", (unsigned long long)time);
printk(KERN_ALERT "interval tree search");
- for (j = 0; j < NODES; j++)
+ for (j = 0; j < nnodes; j++)
interval_tree_insert(nodes + j, &root);
time1 = get_cycles();
results = 0;
- for (i = 0; i < SEARCH_LOOPS; i++)
- for (j = 0; j < SEARCHES; j++)
- results += search(queries[j], &root);
+ for (i = 0; i < search_loops; i++)
+ for (j = 0; j < nsearches; j++) {
+ unsigned long start = search_all ? 0 : queries[j];
+ unsigned long last = search_all ? max_endpoint : queries[j];
+
+ results += search(&root, start, last);
+ }
time2 = get_cycles();
time = time2 - time1;
- time = div_u64(time, SEARCH_LOOPS);
- results = div_u64(results, SEARCH_LOOPS);
+ time = div_u64(time, search_loops);
+ results = div_u64(results, search_loops);
printk(" -> %llu cycles (%lu results)\n",
(unsigned long long)time, results);
+ kfree(queries);
+ kfree(nodes);
+
return -EAGAIN; /* Fail will directly unload the module */
}
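The __param() wrapper above is ordinary module-parameter boilerplate; for instance, the nnodes declaration expands to roughly the following:

static int nnodes = 100;
module_param(nnodes, int, 0444);	/* read-only, e.g. under /sys/module/interval_tree_test/parameters/ */
MODULE_PARM_DESC(nnodes, "Number of nodes in the interval tree");

This lets runs be tuned at load time, e.g. "modprobe interval_tree_test nnodes=1000 perf_loops=1000", instead of recompiling the test with different #defines.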
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index c9a69064462f..52c8dd6d8e82 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
} \
}
+static int copyout(void __user *to, const void *from, size_t n)
+{
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
+ n = raw_copy_to_user(to, from, n);
+ }
+ return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+ if (access_ok(VERIFY_READ, from, n)) {
+ kasan_check_write(to, n);
+ n = raw_copy_from_user(to, from, n);
+ }
+ return n;
+}
+
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
if (unlikely(!bytes))
return 0;
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
from = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
kaddr = kmap(page);
from = kaddr + offset;
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
if (unlikely(!bytes))
return 0;
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
to = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
kaddr = kmap(page);
to = kaddr + offset;
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
return bytes;
}
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
if (unlikely(i->type & ITER_PIPE))
return copy_pipe_to_iter(addr, bytes, i);
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len),
+ copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
memcpy_to_page(v.bv_page, v.bv_offset,
(from += v.bv_len) - v.bv_len, v.bv_len),
memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len),
+ copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
if (unlikely(i->count < bytes))
return false;
+ if (iter_is_iovec(i))
+ might_fault();
iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user((to += v.iov_len) - v.iov_len,
+ if (copyin((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -613,10 +637,10 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -634,10 +658,10 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -659,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+ size_t v = n + offset;
+ if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+ return true;
+ WARN_ON(1);
+ return false;
+}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -679,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
- size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+ size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
} else
@@ -722,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
if (unlikely(i->type & ITER_PIPE))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
- __clear_user(v.iov_base, v.iov_len),
+ clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
memset(v.iov_base, 0, v.iov_len)
)
@@ -735,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+ if (unlikely(!page_copy_sane(page, offset, bytes))) {
+ kunmap_atomic(kaddr);
+ return 0;
+ }
if (unlikely(i->type & ITER_PIPE)) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
}
iterate_all_kinds(i, bytes, v,
- __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
+ copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 719c155fce20..e590523ea476 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -52,6 +52,8 @@ static const char *kobject_actions[] = {
[KOBJ_MOVE] = "move",
[KOBJ_ONLINE] = "online",
[KOBJ_OFFLINE] = "offline",
+ [KOBJ_BIND] = "bind",
+ [KOBJ_UNBIND] = "unbind",
};
static int kobject_action_type(const char *buf, size_t count,
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index bf85e05ce858..720144075c1e 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -51,13 +51,15 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
res = 0;
rv = 0;
- while (*s) {
+ while (1) {
+ unsigned int c = *s;
+ unsigned int lc = c | 0x20; /* don't tolower() this line */
unsigned int val;
- if ('0' <= *s && *s <= '9')
- val = *s - '0';
- else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
- val = _tolower(*s) - 'a' + 10;
+ if ('0' <= c && c <= '9')
+ val = c - '0';
+ else if ('a' <= lc && lc <= 'f')
+ val = lc - 'a' + 10;
else
break;
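The "c | 0x20" line works because, in ASCII, setting bit 0x20 maps 'A'-'F' onto 'a'-'f' while leaving the digits to the separate '0'-'9' check. As an illustration only (hypothetical helper, not from this patch):

/* Hex digit value using the same OR-0x20 lowering as _parse_integer(). */
static int hex_digit_value(unsigned int c)
{
	unsigned int lc = c | 0x20;	/* 'A'..'F' -> 'a'..'f' */

	if (c >= '0' && c <= '9')
		return c - '0';
	if (lc >= 'a' && lc <= 'f')
		return lc - 'a' + 10;
	return -1;			/* not a hex digit */
}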
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6f2b135dc5e8..cd0b5c964bd0 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -363,6 +363,103 @@ static void rsem_AA3(void)
}
/*
+ * read_lock(A)
+ * spin_lock(B)
+ * spin_lock(B)
+ * write_lock(A)
+ */
+static void rlock_ABBA1(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+static void rwsem_ABBA1(void)
+{
+ RSL(X1);
+ ML(Y1);
+ MU(Y1);
+ RSU(X1);
+
+ ML(Y1);
+ WSL(X1);
+ WSU(X1);
+ MU(Y1); // should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ * spin_lock(B)
+ * read_lock(A)
+ */
+static void rlock_ABBA2(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ RL(X1);
+ RU(X1);
+ U(Y1); // should NOT fail
+}
+
+static void rwsem_ABBA2(void)
+{
+ RSL(X1);
+ ML(Y1);
+ MU(Y1);
+ RSU(X1);
+
+ ML(Y1);
+ RSL(X1);
+ RSU(X1);
+ MU(Y1); // should fail
+}
+
+
+/*
+ * write_lock(A)
+ * spin_lock(B)
+ * spin_lock(B)
+ * write_lock(A)
+ */
+static void rlock_ABBA3(void)
+{
+ WL(X1);
+ L(Y1);
+ U(Y1);
+ WU(X1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+static void rwsem_ABBA3(void)
+{
+ WSL(X1);
+ ML(Y1);
+ MU(Y1);
+ WSU(X1);
+
+ ML(Y1);
+ WSL(X1);
+ WSU(X1);
+ MU(Y1); // should fail
+}
+
+/*
* ABBA deadlock:
*/
@@ -1056,8 +1153,6 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
if (debug_locks != expected) {
unexpected_testcase_failures++;
pr_cont("FAILED|");
-
- dump_stack();
} else {
testcase_successes++;
pr_cont(" ok |");
@@ -1933,6 +2028,30 @@ void locking_selftest(void)
dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
pr_cont("\n");
+ print_testname("mixed read-lock/lock-write ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+ /*
+ * Lockdep does indeed fail here, but there's nothing we can do about
+ * that now. Don't kill lockdep for it.
+ */
+ unexpected_testcase_failures--;
+
+ pr_cont(" |");
+ dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("mixed read-lock/lock-read ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA2, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA2, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("mixed write-lock/lock-write ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA3, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
+
printk(" --------------------------------------------------------------------------\n");
/*
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 5a0f75a3bf01..eead4b339466 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -364,11 +364,11 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
}
miter.consumed = lzeros;
- sg_miter_stop(&miter);
nbytes -= lzeros;
nbits = nbytes * 8;
if (nbits > MAX_EXTERN_MPI_BITS) {
+ sg_miter_stop(&miter);
pr_info("MPI: mpi too large (%u bits)\n", nbits);
return NULL;
}
@@ -376,6 +376,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
if (nbytes > 0)
nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
+ sg_miter_stop(&miter);
+
nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
val = mpi_alloc(nlimbs);
if (!val)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8ee7e5ec21be..3bf4a9984f4c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,6 +72,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
}
EXPORT_SYMBOL(percpu_counter_set);
+/**
+ * This function is both preempt and irq safe. The former is due to explicit
+ * preemption disable. The latter is guaranteed by the fact that the slow path
+ * is explicitly protected by an irq-safe spinlock, whereas the fast path uses
+ * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
+ * muck with irq state before calling this one.
+ */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
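As a simplified sketch of the structure that comment describes (not the verbatim upstream body): the fast path adds into the per-CPU counter with this_cpu_add(), and only the batch-overflow slow path takes the irq-safe spinlock:

void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		/* slow path: fold the local count into fbc->count under the irq-safe lock */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* fast path: per-CPU add, irq-safe by definition */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}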
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 898e87998417..3527eb364964 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2022,6 +2022,7 @@ void radix_tree_iter_delete(struct radix_tree_root *root,
if (__radix_tree_delete(root, iter->node, slot))
iter->index = iter->next_index;
}
+EXPORT_SYMBOL(radix_tree_iter_delete);
/**
* radix_tree_delete_item - delete an item from a radix tree
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 3057011f5599..a93adf6dcfb2 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -5,7 +5,7 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
-raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
+raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
@@ -26,7 +26,9 @@ NEON_FLAGS := -ffreestanding
ifeq ($(ARCH),arm)
NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
endif
+CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
ifeq ($(ARCH),arm64)
+CFLAGS_REMOVE_recov_neon_inner.o += -mgeneral-regs-only
CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only
CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 7857049fd7d3..476994723258 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -113,6 +113,9 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_S390
&raid6_recov_s390xc,
#endif
+#if defined(CONFIG_KERNEL_MODE_NEON)
+ &raid6_recov_neon,
+#endif
&raid6_recov_intx1,
NULL
};
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 39787db588b0..e824d088f72c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -125,6 +125,26 @@ int main(int argc, char *argv[])
printf("EXPORT_SYMBOL(raid6_gfexp);\n");
printf("#endif\n");
+ /* Compute log-of-2 table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gflog[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ v = 255;
+ for (k = 0; k < 256; k++)
+ if (exptbl[k] == (i + j)) {
+ v = k;
+ break;
+ }
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gflog);\n");
+ printf("#endif\n");
+
/* Compute inverse table x^-1 == x^254 */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfinv[256] =\n" "{\n");
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index 4fa51b761dd0..d5242f544551 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -46,8 +46,12 @@ static inline unative_t SHLBYTE(unative_t v)
*/
static inline unative_t MASK(unative_t v)
{
- const uint8x16_t temp = NBYTES(0);
- return (unative_t)vcltq_s8((int8x16_t)v, (int8x16_t)temp);
+ return (unative_t)vshrq_n_s8((int8x16_t)v, 7);
+}
+
+static inline unative_t PMUL(unative_t v, unative_t u)
+{
+ return (unative_t)vmulq_p8((poly8x16_t)v, (poly8x16_t)u);
}
void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
@@ -110,7 +114,30 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
wq$$ = veorq_u8(w1$$, wd$$);
}
/* P/Q left side optimization */
- for ( z = start-1 ; z >= 0 ; z-- ) {
+ for ( z = start-1 ; z >= 3 ; z -= 4 ) {
+ w2$$ = vshrq_n_u8(wq$$, 4);
+ w1$$ = vshlq_n_u8(wq$$, 4);
+
+ w2$$ = PMUL(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ }
+
+ switch (z) {
+ case 2:
+ w2$$ = vshrq_n_u8(wq$$, 5);
+ w1$$ = vshlq_n_u8(wq$$, 3);
+
+ w2$$ = PMUL(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ break;
+ case 1:
+ w2$$ = vshrq_n_u8(wq$$, 6);
+ w1$$ = vshlq_n_u8(wq$$, 2);
+
+ w2$$ = PMUL(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ break;
+ case 0:
w2$$ = MASK(wq$$);
w1$$ = SHLBYTE(wq$$);
diff --git a/lib/raid6/recov_neon.c b/lib/raid6/recov_neon.c
new file mode 100644
index 000000000000..eeb5c4065b92
--- /dev/null
+++ b/lib/raid6/recov_neon.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/raid/pq.h>
+
+#ifdef __KERNEL__
+#include <asm/neon.h>
+#else
+#define kernel_neon_begin()
+#define kernel_neon_end()
+#define cpu_has_neon() (1)
+#endif
+
+static int raid6_has_neon(void)
+{
+ return cpu_has_neon();
+}
+
+void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
+ uint8_t *dq, const uint8_t *pbmul,
+ const uint8_t *qmul);
+
+void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
+ const uint8_t *qmul);
+
+static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
+
+ kernel_neon_begin();
+ __raid6_2data_recov_neon(bytes, p, q, dp, dq, pbmul, qmul);
+ kernel_neon_end();
+}
+
+static void raid6_datap_recov_neon(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_neon_begin();
+ __raid6_datap_recov_neon(bytes, p, q, dq, qmul);
+ kernel_neon_end();
+}
+
+const struct raid6_recov_calls raid6_recov_neon = {
+ .data2 = raid6_2data_recov_neon,
+ .datap = raid6_datap_recov_neon,
+ .valid = raid6_has_neon,
+ .name = "neon",
+ .priority = 10,
+};
diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c
new file mode 100644
index 000000000000..8cd20c9f834a
--- /dev/null
+++ b/lib/raid6/recov_neon_inner.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <arm_neon.h>
+
+static const uint8x16_t x0f = {
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+};
+
+#ifdef CONFIG_ARM
+/*
+ * AArch32 does not provide this intrinsic natively because it does not
+ * implement the underlying instruction. AArch32 only provides a 64-bit
+ * wide vtbl.8 instruction, so use that instead.
+ */
+static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
+{
+ union {
+ uint8x16_t val;
+ uint8x8x2_t pair;
+ } __a = { a };
+
+ return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
+ vtbl2_u8(__a.pair, vget_high_u8(b)));
+}
+#endif
+
+void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
+ uint8_t *dq, const uint8_t *pbmul,
+ const uint8_t *qmul)
+{
+ uint8x16_t pm0 = vld1q_u8(pbmul);
+ uint8x16_t pm1 = vld1q_u8(pbmul + 16);
+ uint8x16_t qm0 = vld1q_u8(qmul);
+ uint8x16_t qm1 = vld1q_u8(qmul + 16);
+
+ /*
+ * while ( bytes-- ) {
+ * uint8_t px, qx, db;
+ *
+ * px = *p ^ *dp;
+ * qx = qmul[*q ^ *dq];
+ * *dq++ = db = pbmul[px] ^ qx;
+ * *dp++ = db ^ px;
+ * p++; q++;
+ * }
+ */
+
+ while (bytes) {
+ uint8x16_t vx, vy, px, qx, db;
+
+ px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
+ vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
+
+ vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
+ vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
+ vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
+ qx = veorq_u8(vx, vy);
+
+ vy = (uint8x16_t)vshrq_n_s16((int16x8_t)px, 4);
+ vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
+ vy = vqtbl1q_u8(pm1, vandq_u8(vy, x0f));
+ vx = veorq_u8(vx, vy);
+ db = veorq_u8(vx, qx);
+
+ vst1q_u8(dq, db);
+ vst1q_u8(dp, veorq_u8(db, px));
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dp += 16;
+ dq += 16;
+ }
+}
+
+void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
+ const uint8_t *qmul)
+{
+ uint8x16_t qm0 = vld1q_u8(qmul);
+ uint8x16_t qm1 = vld1q_u8(qmul + 16);
+
+ /*
+ * while (bytes--) {
+ * *p++ ^= *dq = qmul[*q ^ *dq];
+ * q++; dq++;
+ * }
+ */
+
+ while (bytes) {
+ uint8x16_t vx, vy;
+
+ vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
+
+ vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
+ vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
+ vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
+ vx = veorq_u8(vx, vy);
+ vy = veorq_u8(vx, vld1q_u8(p));
+
+ vst1q_u8(dq, vx);
+ vst1q_u8(p, vy);
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dq += 16;
+ }
+}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index d9e7274a04cd..707ca5d677c6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -211,11 +211,10 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
- if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
- gfp != GFP_KERNEL)
+ if (gfp != GFP_KERNEL)
tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
- if (tbl == NULL && gfp == GFP_KERNEL)
- tbl = vzalloc(size);
+ else
+ tbl = kvzalloc(size, gfp);
size = nbuckets;
@@ -235,7 +234,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
INIT_LIST_HEAD(&tbl->walkers);
- get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+ tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
diff --git a/lib/string.c b/lib/string.c
index 1c1fc9187b05..ebbb99c775bd 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -978,3 +978,10 @@ char *strreplace(char *s, char old, char new)
return s;
}
EXPORT_SYMBOL(strreplace);
+
+void fortify_panic(const char *name)
+{
+ pr_emerg("detected buffer overflow in %s\n", name);
+ BUG();
+}
+EXPORT_SYMBOL(fortify_panic);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a8d74a733a38..8c6c83ef57a4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -30,6 +30,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
+#include <linux/mem_encrypt.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -155,6 +156,15 @@ unsigned long swiotlb_size_or_default(void)
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
+void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
+
+/* For swiotlb, clear memory encryption mask from dma addresses */
+static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
+ phys_addr_t address)
+{
+ return __sme_clr(phys_to_dma(hwdev, address));
+}
+
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
@@ -183,6 +193,31 @@ void swiotlb_print_info(void)
bytes >> 20, vstart, vend - 1);
}
+/*
+ * Early SWIOTLB allocation may be too early to allow an architecture to
+ * perform the desired operations. This function allows the architecture to
+ * call SWIOTLB when the operations are possible. It needs to be called
+ * before the SWIOTLB memory is used.
+ */
+void __init swiotlb_update_mem_attributes(void)
+{
+ void *vaddr;
+ unsigned long bytes;
+
+ if (no_iotlb_memory || late_alloc)
+ return;
+
+ vaddr = phys_to_virt(io_tlb_start);
+ bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
+ swiotlb_set_mem_attributes(vaddr, bytes);
+ memset(vaddr, 0, bytes);
+
+ vaddr = phys_to_virt(io_tlb_overflow_buffer);
+ bytes = PAGE_ALIGN(io_tlb_overflow);
+ swiotlb_set_mem_attributes(vaddr, bytes);
+ memset(vaddr, 0, bytes);
+}
+
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
void *v_overflow_buffer;
@@ -320,6 +355,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
io_tlb_start = virt_to_phys(tlb);
io_tlb_end = io_tlb_start + bytes;
+ swiotlb_set_mem_attributes(tlb, bytes);
memset(tlb, 0, bytes);
/*
@@ -330,6 +366,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!v_overflow_buffer)
goto cleanup2;
+ swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
+ memset(v_overflow_buffer, 0, io_tlb_overflow);
io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
/*
@@ -469,6 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+ if (sme_active())
+ pr_warn_once("SME is active and system is using DMA bounce buffers\n");
+
mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
@@ -581,7 +622,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
return SWIOTLB_MAP_ERROR;
}
- start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
+ start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
dir, attrs);
}
@@ -702,7 +743,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
goto err_warn;
ret = phys_to_virt(paddr);
- dev_addr = phys_to_dma(hwdev, paddr);
+ dev_addr = swiotlb_phys_to_dma(hwdev, paddr);
/* Confirm address can be DMA'd by device */
if (dev_addr + size - 1 > dma_mask) {
@@ -812,10 +853,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
map = map_single(dev, phys, size, dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
swiotlb_full(dev, size, dir, 1);
- return phys_to_dma(dev, io_tlb_overflow_buffer);
+ return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
- dev_addr = phys_to_dma(dev, map);
+ dev_addr = swiotlb_phys_to_dma(dev, map);
/* Ensure that the address returned is DMA'ble */
if (dma_capable(dev, dev_addr, size))
@@ -824,7 +865,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
- return phys_to_dma(dev, io_tlb_overflow_buffer);
+ return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
@@ -958,7 +999,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
sg_dma_len(sgl) = 0;
return 0;
}
- sg->dma_address = phys_to_dma(hwdev, map);
+ sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
} else
sg->dma_address = dev_addr;
sg_dma_len(sg) = sg->length;
@@ -1026,7 +1067,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
- return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
+ return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
@@ -1039,6 +1080,6 @@ EXPORT_SYMBOL(swiotlb_dma_mapping_error);
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+ return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index e2cbd43d193c..2526a2975c51 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -333,10 +333,39 @@ static void __init test_bitmap_u32_array_conversions(void)
}
}
+static void noinline __init test_mem_optimisations(void)
+{
+ DECLARE_BITMAP(bmap1, 1024);
+ DECLARE_BITMAP(bmap2, 1024);
+ unsigned int start, nbits;
+
+ for (start = 0; start < 1024; start += 8) {
+ memset(bmap1, 0x5a, sizeof(bmap1));
+ memset(bmap2, 0x5a, sizeof(bmap2));
+ for (nbits = 0; nbits < 1024 - start; nbits += 8) {
+ bitmap_set(bmap1, start, nbits);
+ __bitmap_set(bmap2, start, nbits);
+ if (!bitmap_equal(bmap1, bmap2, 1024))
+ printk("set not equal %d %d\n", start, nbits);
+ if (!__bitmap_equal(bmap1, bmap2, 1024))
+ printk("set not __equal %d %d\n", start, nbits);
+
+ bitmap_clear(bmap1, start, nbits);
+ __bitmap_clear(bmap2, start, nbits);
+ if (!bitmap_equal(bmap1, bmap2, 1024))
+ printk("clear not equal %d %d\n", start, nbits);
+ if (!__bitmap_equal(bmap1, bmap2, 1024))
+ printk("clear not __equal %d %d\n", start,
+ nbits);
+ }
+ }
+}
+
static int __init test_bitmap_init(void)
{
test_zero_fill_copy();
test_bitmap_u32_array_conversions();
+ test_mem_optimisations();
if (failed_tests == 0)
pr_info("all %u tests passed\n", total_tests);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 09371b0a9baf..64a4c76cba2b 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -19,10 +19,85 @@
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#define TEST_FIRMWARE_NAME "test-firmware.bin"
+#define TEST_FIRMWARE_NUM_REQS 4
static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;
+struct test_batched_req {
+ u8 idx;
+ int rc;
+ bool sent;
+ const struct firmware *fw;
+ const char *name;
+ struct completion completion;
+ struct task_struct *task;
+ struct device *dev;
+};
+
+/**
+ * test_config - represents configuration for the test for different triggers
+ *
+ * @name: the name of the firmware file to look for
+ * @sync_direct: if true, request_firmware_direct() will be used instead of
+ *	request_firmware() when the sync trigger is used.
+ * @send_uevent: whether or not to send a uevent for async requests
+ * @num_requests: number of requests to try per test case. This is trigger
+ * specific.
+ * @reqs: stores all request information
+ * @read_fw_idx: index of the thread whose firmware results we want to read
+ *	via the read_fw trigger.
+ * @test_result: a test may use this to collect the result from the call
+ * of the request_firmware*() calls used in their tests. In order of
+ * priority we always keep first any setup error. If no setup errors were
+ * found then we move on to the first error encountered while running the
+ * API. Note that for async calls this typically will be a successful
+ * result (0) unless of course you've used bogus parameters, or the system
+ * is out of memory. In the async case the callback is expected to do a
+ * bit more homework to figure out what happened; unfortunately, the only
+ * information passed today on error is the fact that no firmware was
+ * found so we can only assume -ENOENT on async calls if the firmware is
+ * NULL.
+ *
+ * Errors you can expect:
+ *
+ * API specific:
+ *
+ * 0: success for sync, for async it means request was sent
+ * -EINVAL: invalid parameters or request
+ * -ENOENT: files not found
+ *
+ * System environment:
+ *
+ * -ENOMEM: memory pressure on system
+ * -ENODEV: out of devices to test
+ * -EINVAL: an unexpected error has occurred
+ * @req_firmware: if @sync_direct is true this is set to
+ * request_firmware_direct(), otherwise request_firmware()
+ */
+struct test_config {
+ char *name;
+ bool sync_direct;
+ bool send_uevent;
+ u8 num_requests;
+ u8 read_fw_idx;
+
+ /*
+ * These below don't belong here but we'll move them once we create
+ * a struct fw_test_device and stuff the misc_dev under there later.
+ */
+ struct test_batched_req *reqs;
+ int test_result;
+ int (*req_firmware)(const struct firmware **fw, const char *name,
+ struct device *device);
+};
+
+struct test_config *test_fw_config;
+
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
size_t size, loff_t *offset)
{
@@ -42,6 +117,338 @@ static const struct file_operations test_fw_fops = {
.read = test_fw_misc_read,
};
+static void __test_release_all_firmware(void)
+{
+ struct test_batched_req *req;
+ u8 i;
+
+ if (!test_fw_config->reqs)
+ return;
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (req->fw)
+ release_firmware(req->fw);
+ }
+
+ vfree(test_fw_config->reqs);
+ test_fw_config->reqs = NULL;
+}
+
+static void test_release_all_firmware(void)
+{
+ mutex_lock(&test_fw_mutex);
+ __test_release_all_firmware();
+ mutex_unlock(&test_fw_mutex);
+}
+
+
+static void __test_firmware_config_free(void)
+{
+ __test_release_all_firmware();
+ kfree_const(test_fw_config->name);
+ test_fw_config->name = NULL;
+}
+
+/*
+ * XXX: move to kstrncpy() once merged.
+ *
+ * Users should use kfree_const() when freeing these.
+ */
+static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
+{
+ *dst = kstrndup(name, count, gfp);
+ if (!*dst)
+ return -ENOSPC;
+ return count;
+}
+
+static int __test_firmware_config_init(void)
+{
+ int ret;
+
+ ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
+ strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+
+ test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
+ test_fw_config->send_uevent = true;
+ test_fw_config->sync_direct = false;
+ test_fw_config->req_firmware = request_firmware;
+ test_fw_config->test_result = 0;
+ test_fw_config->reqs = NULL;
+
+ return 0;
+
+out:
+ __test_firmware_config_free();
+ return ret;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+
+ __test_firmware_config_free();
+
+ ret = __test_firmware_config_init();
+ if (ret < 0) {
+ ret = -ENOMEM;
+ pr_err("could not alloc settings for config trigger: %d\n",
+ ret);
+ goto out;
+ }
+
+ pr_info("reset\n");
+ ret = count;
+
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+
+ mutex_lock(&test_fw_mutex);
+
+ len += snprintf(buf, PAGE_SIZE,
+ "Custom trigger configuration for: %s\n",
+ dev_name(dev));
+
+ if (test_fw_config->name)
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "name:\t%s\n",
+ test_fw_config->name);
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "name:\tEMPTY\n");
+
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "num_requests:\t%u\n", test_fw_config->num_requests);
+
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "send_uevent:\t\t%s\n",
+ test_fw_config->send_uevent ?
+ "FW_ACTION_HOTPLUG" :
+ "FW_ACTION_NOHOTPLUG");
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "sync_direct:\t\t%s\n",
+ test_fw_config->sync_direct ? "true" : "false");
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
+
+ mutex_unlock(&test_fw_mutex);
+
+ return len;
+}
+static DEVICE_ATTR_RO(config);
+
+static ssize_t config_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ kfree_const(test_fw_config->name);
+ ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+/*
+ * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
+ */
+static ssize_t config_test_show_str(char *dst,
+ char *src)
+{
+ int len;
+
+ mutex_lock(&test_fw_mutex);
+ len = snprintf(dst, PAGE_SIZE, "%s\n", src);
+ mutex_unlock(&test_fw_mutex);
+
+ return len;
+}
+
+static int test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ if (strtobool(buf, cfg) < 0)
+ ret = -EINVAL;
+ else
+ ret = size;
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+static ssize_t
+test_dev_config_show_bool(char *buf,
+ bool config)
+{
+ bool val;
+
+ mutex_lock(&test_fw_mutex);
+ val = config;
+ mutex_unlock(&test_fw_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t test_dev_config_show_int(char *buf, int cfg)
+{
+ int val;
+
+ mutex_lock(&test_fw_mutex);
+ val = cfg;
+ mutex_unlock(&test_fw_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ int ret;
+ long new;
+
+ ret = kstrtol(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ if (new < 0 || new > U8_MAX)
+ return -EINVAL;
+
+ mutex_lock(&test_fw_mutex);
+ *(u8 *)cfg = new;
+ mutex_unlock(&test_fw_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
+{
+ u8 val;
+
+ mutex_lock(&test_fw_mutex);
+ val = cfg;
+ mutex_unlock(&test_fw_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t config_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return config_test_show_str(buf, test_fw_config->name);
+}
+static DEVICE_ATTR(config_name, 0644, config_name_show, config_name_store);
+
+static ssize_t config_num_requests_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+ mutex_unlock(&test_fw_mutex);
+
+ rc = test_dev_config_update_u8(buf, count,
+ &test_fw_config->num_requests);
+
+out:
+ return rc;
+}
+
+static ssize_t config_num_requests_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_u8(buf, test_fw_config->num_requests);
+}
+static DEVICE_ATTR(config_num_requests, 0644, config_num_requests_show,
+ config_num_requests_store);
+
+static ssize_t config_sync_direct_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc = test_dev_config_update_bool(buf, count,
+ &test_fw_config->sync_direct);
+
+ if (rc == count)
+ test_fw_config->req_firmware = test_fw_config->sync_direct ?
+ request_firmware_direct :
+ request_firmware;
+ return rc;
+}
+
+static ssize_t config_sync_direct_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
+}
+static DEVICE_ATTR(config_sync_direct, 0644, config_sync_direct_show,
+ config_sync_direct_store);
+
+static ssize_t config_send_uevent_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf, count,
+ &test_fw_config->send_uevent);
+}
+
+static ssize_t config_send_uevent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
+}
+static DEVICE_ATTR(config_send_uevent, 0644, config_send_uevent_show,
+ config_send_uevent_store);
+
+static ssize_t config_read_fw_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_u8(buf, count,
+ &test_fw_config->read_fw_idx);
+}
+
+static ssize_t config_read_fw_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
+}
+static DEVICE_ATTR(config_read_fw_idx, 0644, config_read_fw_idx_show,
+ config_read_fw_idx_store);
+
+
static ssize_t trigger_request_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -170,12 +577,301 @@ out:
}
static DEVICE_ATTR_WO(trigger_custom_fallback);
+static int test_fw_run_batch_request(void *data)
+{
+ struct test_batched_req *req = data;
+
+ if (!req) {
+ test_fw_config->test_result = -EINVAL;
+ return -EINVAL;
+ }
+
+ req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
+ if (req->rc) {
+ pr_info("#%u: batched sync load failed: %d\n",
+ req->idx, req->rc);
+ if (!test_fw_config->test_result)
+ test_fw_config->test_result = req->rc;
+ } else if (req->fw) {
+ req->sent = true;
+ pr_info("#%u: batched sync loaded %zu\n",
+ req->idx, req->fw->size);
+ }
+ complete(&req->completion);
+
+ req->task = NULL;
+
+ return 0;
+}
+
+/*
+ * We use a kthread as otherwise the kernel serializes all our sync requests
+ * and we would not be able to mimic batched requests on a sync call. Batched
+ * requests on a sync call can for instance happen on a device driver when
+ * multiple cards are used and firmware loading happens outside of probe.
+ */
+static ssize_t trigger_batched_requests_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_batched_req *req;
+ int rc;
+ u8 i;
+
+ mutex_lock(&test_fw_mutex);
+
+ test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
+ test_fw_config->num_requests * 2);
+ if (!test_fw_config->reqs) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+
+ pr_info("batched sync firmware loading '%s' %u times\n",
+ test_fw_config->name, test_fw_config->num_requests);
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (!req) {
+ WARN_ON(1);
+ rc = -ENOMEM;
+ goto out_bail;
+ }
+ req->fw = NULL;
+ req->idx = i;
+ req->name = test_fw_config->name;
+ req->dev = dev;
+ init_completion(&req->completion);
+ req->task = kthread_run(test_fw_run_batch_request, req,
+ "%s-%u", KBUILD_MODNAME, req->idx);
+ if (!req->task || IS_ERR(req->task)) {
+ pr_err("Setting up thread %u failed\n", req->idx);
+ req->task = NULL;
+ rc = -ENOMEM;
+ goto out_bail;
+ }
+ }
+
+ rc = count;
+
+ /*
+ * We require an explicit release so that calling release_firmware() is
+ * delayed, which improves our chances of forcing a batched request. If
+ * we instead called release_firmware() right away, a successful firmware
+ * request might miss the opportunity to become a batched request.
+ */
+
+out_bail:
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (req->task || req->sent)
+ wait_for_completion(&req->completion);
+ }
+
+ /* Override any worker error if we had a general setup error */
+ if (rc < 0)
+ test_fw_config->test_result = rc;
+
+out_unlock:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_batched_requests);
+
+/*
+ * We wait for each callback to return with the lock held, no need to lock here
+ */
+static void trigger_batched_cb(const struct firmware *fw, void *context)
+{
+ struct test_batched_req *req = context;
+
+ if (!req) {
+ test_fw_config->test_result = -EINVAL;
+ return;
+ }
+
+ /* forces *some* batched requests to queue up */
+ if (!req->idx)
+ ssleep(2);
+
+ req->fw = fw;
+
+ /*
+ * Unfortunately the firmware API gives us nothing other than a null FW
+ * if the firmware was not found on async requests. Best we can do is
+ * just assume -ENOENT. A better API would pass the actual return
+ * value to the callback.
+ */
+ if (!fw && !test_fw_config->test_result)
+ test_fw_config->test_result = -ENOENT;
+
+ complete(&req->completion);
+}
+
+static
+ssize_t trigger_batched_requests_async_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_batched_req *req;
+ bool send_uevent;
+ int rc;
+ u8 i;
+
+ mutex_lock(&test_fw_mutex);
+
+ test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
+ test_fw_config->num_requests * 2);
+ if (!test_fw_config->reqs) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ pr_info("batched loading '%s' custom fallback mechanism %u times\n",
+ test_fw_config->name, test_fw_config->num_requests);
+
+ send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
+ FW_ACTION_NOHOTPLUG;
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (!req) {
+ WARN_ON(1);
+ goto out_bail;
+ }
+ req->name = test_fw_config->name;
+ req->fw = NULL;
+ req->idx = i;
+ init_completion(&req->completion);
+ rc = request_firmware_nowait(THIS_MODULE, send_uevent,
+ req->name,
+ dev, GFP_KERNEL, req,
+ trigger_batched_cb);
+ if (rc) {
+ pr_info("#%u: batched async load failed setup: %d\n",
+ i, rc);
+ req->rc = rc;
+ goto out_bail;
+ } else
+ req->sent = true;
+ }
+
+ rc = count;
+
+out_bail:
+
+ /*
+ * We require an explicit release so that calling release_firmware() is
+ * delayed, which improves our chances of forcing a batched request. If
+ * we instead called release_firmware() right away, a successful firmware
+ * request might miss the opportunity to become a batched request.
+ */
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (req->sent)
+ wait_for_completion(&req->completion);
+ }
+
+ /* Override any worker error if we had a general setup error */
+ if (rc < 0)
+ test_fw_config->test_result = rc;
+
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_batched_requests_async);
+
+static ssize_t test_result_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_int(buf, test_fw_config->test_result);
+}
+static DEVICE_ATTR_RO(test_result);
+
+static ssize_t release_all_firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ test_release_all_firmware();
+ return count;
+}
+static DEVICE_ATTR_WO(release_all_firmware);
+
+static ssize_t read_firmware_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct test_batched_req *req;
+ u8 idx;
+ ssize_t rc = 0;
+
+ mutex_lock(&test_fw_mutex);
+
+ idx = test_fw_config->read_fw_idx;
+ if (idx >= test_fw_config->num_requests) {
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (!test_fw_config->reqs) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ req = &test_fw_config->reqs[idx];
+ if (!req->fw) {
+ pr_err("#%u: failed to async load firmware\n", idx);
+ rc = -ENOENT;
+ goto out;
+ }
+
+ pr_info("#%u: loaded %zu\n", idx, req->fw->size);
+
+ if (req->fw->size > PAGE_SIZE) {
+ pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(buf, req->fw->data, req->fw->size);
+
+ rc = req->fw->size;
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(read_firmware);
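
Putting the knobs together, a batched run is: configure, fire a trigger, pick read_fw_idx, read the blob back through read_firmware, then release. A minimal illustrative userspace sketch (the sysfs base path is an assumption):

	/* Illustrative userspace sketch, not part of the patch. */
	#include <stdio.h>

	/* assumed sysfs location of the test_firmware misc device */
	#define BASE "/sys/devices/virtual/misc/test_firmware"

	static int put_attr(const char *attr, const char *val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path), BASE "/%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		put_attr("config_name", "test-firmware.bin");
		put_attr("config_num_requests", "4");
		put_attr("trigger_batched_requests", "1");
		put_attr("config_read_fw_idx", "0");	/* then read the read_firmware attribute */
		put_attr("release_all_firmware", "1");
		return 0;
	}
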
+
#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
static struct attribute *test_dev_attrs[] = {
+ TEST_FW_DEV_ATTR(reset),
+
+ TEST_FW_DEV_ATTR(config),
+ TEST_FW_DEV_ATTR(config_name),
+ TEST_FW_DEV_ATTR(config_num_requests),
+ TEST_FW_DEV_ATTR(config_sync_direct),
+ TEST_FW_DEV_ATTR(config_send_uevent),
+ TEST_FW_DEV_ATTR(config_read_fw_idx),
+
+ /* These don't use the config at all - they could be ported! */
TEST_FW_DEV_ATTR(trigger_request),
TEST_FW_DEV_ATTR(trigger_async_request),
TEST_FW_DEV_ATTR(trigger_custom_fallback),
+
+ /* These use the config and can use the test_result */
+ TEST_FW_DEV_ATTR(trigger_batched_requests),
+ TEST_FW_DEV_ATTR(trigger_batched_requests_async),
+
+ TEST_FW_DEV_ATTR(release_all_firmware),
+ TEST_FW_DEV_ATTR(test_result),
+ TEST_FW_DEV_ATTR(read_firmware),
NULL,
};
@@ -192,8 +888,17 @@ static int __init test_firmware_init(void)
{
int rc;
+ test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
+ if (!test_fw_config)
+ return -ENOMEM;
+
+ rc = __test_firmware_config_init();
+ if (rc) {
+ kfree(test_fw_config);
+ return rc;
+ }
+
rc = misc_register(&test_fw_misc_device);
if (rc) {
+ kfree(test_fw_config);
pr_err("could not register misc device: %d\n", rc);
return rc;
}
@@ -207,8 +912,13 @@ module_init(test_firmware_init);
static void __exit test_firmware_exit(void)
{
+ mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
misc_deregister(&test_fw_misc_device);
+ __test_firmware_config_free();
+ kfree(test_fw_config);
+ mutex_unlock(&test_fw_mutex);
+
pr_warn("removed interface\n");
}
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
new file mode 100644
index 000000000000..ff9148969b92
--- /dev/null
+++ b/lib/test_kmod.c
@@ -0,0 +1,1246 @@
+/*
+ * kmod stress test driver
+ *
+ * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or at your option any
+ * later version; or, when distributed separately from the Linux kernel or
+ * when incorporated into other software packages, subject to the following
+ * license:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of copyleft-next (version 0.3.1 or later) as published
+ * at http://copyleft-next.org/.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*
+ * This driver provides an interface to trigger and test the kernel's
+ * module loader through a series of configurations and a few triggers.
+ * To test this driver use the following script as root:
+ *
+ * tools/testing/selftests/kmod/kmod.sh --help
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/printk.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#define TEST_START_NUM_THREADS 50
+#define TEST_START_DRIVER "test_module"
+#define TEST_START_TEST_FS "xfs"
+#define TEST_START_TEST_CASE TEST_KMOD_DRIVER
+
+
+static bool force_init_test = false;
+module_param(force_init_test, bool_enable_only, 0644);
+MODULE_PARM_DESC(force_init_test,
+ "Force kicking a test immediately after driver loads");
+
+/*
+ * For device allocation / registration
+ */
+static DEFINE_MUTEX(reg_dev_mutex);
+static LIST_HEAD(reg_test_devs);
+
+/*
+ * num_test_devs actually represents the *next* ID, i.e. the ID of the
+ * next device we will allow to be created.
+ */
+static int num_test_devs;
+
+/**
+ * enum kmod_test_case - linker table test case
+ *
+ * If you add a test case, please be sure to review if you need to set
+ * @need_mod_put for your test case.
+ *
+ * @TEST_KMOD_DRIVER: stress tests request_module()
+ * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
+ */
+enum kmod_test_case {
+ __TEST_KMOD_INVALID = 0,
+
+ TEST_KMOD_DRIVER,
+ TEST_KMOD_FS_TYPE,
+
+ __TEST_KMOD_MAX,
+};
+
+struct test_config {
+ char *test_driver;
+ char *test_fs;
+ unsigned int num_threads;
+ enum kmod_test_case test_case;
+ int test_result;
+};
+
+struct kmod_test_device;
+
+/**
+ * kmod_test_device_info - thread info
+ *
+ * @ret_sync: return value of the sync request_module() call for
+ * @TEST_KMOD_DRIVER
+ * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @task_sync: the task running the sync request for this thread
+ * @thread_idx: thread ID
+ * @test_dev: test device test is being performed under
+ * @need_mod_put: some tests (get_fs_type() is one) require putting the module
+ * (module_put(fs_sync->owner)) when done, otherwise you will not be able
+ * to unload the respective modules and re-test. We use this to keep
+ * track of when we need this and to help out in case we need to
+ * error out and deal with module_put() on error.
+ */
+struct kmod_test_device_info {
+ int ret_sync;
+ struct file_system_type *fs_sync;
+ struct task_struct *task_sync;
+ unsigned int thread_idx;
+ struct kmod_test_device *test_dev;
+ bool need_mod_put;
+};
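
For reference on the need_mod_put accounting, this is the get_fs_type()/module_put() pairing the flag tracks; a minimal sketch assuming <linux/fs.h>, <linux/module.h> and <linux/errno.h>, with "xfs" as an example name only:

	/*
	 * Illustrative only, not part of the patch; the helper name and
	 * "xfs" are just examples.
	 */
	static int example_fs_type_lookup(const char *fs_name)
	{
		struct file_system_type *fs;

		fs = get_fs_type(fs_name);	/* may load the module via kmod */
		if (!fs)
			return -ENOENT;

		/* ... inspect fs ... */

		if (fs->owner)
			module_put(fs->owner);	/* drop the reference get_fs_type() took */
		return 0;
	}
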
+
+/**
+ * kmod_test_device - test device to help test kmod
+ *
+ * @dev_idx: unique ID for test device
+ * @config: configuration for the test
+ * @misc_dev: we use a misc device under the hood
+ * @dev: pointer to misc_dev's own struct device
+ * @config_mutex: protects configuration of test
+ * @trigger_mutex: the test trigger can only be fired once at a time
+ * @thread_mutex: protects the @done count and the @info of each thread
+ * @done: number of threads which have completed or failed
+ * @test_is_oom: when we run out of memory, use this to halt moving forward
+ * @kthreads_done: completion used to signal when all work is done
+ * @list: needed to be part of the reg_test_devs
+ * @info: array of info for each thread
+ */
+struct kmod_test_device {
+ int dev_idx;
+ struct test_config config;
+ struct miscdevice misc_dev;
+ struct device *dev;
+ struct mutex config_mutex;
+ struct mutex trigger_mutex;
+ struct mutex thread_mutex;
+
+ unsigned int done;
+
+ bool test_is_oom;
+ struct completion kthreads_done;
+ struct list_head list;
+
+ struct kmod_test_device_info *info;
+};
+
+static const char *test_case_str(enum kmod_test_case test_case)
+{
+ switch (test_case) {
+ case TEST_KMOD_DRIVER:
+ return "TEST_KMOD_DRIVER";
+ case TEST_KMOD_FS_TYPE:
+ return "TEST_KMOD_FS_TYPE";
+ default:
+ return "invalid";
+ }
+}
+
+static struct miscdevice *dev_to_misc_dev(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev)
+{
+ return container_of(misc_dev, struct kmod_test_device, misc_dev);
+}
+
+static struct kmod_test_device *dev_to_test_dev(struct device *dev)
+{
+ struct miscdevice *misc_dev;
+
+ misc_dev = dev_to_misc_dev(dev);
+
+ return misc_dev_to_test_dev(misc_dev);
+}
+
+/* Must run with thread_mutex held */
+static void kmod_test_done_check(struct kmod_test_device *test_dev,
+ unsigned int idx)
+{
+ struct test_config *config = &test_dev->config;
+
+ test_dev->done++;
+ dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done);
+
+ if (test_dev->done == config->num_threads) {
+ dev_info(test_dev->dev, "Done: %u threads have all run now\n",
+ test_dev->done);
+ dev_info(test_dev->dev, "Last thread to run: %u\n", idx);
+ complete(&test_dev->kthreads_done);
+ }
+}
+
+static void test_kmod_put_module(struct kmod_test_device_info *info)
+{
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+
+ if (!info->need_mod_put)
+ return;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ break;
+ case TEST_KMOD_FS_TYPE:
+ if (info && info->fs_sync && info->fs_sync->owner)
+ module_put(info->fs_sync->owner);
+ break;
+ default:
+ BUG();
+ }
+
+ info->need_mod_put = false;
+}
+
+static int run_request(void *data)
+{
+ struct kmod_test_device_info *info = data;
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ info->ret_sync = request_module("%s", config->test_driver);
+ break;
+ case TEST_KMOD_FS_TYPE:
+ info->fs_sync = get_fs_type(config->test_fs);
+ info->need_mod_put = true;
+ break;
+ default:
+ /* __trigger_config_run() already checked for test sanity */
+ BUG();
+ return -EINVAL;
+ }
+
+ dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx);
+
+ test_kmod_put_module(info);
+
+ mutex_lock(&test_dev->thread_mutex);
+ info->task_sync = NULL;
+ kmod_test_done_check(test_dev, info->thread_idx);
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return 0;
+}
+
+static int tally_work_test(struct kmod_test_device_info *info)
+{
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+ int err_ret = 0;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ /*
+ * Only capture errors, if one is found that's
+ * enough, for now.
+ */
+ if (info->ret_sync != 0)
+ err_ret = info->ret_sync;
+ dev_info(test_dev->dev,
+ "Sync thread %d return status: %d\n",
+ info->thread_idx, info->ret_sync);
+ break;
+ case TEST_KMOD_FS_TYPE:
+ /* For now we make this simple */
+ if (!info->fs_sync)
+ err_ret = -EINVAL;
+ dev_info(test_dev->dev, "Sync thread %u fs: %s\n",
+ info->thread_idx, info->fs_sync ? config->test_fs :
+ "NULL");
+ break;
+ default:
+ BUG();
+ }
+
+ return err_ret;
+}
+
+/*
+ * XXX: add result option to display if all errors did not match.
+ * For now we just keep any error code if one was found.
+ *
+ * If this ran it means *all* tasks were created fine and we
+ * are now just collecting results.
+ *
+ * Only propagate errors, do not override with a subsequent success case.
+ */
+static void tally_up_work(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ struct kmod_test_device_info *info;
+ unsigned int idx;
+ int err_ret = 0;
+ int ret = 0;
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ dev_info(test_dev->dev, "Results:\n");
+
+ for (idx = 0; idx < config->num_threads; idx++) {
+ info = &test_dev->info[idx];
+ ret = tally_work_test(info);
+ if (ret)
+ err_ret = ret;
+ }
+
+ /*
+ * Note: request_module() returns 256 for a module not found even
+ * though modprobe itself returns 1.
+ */
+ config->test_result = err_ret;
+
+ mutex_unlock(&test_dev->thread_mutex);
+}
+
+static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx)
+{
+ struct kmod_test_device_info *info = &test_dev->info[idx];
+ int fail_ret = -ENOMEM;
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ info->thread_idx = idx;
+ info->test_dev = test_dev;
+ info->task_sync = kthread_run(run_request, info, "%s-%u",
+ KBUILD_MODNAME, idx);
+
+ if (!info->task_sync || IS_ERR(info->task_sync)) {
+ test_dev->test_is_oom = true;
+ dev_err(test_dev->dev, "Setting up thread %u failed\n", idx);
+ info->task_sync = NULL;
+ goto err_out;
+ } else
+ dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx);
+
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return 0;
+
+err_out:
+ info->ret_sync = fail_ret;
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return fail_ret;
+}
+
+static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ struct kmod_test_device_info *info;
+ unsigned int i;
+
+ dev_info(test_dev->dev, "Ending request_module() tests\n");
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ for (i = 0; i < config->num_threads; i++) {
+ info = &test_dev->info[i];
+ if (info->task_sync && !IS_ERR(info->task_sync)) {
+ dev_info(test_dev->dev,
+ "Stopping still-running thread %i\n", i);
+ kthread_stop(info->task_sync);
+ }
+
+ /*
+ * info->task_sync is well protected, it can only be
+ * NULL or a pointer to a struct. If it's NULL we either
+ * never ran, or we did and we completed the work. Completed
+ * tasks *always* put the module for us. This is a sanity
+ * check -- just in case.
+ */
+ if (info->task_sync && info->need_mod_put)
+ test_kmod_put_module(info);
+ }
+
+ mutex_unlock(&test_dev->thread_mutex);
+}
+
+/*
+ * Only wait *iff* we did not run into any errors during all of our thread
+ * set up. If we run into any issues we stop threads and just bail out with
+ * an error to the trigger. This also means we don't need any tally work
+ * for any threads which fail.
+ */
+static int try_requests(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ unsigned int idx;
+ int ret;
+ bool any_error = false;
+
+ for (idx = 0; idx < config->num_threads; idx++) {
+ if (test_dev->test_is_oom) {
+ any_error = true;
+ break;
+ }
+
+ ret = try_one_request(test_dev, idx);
+ if (ret) {
+ any_error = true;
+ break;
+ }
+ }
+
+ if (!any_error) {
+ test_dev->test_is_oom = false;
+ dev_info(test_dev->dev,
+ "No errors were found while initializing threads\n");
+ wait_for_completion(&test_dev->kthreads_done);
+ tally_up_work(test_dev);
+ } else {
+ test_dev->test_is_oom = true;
+ dev_info(test_dev->dev,
+ "At least one thread failed to start, stop all work\n");
+ test_dev_kmod_stop_tests(test_dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int run_test_driver(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ dev_info(test_dev->dev, "Test case: %s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+ dev_info(test_dev->dev, "Test driver to load: %s\n",
+ config->test_driver);
+ dev_info(test_dev->dev, "Number of threads to run: %u\n",
+ config->num_threads);
+ dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
+ config->num_threads - 1);
+
+ return try_requests(test_dev);
+}
+
+static int run_test_fs_type(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ dev_info(test_dev->dev, "Test case: %s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+ dev_info(test_dev->dev, "Test filesystem to load: %s\n",
+ config->test_fs);
+ dev_info(test_dev->dev, "Number of threads to run: %u\n",
+ config->num_threads);
+ dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
+ config->num_threads - 1);
+
+ return try_requests(test_dev);
+}
+
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int len = 0;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ len += snprintf(buf, PAGE_SIZE,
+ "Custom trigger configuration for: %s\n",
+ dev_name(dev));
+
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "Number of threads:\t%u\n",
+ config->num_threads);
+
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "Test_case:\t%s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+
+ if (config->test_driver)
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "driver:\t%s\n",
+ config->test_driver);
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "driver:\tEMPTY\n");
+
+ if (config->test_fs)
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "fs:\t%s\n",
+ config->test_fs);
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "fs:\tEMPTY\n");
+
+ mutex_unlock(&test_dev->config_mutex);
+
+ return len;
+}
+static DEVICE_ATTR_RO(config);
+
+/*
+ * This ensures we don't allow kicking threads through if our configuration
+ * is faulty.
+ */
+static int __trigger_config_run(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ test_dev->done = 0;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ return run_test_driver(test_dev);
+ case TEST_KMOD_FS_TYPE:
+ return run_test_fs_type(test_dev);
+ default:
+ dev_warn(test_dev->dev,
+ "Invalid test case requested: %u\n",
+ config->test_case);
+ return -EINVAL;
+ }
+}
+
+static int trigger_config_run(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ int ret;
+
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ ret = __trigger_config_run(test_dev);
+ if (ret < 0)
+ goto out;
+ dev_info(test_dev->dev, "General test result: %d\n",
+ config->test_result);
+
+ /*
+ * We must return 0 after a trigger event unless something went
+ * wrong with the setup of the test. If the test setup went fine
+ * then userspace must just check the result of config->test_result.
+ * One issue with relying on the return from a call in the kernel
+ * is that if the kernel returns a positive value, using this trigger
+ * will not return the value to userspace; it would be lost.
+ *
+ * By not relying on capturing the return value of the tests we run
+ * through the trigger, it also allows us to run tests with set -e and
+ * only fail when something went wrong with the driver upon trigger
+ * requests.
+ */
+ ret = 0;
+
+out:
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ return ret;
+}
+
+static ssize_t
+trigger_config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ int ret;
+
+ if (test_dev->test_is_oom)
+ return -ENOMEM;
+
+ /* For all intents and purposes we don't care what userspace
+ * sent with this trigger, we care only that we were triggered.
+ * We treat the return value only for capturing issues with
+ * the test setup. At this point all the test variables should
+ * have been allocated so typically this should never fail.
+ */
+ ret = trigger_config_run(test_dev);
+ if (unlikely(ret < 0))
+ goto out;
+
+ /*
+ * Note: any return > 0 will be treated as success
+ * and the error value will not be available to userspace.
+ * Do not rely on trying to send a test's return value to
+ * userspace, as positive return errors will be lost.
+ */
+ if (WARN_ON(ret > 0))
+ return -EINVAL;
+
+ ret = count;
+out:
+ return ret;
+}
+static DEVICE_ATTR_WO(trigger_config);
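
Since the trigger deliberately returns 0 and parks the real outcome in test_result, userspace pairs the write with a read of test_result; a minimal illustrative sketch (the sysfs path and the test_kmod0 device name are assumptions):

	/* Illustrative userspace sketch, not part of the patch. */
	#include <stdio.h>

	int main(void)
	{
		/* assumed path; the device may be test_kmodN for N > 0 */
		const char *base = "/sys/devices/virtual/misc/test_kmod0";
		char path[128];
		int result = -1;
		FILE *f;

		snprintf(path, sizeof(path), "%s/trigger_config", base);
		f = fopen(path, "w");
		if (!f)
			return 1;
		fputs("1\n", f);	/* any write fires the trigger */
		fclose(f);

		snprintf(path, sizeof(path), "%s/test_result", base);
		f = fopen(path, "r");
		if (!f)
			return 1;
		if (fscanf(f, "%d", &result) != 1)
			result = -1;
		fclose(f);

		return result ? 1 : 0;
	}
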
+
+/*
+ * XXX: move to kstrncpy() once merged.
+ *
+ * Users should use kfree_const() when freeing these.
+ */
+static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
+{
+ *dst = kstrndup(name, count, gfp);
+ if (!*dst)
+ return -ENOSPC;
+ return count;
+}
+
+static int config_copy_test_driver_name(struct test_config *config,
+ const char *name,
+ size_t count)
+{
+ return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL);
+}
+
+
+static int config_copy_test_fs(struct test_config *config, const char *name,
+ size_t count)
+{
+ return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL);
+}
+
+static void __kmod_config_free(struct test_config *config)
+{
+ if (!config)
+ return;
+
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+}
+
+static void kmod_config_free(struct kmod_test_device *test_dev)
+{
+ struct test_config *config;
+
+ if (!test_dev)
+ return;
+
+ config = &test_dev->config;
+
+ mutex_lock(&test_dev->config_mutex);
+ __kmod_config_free(config);
+ mutex_unlock(&test_dev->config_mutex);
+}
+
+static ssize_t config_test_driver_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int copied;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+
+ copied = config_copy_test_driver_name(config, buf, count);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return copied;
+}
+
+/*
+ * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
+ */
+static ssize_t config_test_show_str(struct mutex *config_mutex,
+ char *dst,
+ char *src)
+{
+ int len;
+
+ mutex_lock(config_mutex);
+ len = snprintf(dst, PAGE_SIZE, "%s\n", src);
+ mutex_unlock(config_mutex);
+
+ return len;
+}
+
+static ssize_t config_test_driver_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return config_test_show_str(&test_dev->config_mutex, buf,
+ config->test_driver);
+}
+static DEVICE_ATTR(config_test_driver, 0644, config_test_driver_show,
+ config_test_driver_store);
+
+static ssize_t config_test_fs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int copied;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+
+ copied = config_copy_test_fs(config, buf, count);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return copied;
+}
+
+static ssize_t config_test_fs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return config_test_show_str(&test_dev->config_mutex, buf,
+ config->test_fs);
+}
+static DEVICE_ATTR(config_test_fs, 0644, config_test_fs_show,
+ config_test_fs_store);
+
+static int trigger_config_run_type(struct kmod_test_device *test_dev,
+ enum kmod_test_case test_case,
+ const char *test_str)
+{
+ int copied = 0;
+ struct test_config *config = &test_dev->config;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ switch (test_case) {
+ case TEST_KMOD_DRIVER:
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+ copied = config_copy_test_driver_name(config, test_str,
+ strlen(test_str));
+ break;
+ case TEST_KMOD_FS_TYPE:
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+ copied = config_copy_test_fs(config, test_str,
+ strlen(test_str));
+ break;
+ default:
+ mutex_unlock(&test_dev->config_mutex);
+ return -EINVAL;
+ }
+
+ config->test_case = test_case;
+
+ mutex_unlock(&test_dev->config_mutex);
+
+ if (copied <= 0 || copied != strlen(test_str)) {
+ test_dev->test_is_oom = true;
+ return -ENOMEM;
+ }
+
+ test_dev->test_is_oom = false;
+
+ return trigger_config_run(test_dev);
+}
+
+static void free_test_dev_info(struct kmod_test_device *test_dev)
+{
+ vfree(test_dev->info);
+ test_dev->info = NULL;
+}
+
+static int kmod_config_sync_info(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ free_test_dev_info(test_dev);
+ test_dev->info = vzalloc(config->num_threads *
+ sizeof(struct kmod_test_device_info));
+ if (!test_dev->info) {
+ dev_err(test_dev->dev, "Cannot alloc test_dev info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Old kernels may not have this; keep the fallback below if you want to
+ * port this code to test older kernels.
+ */
+#ifdef get_kmod_umh_limit
+static unsigned int kmod_init_test_thread_limit(void)
+{
+ return get_kmod_umh_limit();
+}
+#else
+static unsigned int kmod_init_test_thread_limit(void)
+{
+ return TEST_START_NUM_THREADS;
+}
+#endif
+
+static int __kmod_config_init(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ int ret = -ENOMEM, copied;
+
+ __kmod_config_free(config);
+
+ copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
+ strlen(TEST_START_DRIVER));
+ if (copied != strlen(TEST_START_DRIVER))
+ goto err_out;
+
+ copied = config_copy_test_fs(config, TEST_START_TEST_FS,
+ strlen(TEST_START_TEST_FS));
+ if (copied != strlen(TEST_START_TEST_FS))
+ goto err_out;
+
+ config->num_threads = kmod_init_test_thread_limit();
+ config->test_result = 0;
+ config->test_case = TEST_START_TEST_CASE;
+
+ ret = kmod_config_sync_info(test_dev);
+ if (ret)
+ goto err_out;
+
+ test_dev->test_is_oom = false;
+
+ return 0;
+
+err_out:
+ test_dev->test_is_oom = true;
+ WARN_ON(test_dev->test_is_oom);
+
+ __kmod_config_free(config);
+
+ return ret;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ int ret;
+
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ ret = __kmod_config_init(test_dev);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ dev_err(dev, "could not alloc settings for config trigger: %d\n",
+ ret);
+ goto out;
+ }
+
+ dev_info(dev, "reset\n");
+ ret = count;
+
+out:
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(reset);
+
+static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ unsigned int *config,
+ int (*test_sync)(struct kmod_test_device *test_dev))
+{
+ int ret;
+ unsigned long new;
+ unsigned int old_val;
+
+ ret = kstrtoul(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ if (new > UINT_MAX)
+ return -EINVAL;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ old_val = *config;
+ *(unsigned int *)config = new;
+
+ ret = test_sync(test_dev);
+ if (ret) {
+ *(unsigned int *)config = old_val;
+
+ ret = test_sync(test_dev);
+ WARN_ON(ret);
+
+ mutex_unlock(&test_dev->config_mutex);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&test_dev->config_mutex);
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ unsigned int *config,
+ unsigned int min,
+ unsigned int max)
+{
+ int ret;
+ unsigned long new;
+
+ ret = kstrtoul(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ if (new < min || new > max || new > UINT_MAX)
+ return -EINVAL;
+
+ mutex_lock(&test_dev->config_mutex);
+ *config = new;
+ mutex_unlock(&test_dev->config_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_int(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ int *config)
+{
+ int ret;
+ long new;
+
+ ret = kstrtol(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ if (new > INT_MAX || new < INT_MIN)
+ return -EINVAL;
+
+ mutex_lock(&test_dev->config_mutex);
+ *config = new;
+ mutex_unlock(&test_dev->config_mutex);
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev,
+ char *buf,
+ int config)
+{
+ int val;
+
+ mutex_lock(&test_dev->config_mutex);
+ val = config;
+ mutex_unlock(&test_dev->config_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev,
+ char *buf,
+ unsigned int config)
+{
+ unsigned int val;
+
+ mutex_lock(&test_dev->config_mutex);
+ val = config;
+ mutex_unlock(&test_dev->config_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t test_result_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_int(test_dev, buf, count,
+ &config->test_result);
+}
+
+static ssize_t config_num_threads_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_uint_sync(test_dev, buf, count,
+ &config->num_threads,
+ kmod_config_sync_info);
+}
+
+static ssize_t config_num_threads_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_show_int(test_dev, buf, config->num_threads);
+}
+static DEVICE_ATTR(config_num_threads, 0644, config_num_threads_show,
+ config_num_threads_store);
+
+static ssize_t config_test_case_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_uint_range(test_dev, buf, count,
+ &config->test_case,
+ __TEST_KMOD_INVALID + 1,
+ __TEST_KMOD_MAX - 1);
+}
+
+static ssize_t config_test_case_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_show_uint(test_dev, buf, config->test_case);
+}
+static DEVICE_ATTR(config_test_case, 0644, config_test_case_show,
+ config_test_case_store);
+
+static ssize_t test_result_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_show_int(test_dev, buf, config->test_result);
+}
+static DEVICE_ATTR(test_result, 0644, test_result_show, test_result_store);
+
+#define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr
+
+static struct attribute *test_dev_attrs[] = {
+ TEST_KMOD_DEV_ATTR(trigger_config),
+ TEST_KMOD_DEV_ATTR(config),
+ TEST_KMOD_DEV_ATTR(reset),
+
+ TEST_KMOD_DEV_ATTR(config_test_driver),
+ TEST_KMOD_DEV_ATTR(config_test_fs),
+ TEST_KMOD_DEV_ATTR(config_num_threads),
+ TEST_KMOD_DEV_ATTR(config_test_case),
+ TEST_KMOD_DEV_ATTR(test_result),
+
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(test_dev);
+
+static int kmod_config_init(struct kmod_test_device *test_dev)
+{
+ int ret;
+
+ mutex_lock(&test_dev->config_mutex);
+ ret = __kmod_config_init(test_dev);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return ret;
+}
+
+static struct kmod_test_device *alloc_test_dev_kmod(int idx)
+{
+ int ret;
+ struct kmod_test_device *test_dev;
+ struct miscdevice *misc_dev;
+
+ test_dev = vzalloc(sizeof(struct kmod_test_device));
+ if (!test_dev) {
+ pr_err("Cannot alloc test_dev\n");
+ goto err_out;
+ }
+
+ mutex_init(&test_dev->config_mutex);
+ mutex_init(&test_dev->trigger_mutex);
+ mutex_init(&test_dev->thread_mutex);
+
+ init_completion(&test_dev->kthreads_done);
+
+ ret = kmod_config_init(test_dev);
+ if (ret < 0) {
+ pr_err("Cannot alloc kmod_config_init()\n");
+ goto err_out_free;
+ }
+
+ test_dev->dev_idx = idx;
+ misc_dev = &test_dev->misc_dev;
+
+ misc_dev->minor = MISC_DYNAMIC_MINOR;
+ misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx);
+ if (!misc_dev->name) {
+ pr_err("Cannot alloc misc_dev->name\n");
+ goto err_out_free_config;
+ }
+ misc_dev->groups = test_dev_groups;
+
+ return test_dev;
+
+err_out_free_config:
+ free_test_dev_info(test_dev);
+ kmod_config_free(test_dev);
+err_out_free:
+ vfree(test_dev);
+ test_dev = NULL;
+err_out:
+ return NULL;
+}
+
+static void free_test_dev_kmod(struct kmod_test_device *test_dev)
+{
+ if (test_dev) {
+ kfree_const(test_dev->misc_dev.name);
+ test_dev->misc_dev.name = NULL;
+ free_test_dev_info(test_dev);
+ kmod_config_free(test_dev);
+ vfree(test_dev);
+ test_dev = NULL;
+ }
+}
+
+static struct kmod_test_device *register_test_dev_kmod(void)
+{
+ struct kmod_test_device *test_dev = NULL;
+ int ret;
+
+ mutex_lock(&reg_dev_mutex);
+
+ /* int should suffice for number of devices, test for wrap */
+ if (unlikely(num_test_devs + 1 < 0)) {
+ pr_err("reached limit of number of test devices\n");
+ goto out;
+ }
+
+ test_dev = alloc_test_dev_kmod(num_test_devs);
+ if (!test_dev)
+ goto out;
+
+ ret = misc_register(&test_dev->misc_dev);
+ if (ret) {
+ pr_err("could not register misc device: %d\n", ret);
+ free_test_dev_kmod(test_dev);
+ goto out;
+ }
+
+ test_dev->dev = test_dev->misc_dev.this_device;
+ list_add_tail(&test_dev->list, &reg_test_devs);
+ dev_info(test_dev->dev, "interface ready\n");
+
+ num_test_devs++;
+
+out:
+ mutex_unlock(&reg_dev_mutex);
+
+ return test_dev;
+
+}
+
+static int __init test_kmod_init(void)
+{
+ struct kmod_test_device *test_dev;
+ int ret;
+
+ test_dev = register_test_dev_kmod();
+ if (!test_dev) {
+ pr_err("Cannot add first test kmod device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * With some work we might be able to gracefully enable
+ * testing with this driver built-in, for now this seems
+ * rather risky. For those willing to try have at it,
+ * and enable the below. Good luck! If that works, try
+ * lowering the init level for more fun.
+ */
+ if (force_init_test) {
+ ret = trigger_config_run_type(test_dev,
+ TEST_KMOD_DRIVER, "tun");
+ if (WARN_ON(ret))
+ return ret;
+ ret = trigger_config_run_type(test_dev,
+ TEST_KMOD_FS_TYPE, "btrfs");
+ if (WARN_ON(ret))
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(test_kmod_init);
+
+static
+void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
+{
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ test_dev_kmod_stop_tests(test_dev);
+
+ dev_info(test_dev->dev, "removing interface\n");
+ misc_deregister(&test_dev->misc_dev);
+ kfree_const(test_dev->misc_dev.name);
+ test_dev->misc_dev.name = NULL;
+
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ free_test_dev_kmod(test_dev);
+}
+
+static void __exit test_kmod_exit(void)
+{
+ struct kmod_test_device *test_dev, *tmp;
+
+ mutex_lock(&reg_dev_mutex);
+ list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) {
+ list_del(&test_dev->list);
+ unregister_test_dev_kmod(test_dev);
+ }
+ mutex_unlock(&reg_dev_mutex);
+}
+module_exit(test_kmod_exit);
+
+MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 64e899b63337..0ffca990a833 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -56,8 +56,13 @@ static bool enomem_retry = false;
module_param(enomem_retry, bool, 0);
MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
+struct test_obj_val {
+ int id;
+ int tid;
+};
+
struct test_obj {
- int value;
+ struct test_obj_val value;
struct rhash_head node;
};
@@ -72,7 +77,7 @@ static struct test_obj array[MAX_ENTRIES];
static struct rhashtable_params test_rht_params = {
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
- .key_len = sizeof(int),
+ .key_len = sizeof(struct test_obj_val),
.hashfn = jhash,
.nulls_base = (3U << RHT_BASE_SHIFT),
};
@@ -109,24 +114,26 @@ static int __init test_rht_lookup(struct rhashtable *ht)
for (i = 0; i < entries * 2; i++) {
struct test_obj *obj;
bool expected = !(i % 2);
- u32 key = i;
+ struct test_obj_val key = {
+ .id = i,
+ };
- if (array[i / 2].value == TEST_INSERT_FAIL)
+ if (array[i / 2].value.id == TEST_INSERT_FAIL)
expected = false;
obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
if (expected && !obj) {
- pr_warn("Test failed: Could not find key %u\n", key);
+ pr_warn("Test failed: Could not find key %u\n", key.id);
return -ENOENT;
} else if (!expected && obj) {
pr_warn("Test failed: Unexpected entry found for key %u\n",
- key);
+ key.id);
return -EEXIST;
} else if (expected && obj) {
- if (obj->value != i) {
+ if (obj->value.id != i) {
pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
- obj->value, i);
+ obj->value.id, i);
return -EINVAL;
}
}
@@ -195,7 +202,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
for (i = 0; i < entries; i++) {
struct test_obj *obj = &array[i];
- obj->value = i * 2;
+ obj->value.id = i * 2;
err = insert_retry(ht, &obj->node, test_rht_params);
if (err > 0)
insert_retries += err;
@@ -216,9 +223,11 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
pr_info(" Deleting %d keys\n", entries);
for (i = 0; i < entries; i++) {
- u32 key = i * 2;
+ struct test_obj_val key = {
+ .id = i * 2,
+ };
- if (array[i].value != TEST_INSERT_FAIL) {
+ if (array[i].value.id != TEST_INSERT_FAIL) {
obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
BUG_ON(!obj);
@@ -242,18 +251,21 @@ static int thread_lookup_test(struct thread_data *tdata)
for (i = 0; i < entries; i++) {
struct test_obj *obj;
- int key = (tdata->id << 16) | i;
+ struct test_obj_val key = {
+ .id = i,
+ .tid = tdata->id,
+ };
obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
- if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
- pr_err(" found unexpected object %d\n", key);
+ if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) {
+ pr_err(" found unexpected object %d-%d\n", key.tid, key.id);
err++;
- } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
- pr_err(" object %d not found!\n", key);
+ } else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) {
+ pr_err(" object %d-%d not found!\n", key.tid, key.id);
err++;
- } else if (obj && (obj->value != key)) {
- pr_err(" wrong object returned (got %d, expected %d)\n",
- obj->value, key);
+ } else if (obj && memcmp(&obj->value, &key, sizeof(key))) {
+ pr_err(" wrong object returned (got %d-%d, expected %d-%d)\n",
+ obj->value.tid, obj->value.id, key.tid, key.id);
err++;
}
@@ -272,7 +284,8 @@ static int threadfunc(void *data)
pr_err(" thread[%d]: down_interruptible failed\n", tdata->id);
for (i = 0; i < entries; i++) {
- tdata->objs[i].value = (tdata->id << 16) | i;
+ tdata->objs[i].value.id = i;
+ tdata->objs[i].value.tid = tdata->id;
err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
if (err > 0) {
insert_retries += err;
@@ -295,7 +308,7 @@ static int threadfunc(void *data)
for (step = 10; step > 0; step--) {
for (i = 0; i < entries; i += step) {
- if (tdata->objs[i].value == TEST_INSERT_FAIL)
+ if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
continue;
err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
test_rht_params);
@@ -304,7 +317,7 @@ static int threadfunc(void *data)
tdata->id);
goto out;
}
- tdata->objs[i].value = TEST_INSERT_FAIL;
+ tdata->objs[i].value.id = TEST_INSERT_FAIL;
cond_resched();
}
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
new file mode 100644
index 000000000000..3dd801c1c85b
--- /dev/null
+++ b/lib/test_sysctl.c
@@ -0,0 +1,148 @@
+/*
+ * proc sysctl test driver
+ *
+ * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or at your option any
+ * later version; or, when distributed separately from the Linux kernel or
+ * when incorporated into other software packages, subject to the following
+ * license:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of copyleft-next (version 0.3.1 or later) as published
+ * at http://copyleft-next.org/.
+ */
+
+/*
+ * This module provides an interface to the proc sysctl interfaces. This
+ * driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the
+ * system unless explicitly requested by name. You can also build this driver
+ * into your kernel.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+
+static int i_zero;
+static int i_one_hundred = 100;
+
+struct test_sysctl_data {
+ int int_0001;
+ int int_0002;
+ int int_0003[4];
+
+ unsigned int uint_0001;
+
+ char string_0001[65];
+};
+
+static struct test_sysctl_data test_data = {
+ .int_0001 = 60,
+ .int_0002 = 1,
+
+ .int_0003[0] = 0,
+ .int_0003[1] = 1,
+ .int_0003[2] = 2,
+ .int_0003[3] = 3,
+
+ .uint_0001 = 314,
+
+ .string_0001 = "(none)",
+};
+
+/* These are all under /proc/sys/debug/test_sysctl/ */
+static struct ctl_table test_table[] = {
+ {
+ .procname = "int_0001",
+ .data = &test_data.int_0001,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ },
+ {
+ .procname = "int_0002",
+ .data = &test_data.int_0002,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "int_0003",
+ .data = &test_data.int_0003,
+ .maxlen = sizeof(test_data.int_0003),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "uint_0001",
+ .data = &test_data.uint_0001,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "string_0001",
+ .data = &test_data.string_0001,
+ .maxlen = sizeof(test_data.string_0001),
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+ { }
+};
+
+static struct ctl_table test_sysctl_table[] = {
+ {
+ .procname = "test_sysctl",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = test_table,
+ },
+ { }
+};
+
+static struct ctl_table test_sysctl_root_table[] = {
+ {
+ .procname = "debug",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = test_sysctl_table,
+ },
+ { }
+};
+
+static struct ctl_table_header *test_sysctl_header;
+
+static int __init test_sysctl_init(void)
+{
+ test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
+ if (!test_sysctl_header)
+ return -ENOMEM;
+ return 0;
+}
+late_initcall(test_sysctl_init);
+
+static void __exit test_sysctl_exit(void)
+{
+ if (test_sysctl_header)
+ unregister_sysctl_table(test_sysctl_header);
+}
+
+module_exit(test_sysctl_exit);
+
+MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
index 478c049630b5..cd819c397dc7 100644
--- a/lib/test_uuid.c
+++ b/lib/test_uuid.c
@@ -82,7 +82,7 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
test_uuid_failed("conversion", false, true, data->uuid, NULL);
total_tests++;
- if (uuid_equal(&data->be, &be)) {
+ if (!uuid_equal(&data->be, &be)) {
sprintf(buf, "%pUb", &be);
test_uuid_failed("cmp", false, true, data->uuid, buf);
}
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 1b6010a3beb8..f5d9f08ee032 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -6,8 +6,11 @@
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
- if (likely(access_ok(VERIFY_READ, from, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_READ, from, n))) {
+ kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
+ }
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
@@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user);
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
+ kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ }
return n;
}
EXPORT_SYMBOL(_copy_to_user);