Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   3
-rw-r--r--  lib/Kconfig.debug                | 118
-rw-r--r--  lib/Kconfig.kasan                | 168
-rw-r--r--  lib/Kconfig.kcsan                |   4
-rw-r--r--  lib/Kconfig.ubsan                |   2
-rw-r--r--  lib/Makefile                     |   2
-rw-r--r--  lib/bug.c                        |  15
-rw-r--r--  lib/crypto/Kconfig               |   6
-rw-r--r--  lib/crypto/Makefile              |   6
-rw-r--r--  lib/crypto/sm3.c                 | 246
-rw-r--r--  lib/crypto/sm4.c                 | 176
-rw-r--r--  lib/debugobjects.c               |   5
-rw-r--r--  lib/dim/net_dim.c                |  44
-rw-r--r--  lib/dump_stack.c                 |   4
-rw-r--r--  lib/fault-inject.c               |   3
-rw-r--r--  lib/glob.c                       |   2
-rw-r--r--  lib/hexdump.c                    |  41
-rw-r--r--  lib/irq_poll.c                   |   8
-rw-r--r--  lib/kstrtox.c                    |   6
-rw-r--r--  lib/kunit/Makefile               |   1
-rw-r--r--  lib/kunit/debugfs.c              |   2
-rw-r--r--  lib/kunit/executor.c             |  32
-rw-r--r--  lib/kunit/executor_test.c        |   4
-rw-r--r--  lib/kunit/kunit-example-test.c   |  16
-rw-r--r--  lib/kunit/kunit-test.c           |  37
-rw-r--r--  lib/kunit/resource.c             |  79
-rw-r--r--  lib/kunit/test.c                 | 145
-rw-r--r--  lib/list-test.c                  | 397
-rw-r--r--  lib/lockref.c                    |   9
-rw-r--r--  lib/nmi_backtrace.c              |   4
-rw-r--r--  lib/percpu-refcount.c            |   1
-rw-r--r--  lib/polynomial.c                 | 108
-rw-r--r--  lib/random32.c                   | 347
-rw-r--r--  lib/siphash.c                    |  32
-rw-r--r--  lib/slub_kunit.c                 |  10
-rw-r--r--  lib/stackdepot.c                 |  67
-rw-r--r--  lib/string.c                     |  25
-rw-r--r--  lib/string_helpers.c             |   3
-rw-r--r--  lib/strncpy_from_user.c          |   2
-rw-r--r--  lib/strnlen_user.c               |   2
-rw-r--r--  lib/test_bpf.c                   | 315
-rw-r--r--  lib/test_kasan.c                 |   2
-rw-r--r--  lib/test_meminit.c               |  12
-rw-r--r--  lib/test_string.c                |  33
-rw-r--r--  lib/test_sysctl.c                |  32
-rw-r--r--  lib/vsprintf.c                   |  67
-rw-r--r--  lib/xarray.c                     |   2
47 files changed, 1377 insertions, 1268 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 087e06b4cdfd..6a843639814f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -737,3 +737,6 @@ config PLDMFW
config ASN1_ENCODER
tristate
+
+config POLYNOMIAL
+ tristate
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 075cd25363ac..2e24db4bff19 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -485,24 +485,25 @@ config FRAME_POINTER
larger and slower, but it gives very useful debugging information
in case of kernel bugs. (precise oopses/stacktraces/warnings)
+config OBJTOOL
+ bool
+
config STACK_VALIDATION
bool "Compile-time stack metadata validation"
- depends on HAVE_STACK_VALIDATION
+ depends on HAVE_STACK_VALIDATION && UNWINDER_FRAME_POINTER
+ select OBJTOOL
default n
help
- Add compile-time checks to validate stack metadata, including frame
- pointers (if CONFIG_FRAME_POINTER is enabled). This helps ensure
- that runtime stack traces are more reliable.
-
- This is also a prerequisite for generation of ORC unwind data, which
- is needed for CONFIG_UNWINDER_ORC.
+ Validate frame pointer rules at compile-time. This helps ensure that
+ runtime stack traces are more reliable.
For more information, see
tools/objtool/Documentation/stack-validation.txt.
-config VMLINUX_VALIDATION
+config NOINSTR_VALIDATION
bool
- depends on STACK_VALIDATION && DEBUG_ENTRY
+ depends on HAVE_NOINSTR_VALIDATION && DEBUG_ENTRY
+ select OBJTOOL
default y
config VMLINUX_MAP
@@ -698,40 +699,6 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
help
Debug objects boot parameter default value
-config DEBUG_SLAB
- bool "Debug slab memory allocations"
- depends on DEBUG_KERNEL && SLAB
- help
- Say Y here to have the kernel do limited verification on memory
- allocation as well as poisoning memory on free to catch use of freed
- memory. This can make kmalloc/kfree-intensive workloads much slower.
-
-config SLUB_DEBUG_ON
- bool "SLUB debugging on by default"
- depends on SLUB && SLUB_DEBUG
- default n
- help
- Boot with debugging on by default. SLUB boots by default with
- the runtime debug capabilities switched off. Enabling this is
- equivalent to specifying the "slub_debug" parameter on boot.
- There is no support for more fine grained debug control like
- possible with slub_debug=xxx. SLUB debugging may be switched
- off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
- "slub_debug=-".
-
-config SLUB_STATS
- default n
- bool "Enable SLUB performance statistics"
- depends on SLUB && SYSFS
- help
- SLUB statistics are useful to debug SLUBs allocation behavior in
- order find ways to optimize the allocator. This should never be
- enabled for production use since keeping statistics slows down
- the allocator by a few percentage points. The slabinfo command
- supports the determination of the most active slabs to figure
- out which slabs are relevant to a particular load.
- Try running: slabinfo -DA
-
config HAVE_DEBUG_KMEMLEAK
bool
@@ -1071,13 +1038,6 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
Say N if unsure.
-config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
- int
- depends on SOFTLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
- default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
-
config HARDLOCKUP_DETECTOR_PERF
bool
select SOFTLOCKUP_DETECTOR
@@ -1119,13 +1079,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC
Say N if unsure.
-config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
- int
- depends on HARDLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
- default 1 if BOOTPARAM_HARDLOCKUP_PANIC
-
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
@@ -1173,13 +1126,6 @@ config BOOTPARAM_HUNG_TASK_PANIC
Say N if unsure.
-config BOOTPARAM_HUNG_TASK_PANIC_VALUE
- int
- depends on DETECT_HUNG_TASK
- range 0 1
- default 0 if !BOOTPARAM_HUNG_TASK_PANIC
- default 1 if BOOTPARAM_HUNG_TASK_PANIC
-
config WQ_WATCHDOG
bool "Detect Workqueue Stalls"
depends on DEBUG_KERNEL
@@ -1544,29 +1490,6 @@ config CSD_LOCK_WAIT_DEBUG
include the IPI handler function currently executing (if any)
and relevant stack traces.
-choice
- prompt "Lock debugging: prove subsystem device_lock() correctness"
- depends on PROVE_LOCKING
- help
- For subsystems that have instrumented their usage of the device_lock()
- with nested annotations, enable lock dependency checking. The locking
- hierarchy 'subclass' identifiers are not compatible across
- sub-systems, so only one can be enabled at a time.
-
-config PROVE_NVDIMM_LOCKING
- bool "NVDIMM"
- depends on LIBNVDIMM
- help
- Enable lockdep to validate nd_device_lock() usage.
-
-config PROVE_CXL_LOCKING
- bool "CXL"
- depends on CXL_BUS
- help
- Enable lockdep to validate cxl_device_lock() usage.
-
-endchoice
-
endmenu # lock debugging
config TRACE_IRQFLAGS
@@ -1616,8 +1539,7 @@ config WARN_ALL_UNSEEDED_RANDOM
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
However, since users cannot do anything actionable to
- address this, by default the kernel will issue only a single
- warning for the first use of unseeded randomness.
+ address this, by default this option is disabled.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for
@@ -2035,10 +1957,11 @@ config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
- depends on !ARCH_WANTS_NO_INSTR || STACK_VALIDATION || \
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
select DEBUG_FS
select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
+ select OBJTOOL if HAVE_NOINSTR_HACK
help
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
@@ -2140,10 +2063,11 @@ config TEST_DIV64
If unsure, say N.
config KPROBES_SANITY_TEST
- tristate "Kprobes sanity tests"
+ tristate "Kprobes sanity tests" if !KUNIT_ALL_TESTS
depends on DEBUG_KERNEL
depends on KPROBES
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This option provides for testing basic kprobes functionality on
boot. Samples of kprobe and kretprobe are inserted and
@@ -2417,8 +2341,9 @@ config TEST_SYSCTL
If unsure, say N.
config BITFIELD_KUNIT
- tristate "KUnit test bitfield functions at runtime"
+ tristate "KUnit test bitfield functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
Enable this option to test the bitfield functions at boot.
@@ -2452,8 +2377,9 @@ config HASH_KUNIT_TEST
optimized versions. If unsure, say N.
config RESOURCE_KUNIT_TEST
- tristate "KUnit test for resource API"
+ tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the resource API unit test.
Tests the logic of API provided by resource.c and ioport.h.
@@ -2506,8 +2432,9 @@ config LINEAR_RANGES_TEST
If unsure, say N.
config CMDLINE_KUNIT_TEST
- tristate "KUnit test for cmdline API"
+ tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the cmdline API unit test.
Tests the logic of API provided by cmdline.c.
@@ -2517,8 +2444,9 @@ config CMDLINE_KUNIT_TEST
If unsure, say N.
config BITS_TEST
- tristate "KUnit test for bits.h"
+ tristate "KUnit test for bits.h" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the bits unit test.
Tests the logic of macros defined in bits.h.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 1f3e620188a2..f0973da583e0 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+
# This config refers to the generic KASAN mode.
config HAVE_ARCH_KASAN
bool
@@ -15,9 +16,8 @@ config HAVE_ARCH_KASAN_VMALLOC
config ARCH_DISABLE_KASAN_INLINE
bool
help
- An architecture might not support inline instrumentation.
- When this option is selected, inline and stack instrumentation are
- disabled.
+ Disables both inline and stack instrumentation. Selected by
+ architectures that do not support these instrumentation types.
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
@@ -26,13 +26,13 @@ config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
# This option is only required for software KASAN modes.
-# Old GCC versions don't have proper support for no_sanitize_address.
+# Old GCC versions do not have proper support for no_sanitize_address.
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details.
config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300
menuconfig KASAN
- bool "KASAN: runtime memory debugger"
+ bool "KASAN: dynamic memory safety error detector"
depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
@@ -40,10 +40,13 @@ menuconfig KASAN
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select STACKDEPOT_ALWAYS_INIT
help
- Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
- designed to find out-of-bounds accesses and use-after-free bugs.
+ Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
+ error detector designed to find out-of-bounds and use-after-free bugs.
+
See Documentation/dev-tools/kasan.rst for details.
+ For better error reports, also enable CONFIG_STACKTRACE.
+
if KASAN
choice
@@ -51,75 +54,71 @@ choice
default KASAN_GENERIC
help
KASAN has three modes:
- 1. generic KASAN (similar to userspace ASan,
- x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC),
- 2. software tag-based KASAN (arm64 only, based on software
- memory tagging (similar to userspace HWASan), enabled with
- CONFIG_KASAN_SW_TAGS), and
- 3. hardware tag-based KASAN (arm64 only, based on hardware
- memory tagging, enabled with CONFIG_KASAN_HW_TAGS).
- All KASAN modes are strictly debugging features.
+ 1. Generic KASAN (supported by many architectures, enabled with
+ CONFIG_KASAN_GENERIC, similar to userspace ASan),
+ 2. Software Tag-Based KASAN (arm64 only, based on software memory
+ tagging, enabled with CONFIG_KASAN_SW_TAGS, similar to userspace
+ HWASan), and
+ 3. Hardware Tag-Based KASAN (arm64 only, based on hardware memory
+ tagging, enabled with CONFIG_KASAN_HW_TAGS).
- For better error reports enable CONFIG_STACKTRACE.
+ See Documentation/dev-tools/kasan.rst for details about each mode.
config KASAN_GENERIC
- bool "Generic mode"
+ bool "Generic KASAN"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
- Enables generic KASAN mode.
+ Enables Generic KASAN.
- This mode is supported in both GCC and Clang. With GCC it requires
- version 8.3.0 or later. Any supported Clang version is compatible,
- but detection of out-of-bounds accesses for global variables is
- supported only since Clang 11.
+ Requires GCC 8.3.0+ or Clang.
- This mode consumes about 1/8th of available memory at kernel start
- and introduces an overhead of ~x1.5 for the rest of the allocations.
+ Consumes about 1/8th of available memory at kernel start and adds an
+ overhead of ~50% for dynamic allocations.
The performance slowdown is ~x3.
- Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
- (the resulting kernel does not boot).
+ (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
config KASAN_SW_TAGS
- bool "Software tag-based mode"
+ bool "Software Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
- Enables software tag-based KASAN mode.
+ Enables Software Tag-Based KASAN.
- This mode require software memory tagging support in the form of
- HWASan-like compiler instrumentation.
+ Requires GCC 11+ or Clang.
- Currently this mode is only implemented for arm64 CPUs and relies on
- Top Byte Ignore. This mode requires Clang.
+ Supported only on arm64 CPUs and relies on Top Byte Ignore.
- This mode consumes about 1/16th of available memory at kernel start
- and introduces an overhead of ~20% for the rest of the allocations.
- This mode may potentially introduce problems relating to pointer
- casting and comparison, as it embeds tags into the top byte of each
- pointer.
+ Consumes about 1/16th of available memory at kernel start and
+ adds an overhead of ~20% for dynamic allocations.
- Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
- (the resulting kernel does not boot).
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
+
+ (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
config KASAN_HW_TAGS
- bool "Hardware tag-based mode"
+ bool "Hardware Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_HW_TAGS
depends on SLUB
help
- Enables hardware tag-based KASAN mode.
+ Enables Hardware Tag-Based KASAN.
+
+ Requires GCC 10+ or Clang 12+.
- This mode requires hardware memory tagging support, and can be used
- by any architecture that provides it.
+ Supported only on arm64 CPUs starting from ARMv8.5 and relies on
+ Memory Tagging Extension and Top Byte Ignore.
- Currently this mode is only implemented for arm64 CPUs starting from
- ARMv8.5 and relies on Memory Tagging Extension and Top Byte Ignore.
+ Consumes about 1/32nd of available memory.
+
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
endchoice
@@ -131,83 +130,80 @@ choice
config KASAN_OUTLINE
bool "Outline instrumentation"
help
- Before every memory access compiler insert function call
- __asan_load*/__asan_store*. These functions performs check
- of shadow memory. This is slower than inline instrumentation,
- however it doesn't bloat size of kernel's .text section so
- much as inline does.
+ Makes the compiler insert function calls that check whether the memory
+ is accessible before each memory access. Slower than KASAN_INLINE, but
+ does not bloat the size of the kernel's .text section so much.
config KASAN_INLINE
bool "Inline instrumentation"
depends on !ARCH_DISABLE_KASAN_INLINE
help
- Compiler directly inserts code checking shadow memory before
- memory accesses. This is faster than outline (in some workloads
- it gives about x2 boost over outline instrumentation), but
- make kernel's .text size much bigger.
+ Makes the compiler directly insert memory accessibility checks before
+ each memory access. Faster than KASAN_OUTLINE (gives ~x2 boost for
+ some workloads), but makes the kernel's .text size much bigger.
endchoice
config KASAN_STACK
- bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ bool "Stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
depends on !ARCH_DISABLE_KASAN_INLINE
default y if CC_IS_GCC
help
- The LLVM stack address sanitizer has a know problem that
- causes excessive stack usage in a lot of functions, see
- https://bugs.llvm.org/show_bug.cgi?id=38809
- Disabling asan-stack makes it safe to run kernels build
- with clang-8 with KASAN enabled, though it loses some of
- the functionality.
- This feature is always disabled when compile-testing with clang
- to avoid cluttering the output in stack overflow warnings,
- but clang users can still enable it for builds without
- CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
- to use and enabled by default.
- If the architecture disables inline instrumentation, stack
- instrumentation is also disabled as it adds inline-style
- instrumentation that is run unconditionally.
+ Disables stack instrumentation and thus KASAN's ability to detect
+ out-of-bounds bugs in stack variables.
+
+ With Clang, stack instrumentation has a problem that causes excessive
+ stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ with Clang, this option is deemed unsafe.
+
+ This option is always disabled when compile-testing with Clang to
+ avoid cluttering the log with stack overflow warnings.
+
+ With GCC, enabling stack instrumentation is assumed to be safe.
+
+ If the architecture disables inline instrumentation via
+ ARCH_DISABLE_KASAN_INLINE, stack instrumentation gets disabled
+ as well, as it adds inline-style instrumentation that is run
+ unconditionally.
config KASAN_TAGS_IDENTIFY
- bool "Enable memory corruption identification"
+ bool "Memory corruption type identification"
depends on KASAN_SW_TAGS || KASAN_HW_TAGS
help
- This option enables best-effort identification of bug type
- (use-after-free or out-of-bounds) at the cost of increased
- memory consumption.
+ Enables best-effort identification of the bug types (use-after-free
+ or out-of-bounds) at the cost of increased memory consumption.
+ Only applicable for the tag-based KASAN modes.
config KASAN_VMALLOC
bool "Check accesses to vmalloc allocations"
depends on HAVE_ARCH_KASAN_VMALLOC
help
- This mode makes KASAN check accesses to vmalloc allocations for
- validity.
+ Makes KASAN check the validity of accesses to vmalloc allocations.
- With software KASAN modes, checking is done for all types of vmalloc
- allocations. Enabling this option leads to higher memory usage.
+ With software KASAN modes, all types of vmalloc allocations are
+ checked. Enabling this option leads to higher memory usage.
- With hardware tag-based KASAN, only VM_ALLOC mappings are checked.
- There is no additional memory usage.
+ With Hardware Tag-Based KASAN, only non-executable VM_ALLOC mappings
+ are checked. There is no additional memory usage.
config KASAN_KUNIT_TEST
tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
depends on KASAN && KUNIT
default KUNIT_ALL_TESTS
help
- This is a KUnit test suite doing various nasty things like
- out of bounds and use after free accesses. It is useful for testing
- kernel debugging features like KASAN.
+ A KUnit-based KASAN test suite. Triggers different kinds of
+ out-of-bounds and use-after-free accesses. Useful for testing whether
+ KASAN can detect certain bug types.
For more information on KUnit and unit tests in general, please refer
- to the KUnit documentation in Documentation/dev-tools/kunit.
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
config KASAN_MODULE_TEST
tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
depends on m && KASAN && !KASAN_HW_TAGS
help
- This is a part of the KASAN test suite that is incompatible with
- KUnit. Currently includes tests that do bad copy_from/to_user
- accesses.
+ A part of the KASAN test suite that is not integrated with KUnit.
+ Incompatible with Hardware Tag-Based KASAN.
endif # KASAN
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index de022445fbba..47a693c45864 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -187,7 +187,9 @@ config KCSAN_WEAK_MEMORY
# We can either let objtool nop __tsan_func_{entry,exit}() and builtin
# atomics instrumentation in .noinstr.text, or use a compiler that can
# implement __no_kcsan to really remove all instrumentation.
- depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
+ CC_IS_GCC || CLANG_VERSION >= 140000
+ select OBJTOOL if HAVE_NOINSTR_HACK
help
Enable support for modeling a subset of weak memory, which allows
detecting a subset of data races due to missing memory barriers.
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index f3c57ed51838..c4fe15d38b60 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -94,7 +94,7 @@ config UBSAN_UNREACHABLE
bool "Perform checking for unreachable code"
# objtool already handles unreachable checking and gets angry about
# seeing UBSan instrumentation located in unreachable places.
- depends on !STACK_VALIDATION
+ depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || X86_SMAP))
depends on $(cc-option,-fsanitize=unreachable)
help
This option enables -fsanitize=unreachable which checks for control
diff --git a/lib/Makefile b/lib/Makefile
index 95268d6c75b7..ea54294d73bf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -263,6 +263,8 @@ obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+obj-$(CONFIG_POLYNOMIAL) += polynomial.o
+
# stackdepot.c should not be instrumented or call instrumented functions.
# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
# file.
diff --git a/lib/bug.c b/lib/bug.c
index 45a0584f6541..c223a2575b72 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -6,8 +6,7 @@
CONFIG_BUG - emit BUG traps. Nothing happens without this.
CONFIG_GENERIC_BUG - enable this code.
- CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
- the containing struct bug_entry for bug_addr and file.
+ CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers for bug_addr and file
CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -53,10 +52,10 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- return bug->bug_addr;
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
#else
- return (unsigned long)bug + bug->bug_addr_disp;
+ return bug->bug_addr;
#endif
}
@@ -131,10 +130,10 @@ void bug_get_file_line(struct bug_entry *bug, const char **file,
unsigned int *line)
{
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- *file = bug->file;
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ *file = (const char *)&bug->file_disp + bug->file_disp;
#else
- *file = (const char *)bug + bug->file_disp;
+ *file = bug->file;
#endif
*line = bug->line;
#else
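
The hunk above flips the #ifdef so that the CONFIG_GENERIC_BUG_RELATIVE_POINTERS case comes first and decodes each displacement relative to the field that stores it (&bug->bug_addr_disp, &bug->file_disp) rather than relative to the start of struct bug_entry. A minimal userspace sketch of that encode/decode round trip, using a hypothetical rel_entry type that is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a bug_entry-style record with a relative pointer. */
struct rel_entry {
	int32_t addr_disp;	/* target address minus the address of this field */
};

static int target;		/* something nearby to point at */
static struct rel_entry entry;

static uintptr_t decode_rel(const struct rel_entry *e)
{
	/* Same shape as the patched bug_addr(): field address + displacement. */
	return (uintptr_t)&e->addr_disp + e->addr_disp;
}

int main(void)
{
	/* Encode: the displacement is relative to the field, not the struct base. */
	entry.addr_disp = (int32_t)((uintptr_t)&target - (uintptr_t)&entry.addr_disp);

	printf("round trip ok: %d\n", decode_rel(&entry) == (uintptr_t)&target);
	return 0;
}

Storing 32-bit displacements instead of full pointers keeps bug_entry small and position independent, which is the point of CONFIG_GENERIC_BUG_RELATIVE_POINTERS.
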
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 379a66d7f504..9856e291f414 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -123,10 +123,4 @@ config CRYPTO_LIB_CHACHA20POLY1305
config CRYPTO_LIB_SHA256
tristate
-config CRYPTO_LIB_SM3
- tristate
-
-config CRYPTO_LIB_SM4
- tristate
-
endmenu
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 6c872d05d1e6..26be2bbe09c5 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -37,12 +37,6 @@ libpoly1305-y += poly1305.o
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
-obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o
-libsm3-y := sm3.o
-
-obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o
-libsm4-y := sm4.o
-
ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
libblake2s-y += blake2s-selftest.o
libchacha20poly1305-y += chacha20poly1305-selftest.o
diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c
deleted file mode 100644
index d473e358a873..000000000000
--- a/lib/crypto/sm3.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
- * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
- *
- * Copyright (C) 2017 ARM Limited or its affiliates.
- * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
- * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#include <linux/module.h>
-#include <asm/unaligned.h>
-#include <crypto/sm3.h>
-
-static const u32 ____cacheline_aligned K[64] = {
- 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
- 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
- 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
- 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
- 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
- 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
- 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
- 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
-};
-
-/*
- * Transform the message X which consists of 16 32-bit-words. See
- * GM/T 004-2012 for details.
- */
-#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
- do { \
- ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
- ss2 = ss1 ^ rol32((a), 12); \
- d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
- h += GG ## i(e, f, g) + ss1 + (w1); \
- b = rol32((b), 9); \
- f = rol32((f), 19); \
- h = P0((h)); \
- } while (0)
-
-#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(1, a, b, c, d, e, f, g, h, t, w1, w2)
-#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(2, a, b, c, d, e, f, g, h, t, w1, w2)
-
-#define FF1(x, y, z) (x ^ y ^ z)
-#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
-
-#define GG1(x, y, z) FF1(x, y, z)
-#define GG2(x, y, z) ((x & y) | (~x & z))
-
-/* Message expansion */
-#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
-#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
-#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
-#define W1(i) (W[i & 0x0f])
-#define W2(i) (W[i & 0x0f] = \
- P1(W[i & 0x0f] \
- ^ W[(i-9) & 0x0f] \
- ^ rol32(W[(i-3) & 0x0f], 15)) \
- ^ rol32(W[(i-13) & 0x0f], 7) \
- ^ W[(i-6) & 0x0f])
-
-static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
-{
- u32 a, b, c, d, e, f, g, h, ss1, ss2;
-
- a = sctx->state[0];
- b = sctx->state[1];
- c = sctx->state[2];
- d = sctx->state[3];
- e = sctx->state[4];
- f = sctx->state[5];
- g = sctx->state[6];
- h = sctx->state[7];
-
- R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
- R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
- R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
- R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
- R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
- R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
- R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
- R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
- R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
- R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
- R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
- R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
- R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
- R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
- R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
- R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
-
- R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
- R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
- R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
- R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
- R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
- R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
- R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
- R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
- R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
- R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
- R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
- R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
- R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
- R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
- R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
- R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
-
- R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
- R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
- R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
- R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
- R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
- R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
- R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
- R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
- R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
- R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
- R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
- R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
- R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
- R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
- R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
- R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
-
- R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
- R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
- R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
- R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
- R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
- R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
- R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
- R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
- R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
- R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
- R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
- R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
- R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
- R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
- R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
- R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
-
- sctx->state[0] ^= a;
- sctx->state[1] ^= b;
- sctx->state[2] ^= c;
- sctx->state[3] ^= d;
- sctx->state[4] ^= e;
- sctx->state[5] ^= f;
- sctx->state[6] ^= g;
- sctx->state[7] ^= h;
-}
-#undef R
-#undef R1
-#undef R2
-#undef I
-#undef W1
-#undef W2
-
-static inline void sm3_block(struct sm3_state *sctx,
- u8 const *data, int blocks, u32 W[16])
-{
- while (blocks--) {
- sm3_transform(sctx, data, W);
- data += SM3_BLOCK_SIZE;
- }
-}
-
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
-{
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
-
- sctx->count += len;
-
- if ((partial + len) >= SM3_BLOCK_SIZE) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- sm3_block(sctx, data, blocks, W);
- data += blocks * SM3_BLOCK_SIZE;
- }
-
- memzero_explicit(W, sizeof(W));
-
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-}
-EXPORT_SYMBOL_GPL(sm3_update);
-
-void sm3_final(struct sm3_state *sctx, u8 *out)
-{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- __be32 *digest = (__be32 *)out;
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
- int i;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
- partial = 0;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- memset(sctx->buffer + partial, 0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- sm3_block(sctx, sctx->buffer, 1, W);
-
- for (i = 0; i < 8; i++)
- put_unaligned_be32(sctx->state[i], digest++);
-
- /* Zeroize sensitive information. */
- memzero_explicit(W, sizeof(W));
- memzero_explicit(sctx, sizeof(*sctx));
-}
-EXPORT_SYMBOL_GPL(sm3_final);
-
-MODULE_DESCRIPTION("Generic SM3 library");
-MODULE_LICENSE("GPL v2");
diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c
deleted file mode 100644
index 284e62576d0c..000000000000
--- a/lib/crypto/sm4.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * SM4, as specified in
- * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
- *
- * Copyright (C) 2018 ARM Limited or its affiliates.
- * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#include <linux/module.h>
-#include <asm/unaligned.h>
-#include <crypto/sm4.h>
-
-static const u32 fk[4] = {
- 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
-};
-
-static const u32 ____cacheline_aligned ck[32] = {
- 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
- 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
- 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
- 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
- 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
- 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
- 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
- 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
-};
-
-static const u8 ____cacheline_aligned sbox[256] = {
- 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
- 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
- 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
- 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
- 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
- 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
- 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
- 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
- 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
- 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
- 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
- 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
- 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
- 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
- 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
- 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
- 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
- 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
- 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
- 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
- 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
- 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
- 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
- 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
- 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
- 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
- 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
- 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
- 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
- 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
- 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
- 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
-};
-
-static inline u32 sm4_t_non_lin_sub(u32 x)
-{
- u32 out;
-
- out = (u32)sbox[x & 0xff];
- out |= (u32)sbox[(x >> 8) & 0xff] << 8;
- out |= (u32)sbox[(x >> 16) & 0xff] << 16;
- out |= (u32)sbox[(x >> 24) & 0xff] << 24;
-
- return out;
-}
-
-static inline u32 sm4_key_lin_sub(u32 x)
-{
- return x ^ rol32(x, 13) ^ rol32(x, 23);
-}
-
-static inline u32 sm4_enc_lin_sub(u32 x)
-{
- return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
-}
-
-static inline u32 sm4_key_sub(u32 x)
-{
- return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static inline u32 sm4_enc_sub(u32 x)
-{
- return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
-{
- return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
-}
-
-
-/**
- * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
- * @ctx: The location where the computed key will be stored.
- * @in_key: The supplied key.
- * @key_len: The length of the supplied key.
- *
- * Returns 0 on success. The function fails only if an invalid key size (or
- * pointer) is supplied.
- */
-int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
- unsigned int key_len)
-{
- u32 rk[4];
- const u32 *key = (u32 *)in_key;
- int i;
-
- if (key_len != SM4_KEY_SIZE)
- return -EINVAL;
-
- rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];
- rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];
- rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];
- rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];
-
- for (i = 0; i < 32; i += 4) {
- rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);
- rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);
- rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);
- rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]);
-
- ctx->rkey_enc[i + 0] = rk[0];
- ctx->rkey_enc[i + 1] = rk[1];
- ctx->rkey_enc[i + 2] = rk[2];
- ctx->rkey_enc[i + 3] = rk[3];
- ctx->rkey_dec[31 - 0 - i] = rk[0];
- ctx->rkey_dec[31 - 1 - i] = rk[1];
- ctx->rkey_dec[31 - 2 - i] = rk[2];
- ctx->rkey_dec[31 - 3 - i] = rk[3];
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(sm4_expandkey);
-
-/**
- * sm4_crypt_block - Encrypt or decrypt a single SM4 block
- * @rk: The rkey_enc for encrypt or rkey_dec for decrypt
- * @out: Buffer to store output data
- * @in: Buffer containing the input data
- */
-void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in)
-{
- u32 x[4], i;
-
- x[0] = get_unaligned_be32(in + 0 * 4);
- x[1] = get_unaligned_be32(in + 1 * 4);
- x[2] = get_unaligned_be32(in + 2 * 4);
- x[3] = get_unaligned_be32(in + 3 * 4);
-
- for (i = 0; i < 32; i += 4) {
- x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
- x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
- x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
- x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
- }
-
- put_unaligned_be32(x[3 - 0], out + 0 * 4);
- put_unaligned_be32(x[3 - 1], out + 1 * 4);
- put_unaligned_be32(x[3 - 2], out + 2 * 4);
- put_unaligned_be32(x[3 - 3], out + 3 * 4);
-}
-EXPORT_SYMBOL_GPL(sm4_crypt_block);
-
-MODULE_DESCRIPTION("Generic SM4 library");
-MODULE_LICENSE("GPL v2");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 6946f8e204e3..337d797a7141 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic infrastructure for lifetime debugging of objects.
*
- * Started by Thomas Gleixner
- *
* Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
- *
- * For licencing details see kernel-base/COPYING
*/
#define pr_fmt(fmt) "ODEBUG: " fmt
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 06811d866775..53f6b9c6e936 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -12,41 +12,41 @@
* Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
*/
#define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
#define NET_DIM_RX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_RX_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
+ {.usec = 2, .pkts = 256,}, \
+ {.usec = 8, .pkts = 128,}, \
+ {.usec = 16, .pkts = 64,}, \
+ {.usec = 32, .pkts = 64,}, \
+ {.usec = 64, .pkts = 64,} \
}
#define NET_DIM_TX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_TX_CQE_PROFILES { \
- {5, 128}, \
- {8, 64}, \
- {16, 32}, \
- {32, 32}, \
- {64, 32} \
+ {.usec = 5, .pkts = 128,}, \
+ {.usec = 8, .pkts = 64,}, \
+ {.usec = 16, .pkts = 32,}, \
+ {.usec = 32, .pkts = 32,}, \
+ {.usec = 64, .pkts = 32,} \
}
static const struct dim_cq_moder
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6b7f1bf6715d..83471e81501a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
- printk_cpu_lock_irqsave(flags);
+ printk_cpu_sync_get_irqsave(flags);
__dump_stack(log_lvl);
- printk_cpu_unlock_irqrestore(flags);
+ printk_cpu_sync_put_irqrestore(flags);
}
EXPORT_SYMBOL(dump_stack_lvl);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index ce12621b4275..423784d9c058 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
+ if (attr->no_warn)
+ return;
+
if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, "
diff --git a/lib/glob.c b/lib/glob.c
index 85ecbda45cd8..15b73f490720 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -45,7 +45,7 @@ bool __pure glob_match(char const *pat, char const *str)
* (no exception for /), it can be easily proved that there's
* never a need to backtrack multiple levels.
*/
- char const *back_pat = NULL, *back_str = back_str;
+ char const *back_pat = NULL, *back_str;
/*
* Loop over each token (character or class) in pat, matching
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 9301578f98e8..06833d404398 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -22,15 +22,33 @@ EXPORT_SYMBOL(hex_asc_upper);
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
+ *
+ * This function is used to load cryptographic keys, so it is coded in such a
+ * way that there are no conditions or memory accesses that depend on data.
+ *
+ * Explanation of the logic:
+ * (ch - '9' - 1) is negative if ch <= '9'
+ * ('0' - 1 - ch) is negative if ch >= '0'
+ * we "and" these two values, so the result is negative if ch is in the range
+ * '0' ... '9'
+ * we are only interested in the sign, so we do a shift ">> 8"; note that right
+ * shift of a negative value is implementation-defined, so we cast the
+ * value to (unsigned) before the shift --- we have 0xffffff if ch is in
+ * the range '0' ... '9', 0 otherwise
+ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
+ * in the range '0' ... '9', 0 otherwise
+ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
+ * ... '9', -1 otherwise
+ * the next line is similar to the previous one, but we need to decode both
+ * uppercase and lowercase letters, so we use (ch & 0xdf), which converts
+ * lowercase to uppercase
*/
-int hex_to_bin(char ch)
+int hex_to_bin(unsigned char ch)
{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- ch = tolower(ch);
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- return -1;
+ unsigned char cu = ch & 0xdf;
+ return -1 +
+ ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
+ ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}
EXPORT_SYMBOL(hex_to_bin);
@@ -45,10 +63,13 @@ EXPORT_SYMBOL(hex_to_bin);
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- int hi = hex_to_bin(*src++);
- int lo = hex_to_bin(*src++);
+ int hi, lo;
- if ((hi < 0) || (lo < 0))
+ hi = hex_to_bin(*src++);
+ if (unlikely(hi < 0))
+ return -EINVAL;
+ lo = hex_to_bin(*src++);
+ if (unlikely(lo < 0))
return -EINVAL;
*dst++ = (hi << 4) | lo;
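
The rewritten hex_to_bin() above avoids data-dependent branches and table lookups, for the reason given in its comment (it is used to load cryptographic keys). A standalone userspace sketch, assuming a 32-bit int, that cross-checks the branchless expression against a plain reference decoder over all 256 byte values; the function names are illustrative and not part of the patch:

#include <stdio.h>

/* Branchless decode, same expression as the patched hex_to_bin(). */
static int hex_to_bin_branchless(unsigned char ch)
{
	unsigned char cu = ch & 0xdf;

	return -1 +
	       ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
	       ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}

/* Straightforward reference decoder, used only for the comparison. */
static int hex_to_bin_reference(unsigned char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}

int main(void)
{
	int ch, mismatches = 0;

	for (ch = 0; ch < 256; ch++)
		if (hex_to_bin_branchless(ch) != hex_to_bin_reference(ch))
			mismatches++;

	printf("mismatches: %d\n", mismatches);	/* expected: 0 */
	return 0;
}

Both decoders agree on every input, but only the branchless form keeps its control flow and memory accesses independent of the value being decoded.
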
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2f17b488d58e..2d5329a42105 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -188,14 +188,18 @@ EXPORT_SYMBOL(irq_poll_init);
static int irq_poll_cpu_dead(unsigned int cpu)
{
/*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
+ * If a CPU goes away, splice its entries to the current CPU and
+ * set the POLL softirq bit. The local_bh_disable()/enable() pair
+ * ensures that it is handled. Otherwise the current CPU could
+ * reach idle with the POLL softirq pending.
*/
+ local_bh_disable();
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
+ local_bh_enable();
return 0;
}
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 886510d248e5..08c14019841a 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -340,7 +340,7 @@ EXPORT_SYMBOL(kstrtos8);
* @s: input string
* @res: result
*
- * This routine returns 0 iff the first character is one of 'Yy1Nn0', or
+ * This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or
* [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
* pointed to by res is updated upon finding a match.
*/
@@ -353,11 +353,15 @@ int kstrtobool(const char *s, bool *res)
switch (s[0]) {
case 'y':
case 'Y':
+ case 't':
+ case 'T':
case '1':
*res = true;
return 0;
case 'n':
case 'N':
+ case 'f':
+ case 'F':
case '0':
*res = false;
return 0;
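
With this hunk, kstrtobool() also accepts 't'/'T' and 'f'/'F' as the leading character, alongside the existing 'y'/'Y'/'1', 'n'/'N'/'0' and "on"/"off" forms. A short usage sketch; it assumes the declaration is available via <linux/kstrtox.h> on this kernel and the helper name is hypothetical:

#include <linux/kstrtox.h>
#include <linux/types.h>

static int parse_flag(const char *arg, bool *enabled)
{
	/*
	 * "y", "Y", "t", "T", "1", "on"  -> *enabled = true,  returns 0
	 * "n", "N", "f", "F", "0", "off" -> *enabled = false, returns 0
	 * anything else                  -> returns -EINVAL, *enabled untouched
	 */
	return kstrtobool(arg, enabled);
}
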
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index c49f4ffb6273..29aff6562b42 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_KUNIT) += kunit.o
kunit-objs += test.o \
+ resource.o \
string-stream.o \
assert.o \
try-catch.o \
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index b71db0abc12b..1048ef1b8d6e 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -52,7 +52,7 @@ static void debugfs_print_result(struct seq_file *seq,
static int debugfs_print_results(struct seq_file *seq, void *v)
{
struct kunit_suite *suite = (struct kunit_suite *)seq->private;
- bool success = kunit_suite_has_succeeded(suite);
+ enum kunit_status success = kunit_suite_has_succeeded(suite);
struct kunit_case *test_case;
if (!suite || !suite->log)
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 22640c9ee819..96f96e42ce06 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -71,9 +71,13 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob)
/* Use memcpy to workaround copy->name being const. */
copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
memcpy(copy, suite, sizeof(*copy));
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered)
+ return ERR_PTR(-ENOMEM);
n = 0;
kunit_suite_for_each_test_case(suite, test_case) {
@@ -106,14 +110,16 @@ kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL);
if (!filtered)
- return NULL;
+ return ERR_PTR(-ENOMEM);
n = 0;
for (i = 0; subsuite[i] != NULL; ++i) {
if (!glob_match(filter->suite_glob, subsuite[i]->name))
continue;
filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob);
- if (filtered_suite)
+ if (IS_ERR(filtered_suite))
+ return ERR_CAST(filtered_suite);
+ else if (filtered_suite)
filtered[n++] = filtered_suite;
}
filtered[n] = NULL;
@@ -146,7 +152,8 @@ static void kunit_free_suite_set(struct suite_set suite_set)
}
static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
- const char *filter_glob)
+ const char *filter_glob,
+ int *err)
{
int i;
struct kunit_suite * const **copy, * const *filtered_subsuite;
@@ -166,6 +173,10 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
for (i = 0; i < max; ++i) {
filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter);
+ if (IS_ERR(filtered_subsuite)) {
+ *err = PTR_ERR(filtered_subsuite);
+ return filtered;
+ }
if (filtered_subsuite)
*copy++ = filtered_subsuite;
}
@@ -236,9 +247,15 @@ int kunit_run_all_tests(void)
.start = __kunit_suites_start,
.end = __kunit_suites_end,
};
+ int err = 0;
- if (filter_glob_param)
- suite_set = kunit_filter_suites(&suite_set, filter_glob_param);
+ if (filter_glob_param) {
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param, &err);
+ if (err) {
+ pr_err("kunit executor: error filtering suites: %d\n", err);
+ goto out;
+ }
+ }
if (!action_param)
kunit_exec_run_tests(&suite_set);
@@ -251,9 +268,10 @@ int kunit_run_all_tests(void)
kunit_free_suite_set(suite_set);
}
- kunit_handle_shutdown();
- return 0;
+out:
+ kunit_handle_shutdown();
+ return err;
}
#if IS_BUILTIN(CONFIG_KUNIT_TEST)
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index 4ed57fd94e42..eac6ff480273 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -137,14 +137,16 @@ static void filter_suites_test(struct kunit *test)
.end = suites + 2,
};
struct suite_set filtered = {.start = NULL, .end = NULL};
+ int err = 0;
/* Emulate two files, each having one suite */
subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases);
subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
/* Filter out suite1 */
- filtered = kunit_filter_suites(&suite_set, "suite0");
+ filtered = kunit_filter_suites(&suite_set, "suite0", &err);
kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */
+ KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 4bbf37c04eba..f8fe582c9e36 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -41,6 +41,17 @@ static int example_test_init(struct kunit *test)
}
/*
+ * This is run once before all test cases in the suite.
+ * See the comment on example_test_suite for more information.
+ */
+static int example_test_init_suite(struct kunit_suite *suite)
+{
+ kunit_info(suite, "initializing suite\n");
+
+ return 0;
+}
+
+/*
* This test should always be skipped.
*/
static void example_skip_test(struct kunit *test)
@@ -91,6 +102,8 @@ static void example_all_expect_macros_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test);
KUNIT_EXPECT_PTR_EQ(test, NULL, NULL);
KUNIT_EXPECT_PTR_NE(test, test, NULL);
+ KUNIT_EXPECT_NULL(test, NULL);
+ KUNIT_EXPECT_NOT_NULL(test, test);
/* String assertions */
KUNIT_EXPECT_STREQ(test, "hi", "hi");
@@ -140,17 +153,20 @@ static struct kunit_case example_test_cases[] = {
* may be specified which runs after every test case and can be used to for
* cleanup. For clarity, running tests in a test suite would behave as follows:
*
+ * suite.suite_init(suite);
* suite.init(test);
* suite.test_case[0](test);
* suite.exit(test);
* suite.init(test);
* suite.test_case[1](test);
* suite.exit(test);
+ * suite.suite_exit(suite);
* ...;
*/
static struct kunit_suite example_test_suite = {
.name = "example",
.init = example_test_init,
+ .suite_init = example_test_init_suite,
.test_cases = example_test_cases,
};
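
The ordering comment above is the authoritative description of when the new suite_init hook runs; suite_exit is its optional counterpart and is not used by the example suite in this patch. A hedged sketch of a suite wiring both hooks around some shared state (all names here are hypothetical):

#include <kunit/test.h>
#include <linux/errno.h>
#include <linux/slab.h>

static void *shared_state;

static int sketch_suite_init(struct kunit_suite *suite)
{
	/* Runs once before any test case; a non-zero return fails the suite. */
	shared_state = kzalloc(64, GFP_KERNEL);
	return shared_state ? 0 : -ENOMEM;
}

static void sketch_suite_exit(struct kunit_suite *suite)
{
	/* Runs once after the last test case. */
	kfree(shared_state);
}

static void sketch_test(struct kunit *test)
{
	KUNIT_EXPECT_NOT_NULL(test, shared_state);
}

static struct kunit_case sketch_cases[] = {
	KUNIT_CASE(sketch_test),
	{}
};

static struct kunit_suite sketch_suite = {
	.name = "suite-hooks-sketch",
	.suite_init = sketch_suite_init,
	.suite_exit = sketch_suite_exit,
	.test_cases = sketch_cases,
};
kunit_test_suite(sketch_suite);
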
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 555601d17f79..13d0bd8b07a9 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -190,6 +190,40 @@ static void kunit_resource_test_destroy_resource(struct kunit *test)
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
+static void kunit_resource_test_remove_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res = kunit_alloc_and_get_resource(
+ &ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ /* The resource is in the list */
+ KUNIT_EXPECT_FALSE(test, list_empty(&ctx->test.resources));
+
+ /* Remove the resource. The pointer is still valid, but it can't be
+ * found.
+ */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* We haven't been freed yet. */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Removing the resource multiple times is valid. */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* Despite having been removed twice (from only one reference), the
+ * resource still has not been freed.
+ */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Free the resource. */
+ kunit_put_resource(res);
+ KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+}
+
static void kunit_resource_test_cleanup_resources(struct kunit *test)
{
int i;
@@ -387,6 +421,7 @@ static struct kunit_case kunit_resource_test_cases[] = {
KUNIT_CASE(kunit_resource_test_init_resources),
KUNIT_CASE(kunit_resource_test_alloc_resource),
KUNIT_CASE(kunit_resource_test_destroy_resource),
+ KUNIT_CASE(kunit_resource_test_remove_resource),
KUNIT_CASE(kunit_resource_test_cleanup_resources),
KUNIT_CASE(kunit_resource_test_proper_free_ordering),
KUNIT_CASE(kunit_resource_test_static),
@@ -435,7 +470,7 @@ static void kunit_log_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(suite.log, "along with this."));
#else
- KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
+ KUNIT_EXPECT_NULL(test, test->log);
#endif
}
diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c
new file mode 100644
index 000000000000..c414df922f34
--- /dev/null
+++ b/lib/kunit/resource.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit resource API for test managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/kref.h>
+
+/*
+ * Used for static resources and when a kunit_resource * has been created by
+ * kunit_alloc_resource(). When an init function is supplied, @data is passed
+ * into the init function; otherwise, we simply set the resource data field to
+ * the data value passed in. Doesn't initialize res->should_kfree.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ res->free = free;
+ kref_init(&res->refcount);
+
+ if (init) {
+ ret = init(res, data);
+ if (ret)
+ return ret;
+ } else {
+ res->data = data;
+ }
+
+ spin_lock_irqsave(&test->lock, flags);
+ list_add_tail(&res->node, &test->resources);
+ /* refcount for list is established by kref_init() */
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kunit_add_resource);
+
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
+{
+ unsigned long flags;
+ bool was_linked;
+
+ spin_lock_irqsave(&test->lock, flags);
+ was_linked = !list_empty(&res->node);
+ list_del_init(&res->node);
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ if (was_linked)
+ kunit_put_resource(res);
+}
+EXPORT_SYMBOL_GPL(kunit_remove_resource);
+
+int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res = kunit_find_resource(test, match,
+ match_data);
+
+ if (!res)
+ return -ENOENT;
+
+ kunit_remove_resource(test, res);
+
+ /* We have a reference also via _find(); drop it. */
+ kunit_put_resource(res);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_destroy_resource);
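
The moved API keeps the existing semantics: every resource is reference counted, the test's resource list holds one of those references, and kunit_remove_resource() only unlinks the resource and drops that list reference. A hedged sketch of a static resource's lifecycle built from the functions above (the test name, free callback and data are hypothetical):

#include <kunit/resource.h>
#include <kunit/test.h>

static void sketch_noop_free(struct kunit_resource *res)
{
	/* Nothing to release; the resource and its data live on the stack/in BSS. */
}

static void sketch_static_resource(struct kunit *test)
{
	static struct kunit_resource res;	/* zero-initialized, so should_kfree stays false */
	int data = 42;

	/* No init callback, so __kunit_add_resource() simply sets res.data = &data. */
	KUNIT_ASSERT_EQ(test, 0,
			__kunit_add_resource(test, NULL, sketch_noop_free, &res, &data));

	/*
	 * Unlink from the test's list; this drops the last reference and
	 * invokes sketch_noop_free(). Removing it again would be a no-op.
	 */
	kunit_remove_resource(test, &res);
}

static struct kunit_case sketch_resource_cases[] = {
	KUNIT_CASE(sketch_static_resource),
	{}
};

static struct kunit_suite sketch_resource_suite = {
	.name = "resource-lifecycle-sketch",
	.test_cases = sketch_resource_cases,
};
kunit_test_suite(sketch_resource_suite);
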
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 3bca3bf5c15b..a5053a07409f 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -6,10 +6,10 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
+#include <kunit/resource.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/kernel.h>
-#include <linux/kref.h>
#include <linux/moduleparam.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
@@ -134,7 +134,7 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
-static void kunit_print_subtest_start(struct kunit_suite *suite)
+static void kunit_print_suite_start(struct kunit_suite *suite)
{
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
suite->name);
@@ -179,6 +179,9 @@ enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
const struct kunit_case *test_case;
enum kunit_status status = KUNIT_SKIPPED;
+ if (suite->suite_init_err)
+ return KUNIT_FAILURE;
+
kunit_suite_for_each_test_case(suite, test_case) {
if (test_case->status == KUNIT_FAILURE)
return KUNIT_FAILURE;
@@ -192,7 +195,7 @@ EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
static size_t kunit_suite_counter = 1;
-static void kunit_print_subtest_end(struct kunit_suite *suite)
+static void kunit_print_suite_end(struct kunit_suite *suite)
{
kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
@@ -241,7 +244,7 @@ static void kunit_print_string_stream(struct kunit *test,
}
static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
- enum kunit_assert_type type, struct kunit_assert *assert,
+ enum kunit_assert_type type, const struct kunit_assert *assert,
const struct va_format *message)
{
struct string_stream *stream;
@@ -281,7 +284,7 @@ static void __noreturn kunit_abort(struct kunit *test)
void kunit_do_failed_assertion(struct kunit *test,
const struct kunit_loc *loc,
enum kunit_assert_type type,
- struct kunit_assert *assert,
+ const struct kunit_assert *assert,
const char *fmt, ...)
{
va_list args;
@@ -498,7 +501,16 @@ int kunit_run_tests(struct kunit_suite *suite)
struct kunit_result_stats suite_stats = { 0 };
struct kunit_result_stats total_stats = { 0 };
- kunit_print_subtest_start(suite);
+ if (suite->suite_init) {
+ suite->suite_init_err = suite->suite_init(suite);
+ if (suite->suite_init_err) {
+ kunit_err(suite, KUNIT_SUBTEST_INDENT
+ "# failed to initialize (%d)", suite->suite_init_err);
+ goto suite_end;
+ }
+ }
+
+ kunit_print_suite_start(suite);
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
@@ -551,8 +563,12 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_accumulate_stats(&total_stats, param_stats);
}
+ if (suite->suite_exit)
+ suite->suite_exit(suite);
+
kunit_print_suite_stats(suite, suite_stats, total_stats);
- kunit_print_subtest_end(suite);
+suite_end:
+ kunit_print_suite_end(suite);
return 0;
}
@@ -562,6 +578,7 @@ static void kunit_init_suite(struct kunit_suite *suite)
{
kunit_debugfs_create_suite(suite);
suite->status_comment[0] = '\0';
+ suite->suite_init_err = 0;
}
int __kunit_test_suites_init(struct kunit_suite * const * const suites)
@@ -592,120 +609,6 @@ void __kunit_test_suites_exit(struct kunit_suite **suites)
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
-/*
- * Used for static resources and when a kunit_resource * has been created by
- * kunit_alloc_resource(). When an init function is supplied, @data is passed
- * into the init function; otherwise, we simply set the resource data field to
- * the data value passed in.
- */
-int kunit_add_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- void *data)
-{
- int ret = 0;
- unsigned long flags;
-
- res->free = free;
- kref_init(&res->refcount);
-
- if (init) {
- ret = init(res, data);
- if (ret)
- return ret;
- } else {
- res->data = data;
- }
-
- spin_lock_irqsave(&test->lock, flags);
- list_add_tail(&res->node, &test->resources);
- /* refcount for list is established by kref_init() */
- spin_unlock_irqrestore(&test->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(kunit_add_resource);
-
-int kunit_add_named_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- const char *name,
- void *data)
-{
- struct kunit_resource *existing;
-
- if (!name)
- return -EINVAL;
-
- existing = kunit_find_named_resource(test, name);
- if (existing) {
- kunit_put_resource(existing);
- return -EEXIST;
- }
-
- res->name = name;
-
- return kunit_add_resource(test, init, free, res, data);
-}
-EXPORT_SYMBOL_GPL(kunit_add_named_resource);
-
-struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *data)
-{
- struct kunit_resource *res;
- int ret;
-
- res = kzalloc(sizeof(*res), internal_gfp);
- if (!res)
- return NULL;
-
- ret = kunit_add_resource(test, init, free, res, data);
- if (!ret) {
- /*
- * bump refcount for get; kunit_resource_put() should be called
- * when done.
- */
- kunit_get_resource(res);
- return res;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
-
-void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&test->lock, flags);
- list_del(&res->node);
- spin_unlock_irqrestore(&test->lock, flags);
- kunit_put_resource(res);
-}
-EXPORT_SYMBOL_GPL(kunit_remove_resource);
-
-int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
- void *match_data)
-{
- struct kunit_resource *res = kunit_find_resource(test, match,
- match_data);
-
- if (!res)
- return -ENOENT;
-
- kunit_remove_resource(test, res);
-
- /* We have a reference also via _find(); drop it. */
- kunit_put_resource(res);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(kunit_destroy_resource);
-
struct kunit_kmalloc_array_params {
size_t n;
size_t size;
diff --git a/lib/list-test.c b/lib/list-test.c
index 035ef6597640..d374cf5d1a57 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -804,6 +804,401 @@ static struct kunit_suite list_test_module = {
.test_cases = list_test_cases,
};
-kunit_test_suites(&list_test_module);
+struct hlist_test_struct {
+ int data;
+ struct hlist_node list;
+};
+
+static void hlist_test_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct hlist_head list1 = HLIST_HEAD_INIT;
+ struct hlist_head list2;
+ HLIST_HEAD(list3);
+ struct hlist_head *list4;
+ struct hlist_head *list5;
+
+ INIT_HLIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_HLIST_HEAD(list4);
+
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_HLIST_HEAD(list5);
+
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list3));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list4));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void hlist_test_unhashed(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+}
+
+/* Doesn't test concurrency guarantees */
+static void hlist_test_unhashed_lockless(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+}
+
+static void hlist_test_del(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+}
+
+static void hlist_test_del_init(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del_init(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+
+ /* a is now initialised */
+ KUNIT_EXPECT_PTR_EQ(test, a.next, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL);
+}
+
+/* Tests all three hlist_add_* functions */
+static void hlist_test_add(struct kunit *test)
+{
+ struct hlist_node a, b, c, d;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_head(&b, &list);
+ hlist_add_before(&c, &a);
+ hlist_add_behind(&d, &a);
+
+ /* should be [list] -> b -> c -> a -> d */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+
+ KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next));
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &c);
+
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next));
+ KUNIT_EXPECT_PTR_EQ(test, c.next, &a);
+
+ KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next));
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &d);
+}
+
+/* Tests both hlist_fake() and hlist_add_fake() */
+static void hlist_test_fake(struct kunit *test)
+{
+ struct hlist_node a;
+
+ INIT_HLIST_NODE(&a);
+
+ /* not fake after init */
+ KUNIT_EXPECT_FALSE(test, hlist_fake(&a));
+
+ hlist_add_fake(&a);
+
+ /* is now fake */
+ KUNIT_EXPECT_TRUE(test, hlist_fake(&a));
+}
+
+static void hlist_test_is_singular_node(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&a, &list);
+ KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&b, &list);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list));
+}
+
+static void hlist_test_empty(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ /* list starts off empty */
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+
+ hlist_add_head(&a, &list);
+
+ /* list is no longer empty */
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list));
+}
+
+static void hlist_test_move_list(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list1);
+ HLIST_HEAD(list2);
+
+ hlist_add_head(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ hlist_move_list(&list1, &list2);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list2));
+
+}
+
+static void hlist_test_entry(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry(&(test_struct.list),
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry_safe(&(test_struct.list),
+ struct hlist_test_struct, list));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL,
+ hlist_entry_safe((struct hlist_node *)NULL,
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_for_each(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+
+static void hlist_test_for_each_safe(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur, *n;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ hlist_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+static void hlist_test_for_each_entry(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void hlist_test_for_each_entry_continue(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ /* We skip the first (zero-th) entry. */
+ i = 1;
+
+ cur = &entries[0];
+ hlist_for_each_entry_continue(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was not visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 0);
+ /* The second (and presumably the others) were. */
+ KUNIT_EXPECT_EQ(test, entries[1].data, 42);
+}
+
+static void hlist_test_for_each_entry_from(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ cur = &entries[0];
+ hlist_for_each_entry_from(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 42);
+}
+
+static void hlist_test_for_each_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ struct hlist_node *tmp_node;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry_safe(cur, tmp_node, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ hlist_del(&cur->list);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+
+static struct kunit_case hlist_test_cases[] = {
+ KUNIT_CASE(hlist_test_init),
+ KUNIT_CASE(hlist_test_unhashed),
+ KUNIT_CASE(hlist_test_unhashed_lockless),
+ KUNIT_CASE(hlist_test_del),
+ KUNIT_CASE(hlist_test_del_init),
+ KUNIT_CASE(hlist_test_add),
+ KUNIT_CASE(hlist_test_fake),
+ KUNIT_CASE(hlist_test_is_singular_node),
+ KUNIT_CASE(hlist_test_empty),
+ KUNIT_CASE(hlist_test_move_list),
+ KUNIT_CASE(hlist_test_entry),
+ KUNIT_CASE(hlist_test_entry_safe),
+ KUNIT_CASE(hlist_test_for_each),
+ KUNIT_CASE(hlist_test_for_each_safe),
+ KUNIT_CASE(hlist_test_for_each_entry),
+ KUNIT_CASE(hlist_test_for_each_entry_continue),
+ KUNIT_CASE(hlist_test_for_each_entry_from),
+ KUNIT_CASE(hlist_test_for_each_entry_safe),
+ {},
+};
+
+static struct kunit_suite hlist_test_module = {
+ .name = "hlist",
+ .test_cases = hlist_test_cases,
+};
+
+kunit_test_suites(&list_test_module, &hlist_test_module);
MODULE_LICENSE("GPL v2");
diff --git a/lib/lockref.c b/lib/lockref.c
index 5b34bbd3eba8..c6f0b183b937 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -14,12 +14,11 @@
BUILD_BUG_ON(sizeof(old) != 8); \
old.lock_count = READ_ONCE(lockref->lock_count); \
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
- struct lockref new = old, prev = old; \
+ struct lockref new = old; \
CODE \
- old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
- old.lock_count, \
- new.lock_count); \
- if (likely(old.lock_count == prev.lock_count)) { \
+ if (likely(try_cmpxchg64_relaxed(&lockref->lock_count, \
+ &old.lock_count, \
+ new.lock_count))) { \
SUCCESS; \
} \
if (!--retry) \
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 199ab201d501..d01aec6ae15c 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
* Allow nested NMI backtraces while serializing
* against other CPUs.
*/
- printk_cpu_lock_irqsave(flags);
+ printk_cpu_sync_get_irqsave(flags);
if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
- printk_cpu_unlock_irqrestore(flags);
+ printk_cpu_sync_put_irqrestore(flags);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index af9302141bcf..e5c5315da274 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -76,6 +76,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
data = kzalloc(sizeof(*ref->data), gfp);
if (!data) {
free_percpu((void __percpu *)ref->percpu_count_ptr);
+ ref->percpu_count_ptr = 0;
return -ENOMEM;
}
diff --git a/lib/polynomial.c b/lib/polynomial.c
new file mode 100644
index 000000000000..66d383445fec
--- /dev/null
+++ b/lib/polynomial.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic polynomial calculation using integer coefficients.
+ *
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/polynomial.h>
+
+/*
+ * Originally this was part of drivers/hwmon/bt1-pvt.c.
+ * There the following conversion is used and should serve as an example here:
+ *
+ * The original translation formulae of the temperature (in degrees of Celsius)
+ * to PVT data and vice-versa are as follows:
+ *
+ * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) +
+ * 1.7204e2
+ * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) +
+ * 3.1020e-1*(N^1) - 4.838e1
+ *
+ * where T = [-48.380, 147.438]C and N = [0, 1023].
+ *
+ * They must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are ordered so that the intermediate results
+ * stay within the integer range. In addition we need to translate the
+ * formulae to accept millidegrees of Celsius. Here is what they look like
+ * after the alterations:
+ *
+ * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T +
+ * 17204e2) / 1e4
+ * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D -
+ * 48380
+ * where T = [-48380, 147438] mC and N = [0, 1023].
+ *
+ * static const struct polynomial poly_temp_to_N = {
+ * .total_divider = 10000,
+ * .terms = {
+ * {4, 18322, 10000, 10000},
+ * {3, 2343, 10000, 10},
+ * {2, 87018, 10000, 10},
+ * {1, 39269, 1000, 1},
+ * {0, 1720400, 1, 1}
+ * }
+ * };
+ *
+ * static const struct polynomial poly_N_to_temp = {
+ * .total_divider = 1,
+ * .terms = {
+ * {4, -16743, 1000, 1},
+ * {3, 81542, 1000, 1},
+ * {2, -182010, 1000, 1},
+ * {1, 310200, 1000, 1},
+ * {0, -48380, 1, 1}
+ * }
+ * };
+ */
+
+/**
+ * polynomial_calc - calculate a polynomial using integer arithmetic
+ *
+ * @poly: pointer to the descriptor of the polynomial
+ * @data: input value of the polynomial
+ *
+ * Calculate the result of a polynomial using only integer arithmetic. For
+ * this to work without too much loss of precision, the coefficients have to
+ * be altered. This is called factor redistribution.
+ *
+ * Returns the result of the polynomial calculation.
+ */
+long polynomial_calc(const struct polynomial *poly, long data)
+{
+ const struct polynomial_term *term = poly->terms;
+ long total_divider = poly->total_divider ?: 1;
+ long tmp, ret = 0;
+ int deg;
+
+ /*
+ * Here is the polynomial calculation function, which performs the
+ * redistributed terms calculations. It's pretty straightforward.
+ * We walk over each degree term up to the free one, and perform
+ * the redistributed multiplication of the term coefficient, its
+ * divider (as for the rationale fraction representation), data
+ * power and the rational fraction divider leftover. Then all of
+ * this is collected in a total sum variable, which value is
+ * normalized by the total divider before being returned.
+ */
+ do {
+ tmp = term->coef;
+ for (deg = 0; deg < term->deg; ++deg)
+ tmp = mult_frac(tmp, data, term->divider);
+ ret += tmp / term->divider_leftover;
+ } while ((term++)->deg);
+
+ return ret / total_divider;
+}
+EXPORT_SYMBOL_GPL(polynomial_calc);
+
+MODULE_DESCRIPTION("Generic polynomial calculations");
+MODULE_LICENSE("GPL");
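
To make the term layout {deg, coef, divider, divider_leftover} concrete, a minimal caller could look like the sketch below; the coefficients are a simplified, linear-only subset of the poly_N_to_temp example in the comment above, shown purely for illustration:

#include <linux/polynomial.h>

/* Illustrative: T(N) = 310200e-3 * N - 48380, in millidegrees Celsius. */
static const struct polynomial example_n_to_temp = {
	.total_divider = 1,
	.terms = {
		{ 1, 310200, 1000, 1 },	/* (310200 * N) / 1000 */
		{ 0, -48380, 1, 1 },	/* constant term; deg 0 ends the walk */
	},
};

static long example_raw_to_millicelsius(long n)
{
	return polynomial_calc(&example_n_to_temp, n);
}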
diff --git a/lib/random32.c b/lib/random32.c
index 976632003ec6..d5d9029362cb 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -245,25 +245,13 @@ static struct prandom_test2 {
{ 407983964U, 921U, 728767059U },
};
-static u32 __extract_hwseed(void)
-{
- unsigned int val = 0;
-
- (void)(arch_get_random_seed_int(&val) ||
- arch_get_random_int(&val));
-
- return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
- bool mix_with_hwseed)
+static void prandom_state_selftest_seed(struct rnd_state *state, u32 seed)
{
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+ state->s1 = __seed(LCG(seed), 2U);
+ state->s2 = __seed(LCG(state->s1), 8U);
+ state->s3 = __seed(LCG(state->s2), 16U);
+ state->s4 = __seed(LCG(state->s3), 128U);
}
static int __init prandom_state_selftest(void)
@@ -274,7 +262,7 @@ static int __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
- prandom_seed_early(&state, test1[i].seed, false);
+ prandom_state_selftest_seed(&state, test1[i].seed);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
@@ -289,7 +277,7 @@ static int __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
- prandom_seed_early(&state, test2[i].seed, false);
+ prandom_state_selftest_seed(&state, test2[i].seed);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
@@ -310,324 +298,3 @@ static int __init prandom_state_selftest(void)
}
core_initcall(prandom_state_selftest);
#endif
-
-/*
- * The prandom_u32() implementation is now completely separate from the
- * prandom_state() functions, which are retained (for now) for compatibility.
- *
- * Because of (ab)use in the networking code for choosing random TCP/UDP port
- * numbers, which open DoS possibilities if guessable, we want something
- * stronger than a standard PRNG. But the performance requirements of
- * the network code do not allow robust crypto for this application.
- *
- * So this is a homebrew Junior Spaceman implementation, based on the
- * lowest-latency trustworthy crypto primitive available, SipHash.
- * (The authors of SipHash have not been consulted about this abuse of
- * their work.)
- *
- * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
- * one word of output. This abbreviated version uses 2 rounds per word
- * of output.
- */
-
-struct siprand_state {
- unsigned long v0;
- unsigned long v1;
- unsigned long v2;
- unsigned long v3;
-};
-
-static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
-DEFINE_PER_CPU(unsigned long, net_rand_noise);
-EXPORT_PER_CPU_SYMBOL(net_rand_noise);
-
-/*
- * This is the core CPRNG function. As "pseudorandom", this is not used
- * for truly valuable things, just intended to be a PITA to guess.
- * For maximum speed, we do just two SipHash rounds per word. This is
- * the same rate as 4 rounds per 64 bits that SipHash normally uses,
- * so hopefully it's reasonably secure.
- *
- * There are two changes from the official SipHash finalization:
- * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
- * they are there only to make the output rounds distinct from the input
- * rounds, and this application has no input rounds.
- * - Rather than returning v0^v1^v2^v3, return v1+v3.
- * If you look at the SipHash round, the last operation on v3 is
- * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
- * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
- * it still cancels out half of the bits in v2 for no benefit.)
- * Second, since the last combining operation was xor, continue the
- * pattern of alternating xor/add for a tiny bit of extra non-linearity.
- */
-static inline u32 siprand_u32(struct siprand_state *s)
-{
- unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
- unsigned long n = raw_cpu_read(net_rand_noise);
-
- v3 ^= n;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= n;
- s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
- return v1 + v3;
-}
-
-
-/**
- * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
- struct siprand_state *state = get_cpu_ptr(&net_rand_state);
- u32 res = siprand_u32(state);
-
- put_cpu_ptr(&net_rand_state);
- return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
-/**
- * prandom_bytes - get the requested number of pseudo-random bytes
- * @buf: where to copy the pseudo-random bytes to
- * @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
- struct siprand_state *state = get_cpu_ptr(&net_rand_state);
- u8 *ptr = buf;
-
- while (bytes >= sizeof(u32)) {
- put_unaligned(siprand_u32(state), (u32 *)ptr);
- ptr += sizeof(u32);
- bytes -= sizeof(u32);
- }
-
- if (bytes > 0) {
- u32 rem = siprand_u32(state);
-
- do {
- *ptr++ = (u8)rem;
- rem >>= BITS_PER_BYTE;
- } while (--bytes > 0);
- }
- put_cpu_ptr(&net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
-/**
- * prandom_seed - add entropy to pseudo random number generator
- * @entropy: entropy value
- *
- * Add some additional seed material to the prandom pool.
- * The "entropy" is actually our IP address (the only caller is
- * the network code), not for unpredictability, but to ensure that
- * different machines are initialized differently.
- */
-void prandom_seed(u32 entropy)
-{
- int i;
-
- add_device_randomness(&entropy, sizeof(entropy));
-
- for_each_possible_cpu(i) {
- struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
- unsigned long v0 = state->v0, v1 = state->v1;
- unsigned long v2 = state->v2, v3 = state->v3;
-
- do {
- v3 ^= entropy;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= entropy;
- } while (unlikely(!v0 || !v1 || !v2 || !v3));
-
- WRITE_ONCE(state->v0, v0);
- WRITE_ONCE(state->v1, v1);
- WRITE_ONCE(state->v2, v2);
- WRITE_ONCE(state->v3, v3);
- }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- * Generate some initially weak seeding values to allow
- * the prandom_u32() engine to be started.
- */
-static int __init prandom_init_early(void)
-{
- int i;
- unsigned long v0, v1, v2, v3;
-
- if (!arch_get_random_long(&v0))
- v0 = jiffies;
- if (!arch_get_random_long(&v1))
- v1 = random_get_entropy();
- v2 = v0 ^ PRND_K0;
- v3 = v1 ^ PRND_K1;
-
- for_each_possible_cpu(i) {
- struct siprand_state *state;
-
- v3 ^= i;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= i;
-
- state = per_cpu_ptr(&net_rand_state, i);
- state->v0 = v0; state->v1 = v1;
- state->v2 = v2; state->v3 = v3;
- }
-
- return 0;
-}
-core_initcall(prandom_init_early);
-
-
-/* Stronger reseeding when available, and periodically thereafter. */
-static void prandom_reseed(struct timer_list *unused);
-
-static DEFINE_TIMER(seed_timer, prandom_reseed);
-
-static void prandom_reseed(struct timer_list *unused)
-{
- unsigned long expires;
- int i;
-
- /*
- * Reinitialize each CPU's PRNG with 128 bits of key.
- * No locking on the CPUs, but then somewhat random results are,
- * well, expected.
- */
- for_each_possible_cpu(i) {
- struct siprand_state *state;
- unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
- unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
-#if BITS_PER_LONG == 32
- int j;
-
- /*
- * On 32-bit machines, hash in two extra words to
- * approximate 128-bit key length. Not that the hash
- * has that much security, but this prevents a trivial
- * 64-bit brute force.
- */
- for (j = 0; j < 2; j++) {
- unsigned long m = get_random_long();
-
- v3 ^= m;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= m;
- }
-#endif
- /*
- * Probably impossible in practice, but there is a
- * theoretical risk that a race between this reseeding
- * and the target CPU writing its state back could
- * create the all-zero SipHash fixed point.
- *
- * To ensure that never happens, ensure the state
- * we write contains no zero words.
- */
- state = per_cpu_ptr(&net_rand_state, i);
- WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
- WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
- WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
- WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
- }
-
- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
- mod_timer(&seed_timer, expires);
-}
-
-/*
- * The random ready callback can be called from almost any interrupt.
- * To avoid worrying about whether it's safe to delay that interrupt
- * long enough to seed all CPUs, just schedule an immediate timer event.
- */
-static int prandom_timer_start(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- mod_timer(&seed_timer, jiffies);
- return 0;
-}
-
-#ifdef CONFIG_RANDOM32_SELFTEST
-/* Principle: True 32-bit random numbers will all have 16 differing bits on
- * average. For each 32-bit number, there are 601M numbers differing by 16
- * bits, and 89% of the numbers differ by at least 12 bits. Note that more
- * than 16 differing bits also implies a correlation with inverted bits. Thus
- * we take 1024 random numbers and compare each of them to the other ones,
- * counting the deviation of correlated bits to 16. Constants report 32,
- * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the
- * u32 total, TEST_SIZE may be as large as 4096 samples.
- */
-#define TEST_SIZE 1024
-static int __init prandom32_state_selftest(void)
-{
- unsigned int x, y, bits, samples;
- u32 xor, flip;
- u32 total;
- u32 *data;
-
- data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL);
- if (!data)
- return 0;
-
- for (samples = 0; samples < TEST_SIZE; samples++)
- data[samples] = prandom_u32();
-
- flip = total = 0;
- for (x = 0; x < samples; x++) {
- for (y = 0; y < samples; y++) {
- if (x == y)
- continue;
- xor = data[x] ^ data[y];
- flip |= xor;
- bits = hweight32(xor);
- total += (bits - 16) * (bits - 16);
- }
- }
-
- /* We'll return the average deviation as 2*sqrt(corr/samples), which
- * is also sqrt(4*corr/samples) which provides a better resolution.
- */
- bits = int_sqrt(total / (samples * (samples - 1)) * 4);
- if (bits > 6)
- pr_warn("prandom32: self test failed (at least %u bits"
- " correlated, fixed_mask=%#x fixed_value=%#x\n",
- bits, ~flip, data[0] & ~flip);
- else
- pr_info("prandom32: self test passed (less than %u bits"
- " correlated)\n",
- bits+1);
- kfree(data);
- return 0;
-}
-core_initcall(prandom32_state_selftest);
-#endif /* CONFIG_RANDOM32_SELFTEST */
-
-/*
- * Start periodic full reseeding as soon as strong
- * random numbers are available.
- */
-static int __init prandom_init_late(void)
-{
- static struct notifier_block random_ready = {
- .notifier_call = prandom_timer_start
- };
- int ret = register_random_ready_notifier(&random_ready);
-
- if (ret == -EALREADY) {
- prandom_timer_start(&random_ready, 0, NULL);
- ret = 0;
- }
- return ret;
-}
-late_initcall(prandom_init_late);
diff --git a/lib/siphash.c b/lib/siphash.c
index 72b9068ab57b..71d315a6ad62 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -18,19 +18,13 @@
#include <asm/word-at-a-time.h>
#endif
-#define SIPROUND \
- do { \
- v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
- v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
- v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
- v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
- } while (0)
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
#define PREAMBLE(len) \
- u64 v0 = 0x736f6d6570736575ULL; \
- u64 v1 = 0x646f72616e646f6dULL; \
- u64 v2 = 0x6c7967656e657261ULL; \
- u64 v3 = 0x7465646279746573ULL; \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
u64 b = ((u64)(len)) << 56; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
@@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
}
EXPORT_SYMBOL(hsiphash_4u32);
#else
-#define HSIPROUND \
- do { \
- v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
- v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
- v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
- v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
- } while (0)
+#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
#define HPREAMBLE(len) \
- u32 v0 = 0; \
- u32 v1 = 0; \
- u32 v2 = 0x6c796765U; \
- u32 v3 = 0x74656462U; \
+ u32 v0 = HSIPHASH_CONST_0; \
+ u32 v1 = HSIPHASH_CONST_1; \
+ u32 v2 = HSIPHASH_CONST_2; \
+ u32 v3 = HSIPHASH_CONST_3; \
u32 b = ((u32)(len)) << 24; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 8662dc6cb509..7a0564d7cb7a 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -12,7 +12,7 @@ static int slab_errors;
static void test_clobber_zone(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
- SLAB_RED_ZONE, NULL);
+ SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
@@ -30,7 +30,7 @@ static void test_clobber_zone(struct kunit *test)
static void test_next_pointer(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
unsigned long tmp;
unsigned long *ptr_addr;
@@ -75,7 +75,7 @@ static void test_next_pointer(struct kunit *test)
static void test_first_word(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -90,7 +90,7 @@ static void test_first_word(struct kunit *test)
static void test_clobber_50th_byte(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -106,7 +106,7 @@ static void test_clobber_50th_byte(struct kunit *test)
static void test_clobber_redzone_free(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
- SLAB_RED_ZONE, NULL);
+ SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index bf5ba9af0500..5ca0d086ef4a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -66,6 +66,9 @@ struct stack_record {
unsigned long entries[]; /* Variable-sized array of entries. */
};
+static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_passed __initdata;
+
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
@@ -162,38 +165,58 @@ static int __init is_stack_depot_disabled(char *str)
}
early_param("stack_depot_disable", is_stack_depot_disabled);
-/*
- * __ref because of memblock_alloc(), which will not be actually called after
- * the __init code is gone, because at that point slab_is_available() is true
- */
-__ref int stack_depot_init(void)
+void __init stack_depot_want_early_init(void)
+{
+ /* Too late to request early init now */
+ WARN_ON(__stack_depot_early_init_passed);
+
+ __stack_depot_want_early_init = true;
+}
+
+int __init stack_depot_early_init(void)
+{
+ size_t size;
+
+ /* This is supposed to be called only once, from mm_init() */
+ if (WARN_ON(__stack_depot_early_init_passed))
+ return 0;
+
+ __stack_depot_early_init_passed = true;
+
+ if (!__stack_depot_want_early_init || stack_depot_disable)
+ return 0;
+
+ size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+ pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n",
+ size);
+ stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+
+ if (!stack_table) {
+ pr_err("Stack Depot hash table allocation failed, disabling\n");
+ stack_depot_disable = true;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
+ int ret = 0;
mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disable && !stack_table) {
- size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
- int i;
-
- if (slab_is_available()) {
- pr_info("Stack Depot allocating hash table with kvmalloc\n");
- stack_table = kvmalloc(size, GFP_KERNEL);
- } else {
- pr_info("Stack Depot allocating hash table with memblock_alloc\n");
- stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
- }
- if (stack_table) {
- for (i = 0; i < STACK_HASH_SIZE; i++)
- stack_table[i] = NULL;
- } else {
+ pr_info("Stack Depot allocating hash table with kvcalloc\n");
+ stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL);
+ if (!stack_table) {
pr_err("Stack Depot hash table allocation failed, disabling\n");
stack_depot_disable = true;
- mutex_unlock(&stack_depot_init_mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
}
}
mutex_unlock(&stack_depot_init_mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
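
For context, a sketch of the intended call ordering; the subsystem names are hypothetical, and the only constraint visible above is that stack_depot_want_early_init() must run before mm_init() invokes stack_depot_early_init():

#include <linux/stackdepot.h>

/* Early boot path of a hypothetical depot user, before mm_init() runs: */
void __init example_early_setup(void)
{
	/* Request the memblock-based hash table allocation. */
	stack_depot_want_early_init();
}

/* A later user can still initialize on demand once slab is available: */
int example_late_init(void)
{
	return stack_depot_init();	/* allocates with kvcalloc() if needed */
}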
diff --git a/lib/string.c b/lib/string.c
index 485777c9da83..6f334420f687 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -517,21 +517,13 @@ EXPORT_SYMBOL(strnlen);
size_t strspn(const char *s, const char *accept)
{
const char *p;
- const char *a;
- size_t count = 0;
for (p = s; *p != '\0'; ++p) {
- for (a = accept; *a != '\0'; ++a) {
- if (*p == *a)
- break;
- }
- if (*a == '\0')
- return count;
- ++count;
+ if (!strchr(accept, *p))
+ break;
}
- return count;
+ return p - s;
}
-
EXPORT_SYMBOL(strspn);
#endif
@@ -544,17 +536,12 @@ EXPORT_SYMBOL(strspn);
size_t strcspn(const char *s, const char *reject)
{
const char *p;
- const char *r;
- size_t count = 0;
for (p = s; *p != '\0'; ++p) {
- for (r = reject; *r != '\0'; ++r) {
- if (*p == *r)
- return count;
- }
- ++count;
+ if (strchr(reject, *p))
+ break;
}
- return count;
+ return p - s;
}
EXPORT_SYMBOL(strcspn);
#endif
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 4f877e9551d5..5ed3beb066e6 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -757,6 +757,9 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
return ERR_PTR(-ENOMEM);
}
+ ptr->n = n;
+ devres_add(dev, ptr);
+
return ptr->array;
}
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 08fc72d3ed16..6432b8c3e431 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -25,7 +25,7 @@
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src,
+static __always_inline long do_strncpy_from_user(char *dst, const char __user *src,
unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index bffa0ebf9f8b..feeb935a2299 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -20,7 +20,7 @@
 * if it fits in an aligned 'long'. The caller needs to check
* the return value against "> max".
*/
-static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long align, res = 0;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0c5cb2d6436a..2a7836e115b4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -53,6 +53,7 @@
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
#define FLAG_VERIFIER_ZEXT BIT(3)
+#define FLAG_LARGE_MEM BIT(4)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -7838,7 +7839,7 @@ static struct bpf_test tests[] = {
},
/* BPF_LDX_MEM B/H/W/DW */
{
- "BPF_LDX_MEM | BPF_B",
+ "BPF_LDX_MEM | BPF_B, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000008ULL),
@@ -7878,7 +7879,56 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_H",
+ "BPF_LDX_MEM | BPF_B, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_B, R1, R2, -256),
+ BPF_LDX_MEM(BPF_B, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 256),
+ BPF_LDX_MEM(BPF_B, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 4096),
+ BPF_LDX_MEM(BPF_B, R0, R1, 4096),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 4096 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000708ULL),
@@ -7918,7 +7968,72 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_W",
+ "BPF_LDX_MEM | BPF_H, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_H, R1, R2, -256),
+ BPF_LDX_MEM(BPF_H, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 256),
+ BPF_LDX_MEM(BPF_H, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 8192),
+ BPF_LDX_MEM(BPF_H, R0, R1, 8192),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 8192 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 13),
+ BPF_LDX_MEM(BPF_H, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000005060708ULL),
@@ -7957,6 +8072,162 @@ static struct bpf_test tests[] = {
{ { 0, 0 } },
.stack_depth = 8,
},
+ {
+ "BPF_LDX_MEM | BPF_W, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_W, R1, R2, -256),
+ BPF_LDX_MEM(BPF_W, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 256),
+ BPF_LDX_MEM(BPF_W, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 16384),
+ BPF_LDX_MEM(BPF_W, R0, R1, 16384),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 16384 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 13),
+ BPF_LDX_MEM(BPF_W, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_DW, R1, R2, -256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 32760),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32768, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 13),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -14094,6 +14365,9 @@ static void *generate_test_data(struct bpf_test *test, int sub)
if (test->aux & FLAG_NO_DATA)
return NULL;
+ if (test->aux & FLAG_LARGE_MEM)
+ return kmalloc(test->test[sub].data_size, GFP_KERNEL);
+
/* Test case expects an skb, so populate one. Various
* subtests generate skbs of different sizes based on
* the same data.
@@ -14137,7 +14411,10 @@ static void release_test_data(const struct bpf_test *test, void *data)
if (test->aux & FLAG_NO_DATA)
return;
- kfree_skb(data);
+ if (test->aux & FLAG_LARGE_MEM)
+ kfree(data);
+ else
+ kfree_skb(data);
}
static int filter_length(int which)
@@ -14674,6 +14951,36 @@ static struct tail_call_test tail_call_tests[] = {
.result = 10,
},
{
+ "Tail call load/store leaf",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP),
+ BPF_STX_MEM(BPF_DW, R3, R1, -8),
+ BPF_STX_MEM(BPF_DW, R3, R2, -16),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 3),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 32,
+ },
+ {
+ "Tail call load/store",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 3),
+ BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 16,
+ },
+ {
"Tail call error path, max count reached",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ad880231dfa8..c233b1a4e984 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -391,7 +391,7 @@ static void krealloc_uaf(struct kunit *test)
kfree(ptr1);
KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
- KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
+ KUNIT_ASSERT_NULL(test, ptr2);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 3ca717f11397..c95db11a6906 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -279,13 +279,18 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
NULL);
buf = kmem_cache_alloc(c, GFP_KERNEL);
+ if (!buf)
+ goto out;
saved_ptr = buf;
fill_with_garbage(buf, size);
buf_contents = kmalloc(size, GFP_KERNEL);
- if (!buf_contents)
+ if (!buf_contents) {
+ kmem_cache_free(c, buf);
goto out;
+ }
used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
if (!used_objects) {
+ kmem_cache_free(c, buf);
kfree(buf_contents);
goto out;
}
@@ -306,11 +311,14 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
}
}
+ for (iter = 0; iter < maxiter; iter++)
+ kmem_cache_free(c, used_objects[iter]);
+
free_out:
- kmem_cache_destroy(c);
kfree(buf_contents);
kfree(used_objects);
out:
+ kmem_cache_destroy(c);
*total_failures += fail;
return 1;
}
diff --git a/lib/test_string.c b/lib/test_string.c
index 9dfd6f52de92..c5cb92fb710e 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -179,6 +179,34 @@ static __init int strnchr_selftest(void)
return 0;
}
+static __init int strspn_selftest(void)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] __initconst = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i, res;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ res = strspn(s->str, s->accept);
+ if (res != s->a)
+ return 0x100 + 2*i;
+ res = strcspn(s->str, s->reject);
+ if (res != s->r)
+ return 0x100 + 2*i + 1;
+ }
+ return 0;
+}
+
static __exit void string_selftest_remove(void)
{
}
@@ -212,6 +240,11 @@ static __init int string_selftest_init(void)
if (subtest)
goto fail;
+ test = 6;
+ subtest = strspn_selftest();
+ if (subtest)
+ goto fail;
+
pr_info("String selftests succeeded\n");
return 0;
fail:
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index a5a3d6c27e1f..9a564971f539 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -38,6 +38,7 @@
static int i_zero;
static int i_one_hundred = 100;
+static int match_int_ok = 1;
struct test_sysctl_data {
int int_0001;
@@ -96,6 +97,13 @@ static struct ctl_table test_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "match_int",
+ .data = &match_int_ok,
+ .maxlen = sizeof(match_int_ok),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "boot_int",
.data = &test_data.boot_int,
.maxlen = sizeof(test_data.boot_int),
@@ -132,6 +140,30 @@ static struct ctl_table_header *test_sysctl_header;
static int __init test_sysctl_init(void)
{
+ int i;
+
+ struct {
+ int defined;
+ int wanted;
+ } match_int[] = {
+ {.defined = *(int *)SYSCTL_ZERO, .wanted = 0},
+ {.defined = *(int *)SYSCTL_ONE, .wanted = 1},
+ {.defined = *(int *)SYSCTL_TWO, .wanted = 2},
+ {.defined = *(int *)SYSCTL_THREE, .wanted = 3},
+ {.defined = *(int *)SYSCTL_FOUR, .wanted = 4},
+ {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100},
+ {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200},
+ {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000},
+ {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000},
+ {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX},
+ {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535},
+ {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(match_int); i++)
+ if (match_int[i].defined != match_int[i].wanted)
+ match_int_ok = 0;
+
test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
if (!test_data.bitmap_0001)
return -ENOMEM;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 40d26a07a133..fb77f7bfd126 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -750,61 +750,38 @@ static int __init debug_boot_weak_hash_enable(char *str)
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
-static siphash_key_t ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
static void enable_ptr_key_workfn(struct work_struct *work)
{
- get_random_bytes(&ptr_key, sizeof(ptr_key));
- /* Needs to run from preemptible context */
- static_branch_disable(&not_filled_random_ptr_key);
+ static_branch_enable(&filled_random_ptr_key);
}
-static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
-
-static int fill_random_ptr_key(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- /* This may be in an interrupt handler. */
- queue_work(system_unbound_wq, &enable_ptr_key_work);
- return 0;
-}
-
-static struct notifier_block random_ready = {
- .notifier_call = fill_random_ptr_key
-};
-
-static int __init initialize_ptr_random(void)
-{
- int key_size = sizeof(ptr_key);
- int ret;
-
- /* Use hw RNG if available. */
- if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
- static_branch_disable(&not_filled_random_ptr_key);
- return 0;
- }
-
- ret = register_random_ready_notifier(&random_ready);
- if (!ret) {
- return 0;
- } else if (ret == -EALREADY) {
- /* This is in preemptible context */
- enable_ptr_key_workfn(&enable_ptr_key_work);
- return 0;
- }
-
- return ret;
-}
-early_initcall(initialize_ptr_random);
-
/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
+ static siphash_key_t ptr_key __read_mostly;
unsigned long hashval;
- if (static_branch_unlikely(&not_filled_random_ptr_key))
- return -EAGAIN;
+ if (!static_branch_likely(&filled_random_ptr_key)) {
+ static bool filled = false;
+ static DEFINE_SPINLOCK(filling);
+ static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+ unsigned long flags;
+
+ if (!system_unbound_wq ||
+ (!rng_is_initialized() && !rng_has_arch_random()) ||
+ !spin_trylock_irqsave(&filling, flags))
+ return -EAGAIN;
+
+ if (!filled) {
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
+ filled = true;
+ }
+ spin_unlock_irqrestore(&filling, flags);
+ }
+
#ifdef CONFIG_64BIT
hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
diff --git a/lib/xarray.c b/lib/xarray.c
index 4acc88ea7c21..54e646e8e6ee 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
if (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
entry = xa_entry(xas->xa, node, offset);
+ if (node->shift && xa_is_node(entry))
+ entry = XA_RETRY_ENTRY;
}
xas->xa_offset = offset;