-rw-r--r--  Documentation/process/deprecated.rst     |  11
-rw-r--r--  MAINTAINERS                               |   1
-rw-r--r--  Makefile                                  |   4
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.S    |   2
-rw-r--r--  arch/sh/include/asm/sections.h            |   2
-rw-r--r--  arch/sh/kernel/machvec.c                  |  10
-rw-r--r--  arch/sparc/include/asm/smp_32.h           |  15
-rw-r--r--  arch/sparc/kernel/leon_smp.c              |  12
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c             |  12
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c             |  10
-rw-r--r--  arch/sparc/mm/srmmu.c                     |  29
-rw-r--r--  arch/um/Kconfig                           |   1
-rw-r--r--  arch/um/os-Linux/user_syms.c              |   1
-rw-r--r--  arch/x86/include/asm/paravirt_types.h     |  27
-rw-r--r--  arch/x86/xen/enlighten_pv.c               |   3
-rw-r--r--  drivers/md/dm-verity-loadpin.c            |   8
-rw-r--r--  drivers/md/dm-verity-target.c             |  16
-rw-r--r--  drivers/md/dm-verity.h                    |   1
-rw-r--r--  drivers/misc/lkdtm/fortify.c              |  96
-rw-r--r--  include/linux/fortify-string.h            | 245
-rw-r--r--  include/linux/overflow.h                  |  72
-rw-r--r--  include/linux/string.h                    |  43
-rw-r--r--  lib/Kconfig.debug                         |  21
-rw-r--r--  lib/Makefile                              |   2
-rw-r--r--  lib/fortify_kunit.c                       |  76
-rw-r--r--  lib/is_signed_type_kunit.c                |  53
-rw-r--r--  lib/memcpy_kunit.c                        |  59
-rw-r--r--  lib/overflow_kunit.c                      | 177
-rw-r--r--  scripts/Makefile.extrawarn                |   1
-rw-r--r--  security/Kconfig.hardening                |  14
-rw-r--r--  security/loadpin/Kconfig                  |   7
-rw-r--r--  security/loadpin/loadpin.c                |  16
-rw-r--r--  tools/testing/selftests/lkdtm/tests.txt   |   8
33 files changed, 815 insertions, 240 deletions
diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
index a6e36d9c3d14..c8fd53a11a20 100644
--- a/Documentation/process/deprecated.rst
+++ b/Documentation/process/deprecated.rst
@@ -138,17 +138,20 @@ be NUL terminated. This can lead to various linear read overflows and
other misbehavior due to the missing termination. It also NUL-pads
the destination buffer if the source contents are shorter than the
destination buffer size, which may be a needless performance penalty
-for callers using only NUL-terminated strings. The safe replacement is
+for callers using only NUL-terminated strings.
+
+When the destination is required to be NUL-terminated, the replacement is
strscpy(), though care must be given to any cases where the return value
of strncpy() was used, since strscpy() does not return a pointer to the
destination, but rather a count of non-NUL bytes copied (or negative
errno when it truncates). Any cases still needing NUL-padding should
instead use strscpy_pad().
-If a caller is using non-NUL-terminated strings, strncpy() can
-still be used, but destinations should be marked with the `__nonstring
+If a caller is using non-NUL-terminated strings, strtomem() should be
+used, and the destinations should be marked with the `__nonstring
<https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html>`_
-attribute to avoid future compiler warnings.
+attribute to avoid future compiler warnings. For cases still needing
+NUL-padding, strtomem_pad() can be used.
strlcpy()
---------
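[Note: a concrete sketch of the four replacements named above — not part of the patch; the struct and variable names are illustrative:

	struct foo {
		char name[8];			/* must stay NUL-terminated */
		char tag[8] __nonstring;	/* fixed-width, never NUL-terminated */
	};

	/* NUL-terminated destination: */
	strscpy(f->name, src, sizeof(f->name));		/* no padding */
	strscpy_pad(f->name, src, sizeof(f->name));	/* NUL-padded to size */

	/* Non-NUL-terminated destination: */
	strtomem(f->tag, src);			/* no padding */
	strtomem_pad(f->tag, src, 0);		/* remainder padded with 0x00 */
]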
diff --git a/MAINTAINERS b/MAINTAINERS
index 486e8e0450aa..66ef84d8b304 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8001,6 +8001,7 @@ L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: include/linux/fortify-string.h
+F: lib/fortify_kunit.c
F: lib/test_fortify/*
F: scripts/test_fortify.sh
K: \b__NO_FORTIFY\b
diff --git a/Makefile b/Makefile
index 89df76780615..f659d3085121 100644
--- a/Makefile
+++ b/Makefile
@@ -909,8 +909,8 @@ endif
# Initialize all stack variables with a zero value.
ifdef CONFIG_INIT_STACK_ALL_ZERO
KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
-ifdef CONFIG_CC_IS_CLANG
-# https://bugs.llvm.org/show_bug.cgi?id=45497
+ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
+# https://github.com/llvm/llvm-project/issues/44842
KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
endif
endif
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 1bcb68ac4b01..3fcb3e62dc56 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -23,6 +23,7 @@ SECTIONS
*(.ARM.extab*)
*(.note.*)
*(.rel.*)
+ *(.printk_index)
/*
* Discard any r/w data - this produces a link error if we have any,
* which is required for PIC decompression. Local data generates
@@ -57,6 +58,7 @@ SECTIONS
*(.rodata)
*(.rodata.*)
*(.data.rel.ro)
+ *(.data.rel.ro.*)
}
.piggydata : {
*(.piggydata)
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 8edb824049b9..0cb0ca149ac3 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -4,7 +4,7 @@
#include <asm-generic/sections.h>
-extern long __machvec_start, __machvec_end;
+extern char __machvec_start[], __machvec_end[];
extern char __uncached_start, __uncached_end;
extern char __start_eh_frame[], __stop_eh_frame[];
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index d606679a211e..57efaf5b82ae 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -20,8 +20,8 @@
#define MV_NAME_SIZE 32
#define for_each_mv(mv) \
- for ((mv) = (struct sh_machine_vector *)&__machvec_start; \
- (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \
+ for ((mv) = (struct sh_machine_vector *)__machvec_start; \
+ (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \
(mv)++)
static struct sh_machine_vector * __init get_mv_byname(const char *name)
@@ -87,8 +87,8 @@ void __init sh_mv_setup(void)
if (!machvec_selected) {
unsigned long machvec_size;
- machvec_size = ((unsigned long)&__machvec_end -
- (unsigned long)&__machvec_start);
+ machvec_size = ((unsigned long)__machvec_end -
+ (unsigned long)__machvec_start);
/*
* Sanity check for machvec section alignment. Ensure
@@ -102,7 +102,7 @@ void __init sh_mv_setup(void)
* vector (usually the only one) from .machvec.init.
*/
if (machvec_size >= sizeof(struct sh_machine_vector))
- sh_mv = *(struct sh_machine_vector *)&__machvec_start;
+ sh_mv = *(struct sh_machine_vector *)__machvec_start;
}
pr_notice("Booting machvec: %s\n", get_system_type());
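[Note: the sh change is the usual idiom for linker-section bounds. Declared as char __machvec_start[] (unknown bound), the symbol has no knowable object size; the old long __machvec_start told the compiler an object of exactly sizeof(long) bytes lives there, which FORTIFY_SOURCE bounds checks would then enforce. A sketch of the difference, assuming a FORTIFY-checked memcpy():

	extern long __section_start_bad;	/* compiler: sizeof(long) object */
	extern char __section_start[];		/* compiler: size unknown */

	char buf[64];
	memcpy(buf, &__section_start_bad, 64);	/* flagged: reads past a long */
	memcpy(buf, __section_start, 64);	/* OK: object size is SIZE_MAX */
]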
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 856081761b0f..2cf7971d7f6c 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -33,9 +33,6 @@ extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
extern struct linux_prom_registers smp_penguin_ctable;
-typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long);
-
void cpu_panic(void);
/*
@@ -57,7 +54,7 @@ void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);
struct sparc32_ipi_ops {
- void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+ void (*cross_call)(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4);
void (*resched)(int cpu);
@@ -66,28 +63,28 @@ struct sparc32_ipi_ops {
};
extern const struct sparc32_ipi_ops *sparc32_ipi_ops;
-static inline void xc0(smpfunc_t func)
+static inline void xc0(void *func)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0);
}
-static inline void xc1(smpfunc_t func, unsigned long arg1)
+static inline void xc1(void *func, unsigned long arg1)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);
}
-static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
+static inline void xc2(void *func, unsigned long arg1, unsigned long arg2)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);
}
-static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc3(void *func, unsigned long arg1, unsigned long arg2,
unsigned long arg3)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
arg1, arg2, arg3, 0);
}
-static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc4(void *func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 1eed26d423fb..991e9ad3d3e8 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -359,7 +359,7 @@ void leonsmp_ipi_interrupt(void)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -372,7 +372,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void leon_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -384,7 +384,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
{
/* If you make changes here, make sure gcc generates proper code... */
- register smpfunc_t f asm("i0") = func;
+ register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
@@ -444,11 +444,13 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void leon_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index ff30f03beb7c..9a62a5cf3337 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -268,7 +268,7 @@ static void sun4d_ipi_resched(int cpu)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -281,7 +281,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4d_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -296,7 +296,7 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
* If you make changes here, make sure
* gcc generates proper code...
*/
- register smpfunc_t f asm("i0") = func;
+ register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
@@ -353,11 +353,13 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = hard_smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 228a6527082d..056df034e79e 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -157,7 +157,7 @@ static void sun4m_ipi_mask_one(int cpu)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -170,7 +170,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -230,11 +230,13 @@ static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index a9aa6a92c7fe..13f027afc875 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1636,19 +1636,19 @@ static void __init get_srmmu_type(void)
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
- xc1((smpfunc_t) local_ops->page_for_dma, page);
+ xc1(local_ops->page_for_dma, page);
local_ops->page_for_dma(page);
}
static void smp_flush_cache_all(void)
{
- xc0((smpfunc_t) local_ops->cache_all);
+ xc0(local_ops->cache_all);
local_ops->cache_all();
}
static void smp_flush_tlb_all(void)
{
- xc0((smpfunc_t) local_ops->tlb_all);
+ xc0(local_ops->tlb_all);
local_ops->tlb_all();
}
@@ -1659,7 +1659,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+ xc1(local_ops->cache_mm, (unsigned long)mm);
local_ops->cache_mm(mm);
}
}
@@ -1671,7 +1671,7 @@ static void smp_flush_tlb_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask)) {
- xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+ xc1(local_ops->tlb_mm, (unsigned long)mm);
if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
@@ -1691,8 +1691,8 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->cache_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->cache_range, (unsigned long)vma, start,
+ end);
local_ops->cache_range(vma, start, end);
}
}
@@ -1708,8 +1708,8 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->tlb_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->tlb_range, (unsigned long)vma, start,
+ end);
local_ops->tlb_range(vma, start, end);
}
}
@@ -1723,8 +1723,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->cache_page,
- (unsigned long) vma, page);
+ xc2(local_ops->cache_page, (unsigned long)vma, page);
local_ops->cache_page(vma, page);
}
}
@@ -1738,8 +1737,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->tlb_page,
- (unsigned long) vma, page);
+ xc2(local_ops->tlb_page, (unsigned long)vma, page);
local_ops->tlb_page(vma, page);
}
}
@@ -1753,7 +1751,7 @@ static void smp_flush_page_to_ram(unsigned long page)
* XXX This experiment failed, research further... -DaveM
*/
#if 1
- xc1((smpfunc_t) local_ops->page_to_ram, page);
+ xc1(local_ops->page_to_ram, page);
#endif
local_ops->page_to_ram(page);
}
@@ -1764,8 +1762,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->sig_insns,
- (unsigned long) mm, insn_addr);
+ xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
local_ops->sig_insns(mm, insn_addr);
}
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 78de31ac1da7..ad4ff3b0e91e 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -6,6 +6,7 @@ config UML
bool
default y
select ARCH_EPHEMERAL_INODES
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
index cb667c9225ab..fd575ecbcaec 100644
--- a/arch/um/os-Linux/user_syms.c
+++ b/arch/um/os-Linux/user_syms.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define __NO_FORTIFY
#include <linux/types.h>
#include <linux/module.h>
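[Note: defining __NO_FORTIFY before any #include opts one compilation unit out of the fortified wrappers — needed here because UML's user_syms.c interfaces with host libc symbols. Roughly, the guard in <linux/string.h> looks like the sketch below; the exact in-tree condition has additional terms:

	#if defined(CONFIG_FORTIFY_SOURCE) && !defined(__NO_FORTIFY)
	#include <linux/fortify-string.h>
	#endif
]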
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 89df6c6617f5..b1ab5d94881b 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -328,7 +328,7 @@ int paravirt_disable_iospace(void);
* Unfortunately, this is a relatively slow operation for modern CPUs,
* because it cannot necessarily determine what the destination
* address is. In this case, the address is a runtime constant, so at
- * the very least we can patch the call to e a simple direct call, or
+ * the very least we can patch the call to a simple direct call, or,
* ideally, patch an inline implementation into the callsite. (Direct
* calls are essentially free, because the call and return addresses
* are completely predictable.)
@@ -339,10 +339,10 @@ int paravirt_disable_iospace(void);
* on the stack. All caller-save registers (eax,edx,ecx) are expected
* to be modified (either clobbered or used for return values).
* X86_64, on the other hand, already specifies a register-based calling
- * conventions, returning at %rax, with parameters going on %rdi, %rsi,
+ * conventions, returning at %rax, with parameters going in %rdi, %rsi,
* %rdx, and %rcx. Note that for this reason, x86_64 does not need any
* special handling for dealing with 4 arguments, unlike i386.
- * However, x86_64 also have to clobber all caller saved registers, which
+ * However, x86_64 also has to clobber all caller saved registers, which
* unfortunately, are quite a bit (r8 - r11)
*
* The call instruction itself is marked by placing its start address
@@ -360,22 +360,22 @@ int paravirt_disable_iospace(void);
* There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
* It could be extended to more arguments, but there would be little
* to be gained from that. For each number of arguments, there are
- * the two VCALL and CALL variants for void and non-void functions.
+ * two VCALL and CALL variants for void and non-void functions.
*
* When there is a return value, the invoker of the macro must specify
* the return type. The macro then uses sizeof() on that type to
- * determine whether its a 32 or 64 bit value, and places the return
+ * determine whether it's a 32 or 64 bit value and places the return
* in the right register(s) (just %eax for 32-bit, and %edx:%eax for
- * 64-bit). For x86_64 machines, it just returns at %rax regardless of
+ * 64-bit). For x86_64 machines, it just returns in %rax regardless of
* the return value size.
*
- * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
+ * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
* i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments
* in low,high order
*
* Small structures are passed and returned in registers. The macro
* calling convention can't directly deal with this, so the wrapper
- * functions must do this.
+ * functions must do it.
*
* These PVOP_* macros are only defined within this header. This
* means that all uses must be wrapped in inline functions. This also
@@ -414,8 +414,17 @@ int paravirt_disable_iospace(void);
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
-/* void functions are still allowed [re]ax for scratch */
+/*
+ * void functions are still allowed [re]ax for scratch.
+ *
+ * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
+ * registers. Make sure we model this with the appropriate clobbers.
+ */
+#ifdef CONFIG_ZERO_CALL_USED_REGS
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), PVOP_VCALL_CLOBBERS
+#else
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
+#endif
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 0ed2e487a693..9b1a58dda935 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -765,6 +765,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
+ static const struct trap_info zero = { };
unsigned out;
trace_xen_cpu_load_idt(desc);
@@ -774,7 +775,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
out = xen_convert_trap_info(desc, traps, false);
- memset(&traps[out], 0, sizeof(traps[0]));
+ traps[out] = zero;
xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))
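[Note: the Xen change trades a memset() into the middle of a 257-entry array — a write the fortified memset() cannot always prove stays within one element — for a struct assignment, which is sized by its type. The same pattern clears a single array element anywhere:

	static const struct trap_info zero = { };

	traps[out] = zero;	/* writes exactly sizeof(struct trap_info) */
]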
diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c
index 387ec43aef72..4f78cc55c251 100644
--- a/drivers/md/dm-verity-loadpin.c
+++ b/drivers/md/dm-verity-loadpin.c
@@ -14,6 +14,7 @@ LIST_HEAD(dm_verity_loadpin_trusted_root_digests);
static bool is_trusted_verity_target(struct dm_target *ti)
{
+ int verity_mode;
u8 *root_digest;
unsigned int digest_size;
struct dm_verity_loadpin_trusted_root_digest *trd;
@@ -22,6 +23,13 @@ static bool is_trusted_verity_target(struct dm_target *ti)
if (!dm_is_verity_target(ti))
return false;
+ verity_mode = dm_verity_get_mode(ti);
+
+ if ((verity_mode != DM_VERITY_MODE_EIO) &&
+ (verity_mode != DM_VERITY_MODE_RESTART) &&
+ (verity_mode != DM_VERITY_MODE_PANIC))
+ return false;
+
if (dm_verity_get_root_digest(ti, &root_digest, &digest_size))
return false;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 94b6cb599db4..8a00cc42e498 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1447,6 +1447,22 @@ bool dm_is_verity_target(struct dm_target *ti)
}
/*
+ * Get the verity mode (error behavior) of a verity target.
+ *
+ * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
+ * target.
+ */
+int dm_verity_get_mode(struct dm_target *ti)
+{
+ struct dm_verity *v = ti->private;
+
+ if (!dm_is_verity_target(ti))
+ return -EINVAL;
+
+ return v->mode;
+}
+
+/*
* Get the root digest of a verity target.
*
* Returns a copy of the root digest, the caller is responsible for
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 45455de1b4bc..98f306ec6a33 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -134,6 +134,7 @@ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
extern bool dm_is_verity_target(struct dm_target *ti);
+extern int dm_verity_get_mode(struct dm_target *ti);
extern int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest,
unsigned int *digest_size);
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index 080293fa3c52..015927665678 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -10,28 +10,31 @@
static volatile int fortify_scratch_space;
-static void lkdtm_FORTIFIED_OBJECT(void)
+static void lkdtm_FORTIFY_STR_OBJECT(void)
{
struct target {
char a[10];
- } target[2] = {};
+ int foo;
+ } target[3] = {};
/*
* Using volatile prevents the compiler from determining the value of
* 'size' at compile time. Without that, we would get a compile error
* rather than a runtime error.
*/
- volatile int size = 11;
+ volatile int size = 20;
+
+	pr_info("trying to strncpy() past the end of a struct\n");
- pr_info("trying to read past the end of a struct\n");
+ strncpy(target[0].a, target[1].a, size);
/* Store result to global to prevent the code from being eliminated */
- fortify_scratch_space = memcmp(&target[0], &target[1], size);
+ fortify_scratch_space = target[0].a[3];
- pr_err("FAIL: fortify did not block an object overread!\n");
+ pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
-static void lkdtm_FORTIFIED_SUBOBJECT(void)
+static void lkdtm_FORTIFY_STR_MEMBER(void)
{
struct target {
char a[10];
@@ -44,7 +47,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
- pr_info("trying to strncpy past the end of a member of a struct\n");
+ pr_info("trying to strncpy() past the end of a struct member...\n");
/*
* strncpy(target.a, src, 20); will hit a compile error because the
@@ -56,7 +59,72 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target.a[3];
- pr_err("FAIL: fortify did not block an sub-object overrun!\n");
+ pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+
+ kfree(src);
+}
+
+static void lkdtm_FORTIFY_MEM_OBJECT(void)
+{
+ int before[10];
+ struct target {
+ char a[10];
+ int foo;
+ } target = {};
+ int after[10];
+ /*
+ * Using volatile prevents the compiler from determining the value of
+ * 'size' at compile time. Without that, we would get a compile error
+ * rather than a runtime error.
+ */
+ volatile int size = 20;
+
+ memset(before, 0, sizeof(before));
+ memset(after, 0, sizeof(after));
+ fortify_scratch_space = before[5];
+ fortify_scratch_space = after[5];
+
+ pr_info("trying to memcpy() past the end of a struct\n");
+
+ pr_info("0: %zu\n", __builtin_object_size(&target, 0));
+ pr_info("1: %zu\n", __builtin_object_size(&target, 1));
+ pr_info("s: %d\n", size);
+ memcpy(&target, &before, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+}
+
+static void lkdtm_FORTIFY_MEM_MEMBER(void)
+{
+ struct target {
+ char a[10];
+ char b[10];
+ } target;
+ volatile int size = 20;
+ char *src;
+
+ src = kmalloc(size, GFP_KERNEL);
+ strscpy(src, "over ten bytes", size);
+ size = strlen(src) + 1;
+
+ pr_info("trying to memcpy() past the end of a struct member...\n");
+
+ /*
+	 * memcpy(target.a, src, 20); will hit a compile error because the
+ * compiler knows at build time that target.a < 20 bytes. Use a
+ * volatile to force a runtime error.
+ */
+ memcpy(target.a, src, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
kfree(src);
@@ -67,7 +135,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
* strscpy and generate a panic because there is a write overflow (i.e. src
* length is greater than dst length).
*/
-static void lkdtm_FORTIFIED_STRSCPY(void)
+static void lkdtm_FORTIFY_STRSCPY(void)
{
char *src;
char dst[5];
@@ -136,9 +204,11 @@ static void lkdtm_FORTIFIED_STRSCPY(void)
}
static struct crashtype crashtypes[] = {
- CRASHTYPE(FORTIFIED_OBJECT),
- CRASHTYPE(FORTIFIED_SUBOBJECT),
- CRASHTYPE(FORTIFIED_STRSCPY),
+ CRASHTYPE(FORTIFY_STR_OBJECT),
+ CRASHTYPE(FORTIFY_STR_MEMBER),
+ CRASHTYPE(FORTIFY_MEM_OBJECT),
+ CRASHTYPE(FORTIFY_MEM_MEMBER),
+ CRASHTYPE(FORTIFY_STRSCPY),
};
struct crashtype_category fortify_crashtypes = {
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 3b401fa0f374..b62c90cfafaf 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
+#include <linux/bug.h>
#include <linux/const.h>
+#include <linux/limits.h>
#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
#define __RENAME(x) __asm__(#x)
@@ -17,9 +19,10 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
#define __compiletime_strlen(p) \
({ \
unsigned char *__p = (unsigned char *)(p); \
- size_t __ret = (size_t)-1; \
- size_t __p_size = __builtin_object_size(p, 1); \
- if (__p_size != (size_t)-1) { \
+ size_t __ret = SIZE_MAX; \
+ size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+ __builtin_constant_p(*__p)) { \
size_t __p_len = __p_size - 1; \
if (__builtin_constant_p(__p[__p_len]) && \
__p[__p_len] == '\0') \
@@ -69,20 +72,59 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
__underlying_memcpy(dst, src, bytes)
/*
- * Clang's use of __builtin_object_size() within inlines needs hinting via
- * __pass_object_size(). The preference is to only ever use type 1 (member
+ * Clang's use of __builtin_*object_size() within inlines needs hinting via
+ * __pass_*object_size(). The preference is to only ever use type 1 (member
* size, rather than struct size), but there remain some stragglers using
* type 0 that will be converted in the future.
*/
-#define POS __pass_object_size(1)
-#define POS0 __pass_object_size(0)
+#define POS __pass_object_size(1)
+#define POS0 __pass_object_size(0)
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#define __compiletime_lessthan(bounds, length) ( \
+ __builtin_constant_p((bounds) < (length)) && \
+ (bounds) < (length) \
+)
+
+/**
+ * strncpy - Copy a string to memory with non-guaranteed NUL padding
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
+ * and @p will NOT be NUL-terminated.
+ *
+ * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
+ * will be written to @p until @size total bytes have been written.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads of @q, it cannot defend against writing unterminated
+ * results to @p. Using strncpy() remains ambiguous and fragile.
+ * Instead, please choose an alternative, so that the expectation
+ * of @p's contents is unambiguous:
+ *
+ * +--------------------+-----------------+------------+
+ * | @p needs to be: | padded to @size | not padded |
+ * +====================+=================+============+
+ * | NUL-terminated | strscpy_pad() | strscpy() |
+ * +--------------------+-----------------+------------+
+ * | not NUL-terminated | strtomem_pad() | strtomem() |
+ * +--------------------+-----------------+------------+
+ *
+ * Note strscpy*()'s differing return values for detecting truncation,
+ * and strtomem*()'s expectation that the destination is marked with
+ * __nonstring when it is a character array.
+ *
+ */
__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -92,9 +134,9 @@ char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
char *strcat(char * const POS p, const char *q)
{
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
- if (p_size == (size_t)-1)
+ if (p_size == SIZE_MAX)
return __underlying_strcat(p, q);
if (strlcat(p, q, p_size) >= p_size)
fortify_panic(__func__);
@@ -104,12 +146,12 @@ char *strcat(char * const POS p, const char *q)
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
{
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
size_t p_len = __compiletime_strlen(p);
size_t ret;
/* We can take compile-time actions when maxlen is const. */
- if (__builtin_constant_p(maxlen) && p_len != (size_t)-1) {
+ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
/* If p is const, we can use its compile-time-known len. */
if (maxlen >= p_size)
return p_len;
@@ -134,10 +176,10 @@ __FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
__kernel_size_t __fortify_strlen(const char * const POS p)
{
__kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
/* Give up if we don't know how large p is. */
- if (p_size == (size_t)-1)
+ if (p_size == SIZE_MAX)
return __underlying_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)
@@ -149,12 +191,12 @@ __kernel_size_t __fortify_strlen(const char * const POS p)
extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
{
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
size_t q_len; /* Full count of source string length. */
size_t len; /* Count of characters going into destination. */
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __real_strlcpy(p, q, size);
q_len = strlen(q);
len = (q_len >= size) ? size - 1 : q_len;
@@ -178,18 +220,18 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s
{
size_t len;
/* Use string size rather than possible enclosing struct size. */
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
/* If we cannot get size of p and q default to call strscpy. */
- if (p_size == (size_t) -1 && q_size == (size_t) -1)
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __real_strscpy(p, q, size);
/*
* If size can be known at compile time and is greater than
* p_size, generate a compile time write overflow error.
*/
- if (__builtin_constant_p(size) && size > p_size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
/*
@@ -224,10 +266,10 @@ __FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
{
size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strncat(p, q, count);
p_len = strlen(p);
copy_len = strnlen(q, count);
@@ -246,15 +288,16 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
/*
* Length argument is a constant expression, so we
* can perform compile-time bounds checking where
- * buffer sizes are known.
+ * buffer sizes are also known at compile time.
*/
/* Error when size is larger than enclosing struct. */
- if (p_size > p_size_field && p_size < size)
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
__write_overflow();
/* Warn when write size is larger than dest field. */
- if (p_size_field < size)
+ if (__compiletime_lessthan(p_size_field, size))
__write_overflow_field(p_size_field, size);
}
/*
@@ -268,10 +311,10 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
/*
* Always stop accesses beyond the struct that contains the
* field, when the buffer's remaining size is known.
- * (The -1 test is to optimize away checks where the buffer
+ * (The SIZE_MAX test is to optimize away checks where the buffer
* lengths are unknown.)
*/
- if (p_size != (size_t)(-1) && p_size < size)
+ if (p_size != SIZE_MAX && p_size < size)
fortify_panic("memset");
}
@@ -282,11 +325,11 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
})
/*
- * __builtin_object_size() must be captured here to avoid evaluating argument
- * side-effects further into the macro layers.
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
*/
#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
- __builtin_object_size(p, 0), __builtin_object_size(p, 1))
+ __struct_size(p), __member_size(p))
/*
* To make sure the compiler can enforce protection against buffer overflows,
@@ -319,7 +362,7 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
* V = vulnerable to run-time overflow (will need refactoring to solve)
*
*/
-__FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
const size_t p_size,
const size_t q_size,
const size_t p_size_field,
@@ -330,25 +373,28 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
/*
* Length argument is a constant expression, so we
* can perform compile-time bounds checking where
- * buffer sizes are known.
+ * buffer sizes are also known at compile time.
*/
/* Error when size is larger than enclosing struct. */
- if (p_size > p_size_field && p_size < size)
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
__write_overflow();
- if (q_size > q_size_field && q_size < size)
+ if (__compiletime_lessthan(q_size_field, q_size) &&
+ __compiletime_lessthan(q_size, size))
__read_overflow2();
/* Warn when write size argument larger than dest field. */
- if (p_size_field < size)
+ if (__compiletime_lessthan(p_size_field, size))
__write_overflow_field(p_size_field, size);
/*
* Warn for source field over-read when building with W=1
* or when an over-write happened, so both can be fixed at
* the same time.
*/
- if ((IS_ENABLED(KBUILD_EXTRA_WARN1) || p_size_field < size) &&
- q_size_field < size)
+ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
+ __compiletime_lessthan(p_size_field, size)) &&
+ __compiletime_lessthan(q_size_field, size))
__read_overflow2_field(q_size_field, size);
}
/*
@@ -362,41 +408,104 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
/*
* Always stop accesses beyond the struct that contains the
* field, when the buffer's remaining size is known.
- * (The -1 test is to optimize away checks where the buffer
+ * (The SIZE_MAX test is to optimize away checks where the buffer
* lengths are unknown.)
*/
- if ((p_size != (size_t)(-1) && p_size < size) ||
- (q_size != (size_t)(-1) && q_size < size))
+ if ((p_size != SIZE_MAX && p_size < size) ||
+ (q_size != SIZE_MAX && q_size < size))
fortify_panic(func);
+
+ /*
+ * Warn when writing beyond destination field size.
+ *
+ * We must ignore p_size_field == 0 for existing 0-element
+ * fake flexible arrays, until they are all converted to
+ * proper flexible arrays.
+ *
+ * The implementation of __builtin_*object_size() behaves
+ * like sizeof() when not directly referencing a flexible
+ * array member, which means there will be many bounds checks
+ * that will appear at run-time, without a way for them to be
+ * detected at compile-time (as can be done when the destination
+ * is specifically the flexible array member).
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+ */
+ if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+ p_size != p_size_field && p_size_field < size)
+ return true;
+
+ return false;
}
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
p_size_field, q_size_field, op) ({ \
size_t __fortify_size = (size_t)(size); \
- fortify_memcpy_chk(__fortify_size, p_size, q_size, \
- p_size_field, q_size_field, #op); \
+ WARN_ONCE(fortify_memcpy_chk(__fortify_size, p_size, q_size, \
+ p_size_field, q_size_field, #op), \
+ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+ __fortify_size, \
+ "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
+ p_size_field); \
__underlying_##op(p, q, __fortify_size); \
})
/*
- * __builtin_object_size() must be captured here to avoid evaluating argument
- * side-effects further into the macro layers.
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ *
+ * struct middle {
+ * u16 a;
+ * u8 middle_buf[16];
+ * int b;
+ * };
+ * struct end {
+ * u16 a;
+ * u8 end_buf[16];
+ * };
+ * struct flex {
+ * int a;
+ * u8 flex_buf[];
+ * };
+ *
+ * void func(TYPE *ptr) { ... }
+ *
+ * Cases where destination size cannot be currently detected:
+ * - the size of ptr's object (seemingly by design, gcc & clang fail):
+ * __builtin_object_size(ptr, 1) == SIZE_MAX
+ * - the size of flexible arrays in ptr's obj (by design, dynamic size):
+ * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
+ * - the size of ANY array at the end of ptr's obj (gcc and clang bug):
+ * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
+ *
+ * Cases where destination size is currently detected:
+ * - the size of non-array members within ptr's object:
+ * __builtin_object_size(ptr->a, 1) == 2
+ * - the size of non-flexible-array in the middle of ptr's obj:
+ * __builtin_object_size(ptr->middle_buf, 1) == 16
+ *
+ */
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
*/
#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
- __builtin_object_size(p, 0), __builtin_object_size(q, 0), \
- __builtin_object_size(p, 1), __builtin_object_size(q, 1), \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
memcpy)
#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
- __builtin_object_size(p, 0), __builtin_object_size(q, 0), \
- __builtin_object_size(p, 1), __builtin_object_size(q, 1), \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
memmove)
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -406,13 +515,13 @@ __FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __struct_size(p);
+ size_t q_size = __struct_size(q);
if (__builtin_constant_p(size)) {
- if (p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
- if (q_size < size)
+ if (__compiletime_lessthan(q_size, size))
__read_overflow2();
}
if (p_size < size || q_size < size)
@@ -423,9 +532,9 @@ int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t
__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -435,9 +544,9 @@ void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -447,9 +556,9 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -460,16 +569,18 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
char *strcpy(char * const POS p, const char * const POS q)
{
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
size_t size;
/* If neither buffer size is known, immediately give up. */
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ if (__builtin_constant_p(p_size) &&
+ __builtin_constant_p(q_size) &&
+ p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strcpy(p, q);
size = strlen(q) + 1;
/* Compile-time check for const size overflow. */
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
/* Run-time check for dynamic size overflow. */
if (p_size < size)
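[Note: __compiletime_lessthan() is the workhorse of these hunks. Per its definition above, it is true only when the compiler can already prove bounds < length as a constant, and false whenever either value is known only at run time, so the compile-time __write_overflow()/__read_overflow() errors fire solely on provable overflows. An illustrative sketch of its behavior:

	char dst[8];
	volatile size_t n = 16;

	__compiletime_lessthan(sizeof(dst), 16)	/* true: 8 < 16, both constant */
	__compiletime_lessthan(sizeof(dst), n)	/* false: n is not constant */
	__compiletime_lessthan(sizeof(dst), 4)	/* false: 8 < 4 is false */
]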
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0eb3b192f07a..19dfdd74835e 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -51,40 +51,50 @@ static inline bool __must_check __must_check_overflow(bool overflow)
return unlikely(overflow);
}
-/*
- * For simplicity and code hygiene, the fallback code below insists on
- * a, b and *d having the same type (similar to the min() and max()
- * macros), whereas gcc's type-generic overflow checkers accept
- * different types. Hence we don't just make check_add_overflow an
- * alias for __builtin_add_overflow, but add type checks similar to
- * below.
+/** check_add_overflow() - Calculate addition with overflow checking
+ *
+ * @a: first addend
+ * @b: second addend
+ * @d: pointer to store sum
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted addition, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * sum has overflowed or been truncated.
*/
-#define check_add_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_add_overflow(__a, __b, __d); \
-}))
+#define check_add_overflow(a, b, d) \
+ __must_check_overflow(__builtin_add_overflow(a, b, d))
-#define check_sub_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_sub_overflow(__a, __b, __d); \
-}))
+/** check_sub_overflow() - Calculate subtraction with overflow checking
+ *
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ * @d: pointer to store difference
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted subtraction, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * difference has underflowed or been truncated.
+ */
+#define check_sub_overflow(a, b, d) \
+ __must_check_overflow(__builtin_sub_overflow(a, b, d))
-#define check_mul_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_mul_overflow(__a, __b, __d); \
-}))
+/** check_mul_overflow() - Calculate multiplication with overflow checking
+ *
+ * @a: first factor
+ * @b: second factor
+ * @d: pointer to store product
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted multiplication, but is not
+ * considered "safe for use" on a non-zero return value, which indicates
+ * that the product has overflowed or been truncated.
+ */
+#define check_mul_overflow(a, b, d) \
+ __must_check_overflow(__builtin_mul_overflow(a, b, d))
/** check_shl_overflow() - Calculate a left-shifted value and check overflow
*
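[Note: dropping the same-type enforcement means the checkers now pass mixed integer types straight to the builtins, which handle promotion correctly. Typical use, as a sketch — get_count(), struct item, and struct header are hypothetical names:

	size_t bytes;
	u32 count = get_count();

	if (check_mul_overflow(count, sizeof(struct item), &bytes))
		return -EOVERFLOW;	/* product overflowed or truncated */
	if (check_add_overflow(bytes, sizeof(struct header), &bytes))
		return -EOVERFLOW;
]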
diff --git a/include/linux/string.h b/include/linux/string.h
index 61ec7e4f6311..cf7607b32102 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -261,6 +261,49 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
int pad);
/**
+ * strtomem_pad - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ * @pad: Padding character to fill any remaining bytes of @dest after copy
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * an explicit padding character. If padding is not required, use strtomem().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
+} while (0)
+
+/**
+ * strtomem - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * without trailing padding. If padding is required, use strtomem_pad().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
+} while (0)
+
+/**
* memset_after - Set a value after a struct member to the end of a struct
*
* @obj: Address of target struct instance
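[Note: usage follows the kernel-doc above — the destination must be a character array whose length the compiler can see (so __builtin_object_size() is constant), typically a struct member marked __nonstring; a plain pointer destination trips the BUILD_BUG_ON(). A sketch with illustrative names:

	struct record {
		u8 type;
		char label[16] __nonstring;
	} r;

	strtomem_pad(r.label, src, 0);	/* copy, zero-pad the remainder */
	strtomem(r.label, src);		/* copy only, no padding */
]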
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e62271da937f..f473f7d8a0a2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2511,6 +2511,18 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
+config IS_SIGNED_TYPE_KUNIT_TEST
+ tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for the is_signed_type() macro.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config OVERFLOW_KUNIT_TEST
tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2535,6 +2547,15 @@ config STACKINIT_KUNIT_TEST
CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+config FORTIFY_KUNIT_TEST
+ tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT && FORTIFY_SOURCE
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for checking internals of FORTIFY_SOURCE as used
+ by the str*() and mem*() family of functions. For testing runtime
+ traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests.
+
config TEST_UDELAY
tristate "udelay test driver"
help
diff --git a/lib/Makefile b/lib/Makefile
index ffabc30a27d4..7d7c9f67eff6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -377,9 +377,11 @@ obj-$(CONFIG_BITS_TEST) += test_bits.o
obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
new file mode 100644
index 000000000000..409af07f340a
--- /dev/null
+++ b/lib/fortify_kunit.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
+ * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ *
+ * For corner cases with UBSAN, try testing with:
+ *
+ * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
+ * --kconfig_add CONFIG_FORTIFY_SOURCE=y \
+ * --kconfig_add CONFIG_UBSAN=y \
+ * --kconfig_add CONFIG_UBSAN_TRAP=y \
+ * --kconfig_add CONFIG_UBSAN_BOUNDS=y \
+ * --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
+ * --make_options LLVM=1 fortify
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+
+static const char array_of_10[] = "this is 10";
+static const char *ptr_of_11 = "this is 11!";
+static char array_unknown[] = "compiler thinks I might change";
+
+static void known_sizes_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+ /* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
+}
+
+/* This is volatile so the optimizer can't perform DCE below. */
+static volatile int pick;
+
+/* Not inline to keep optimizer from figuring out which string we want. */
+static noinline size_t want_minus_one(int pick)
+{
+ const char *str;
+
+ switch (pick) {
+ case 1:
+ str = "4444";
+ break;
+ case 2:
+ str = "333";
+ break;
+ default:
+ str = "1";
+ break;
+ }
+ return __compiletime_strlen(str);
+}
+
+static void control_flow_split_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
+}
+
+static struct kunit_case fortify_test_cases[] = {
+ KUNIT_CASE(known_sizes_test),
+ KUNIT_CASE(control_flow_split_test),
+ {}
+};
+
+static struct kunit_suite fortify_test_suite = {
+ .name = "fortify",
+ .test_cases = fortify_test_cases,
+};
+
+kunit_test_suite(fortify_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
new file mode 100644
index 000000000000..207207522925
--- /dev/null
+++ b/lib/is_signed_type_kunit.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ./tools/testing/kunit/kunit.py run is_signed_type [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/compiler.h>
+
+enum unsigned_enum {
+ constant_a = 3,
+};
+
+enum signed_enum {
+ constant_b = -1,
+ constant_c = 2,
+};
+
+static void is_signed_type_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
+#ifdef __CHAR_UNSIGNED__
+ KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
+#else
+ KUNIT_EXPECT_EQ(test, is_signed_type(char), true);
+#endif
+ KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum unsigned_enum), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum signed_enum), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(void *), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(const char *), false);
+}
+
+static struct kunit_case is_signed_type_test_cases[] = {
+ KUNIT_CASE(is_signed_type_test),
+ {}
+};
+
+static struct kunit_suite is_signed_type_test_suite = {
+ .name = "is_signed_type",
+ .test_cases = is_signed_type_test_cases,
+};
+
+kunit_test_suite(is_signed_type_test_suite);
+
+MODULE_LICENSE("Dual MIT/GPL");
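
The helper under test lives in include/linux/compiler.h and is a one-liner: casting -1 to the type yields a value below (type)1 only when the type is signed. A standalone demo of the same trick (the kernel's __force sparse annotation dropped):

    #include <stdio.h>

    /* Same trick as the kernel's is_signed_type(): (type)-1 compares
     * below (type)1 only when the type is signed. */
    #define is_signed_type(type)    (((type)(-1)) < (type)1)

    int main(void)
    {
        printf("int:          %d\n", is_signed_type(int));          /* 1 */
        printf("unsigned int: %d\n", is_signed_type(unsigned int)); /* 0 */
        printf("char *:       %d\n", is_signed_type(char *));       /* 0 */
        return 0;
    }
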
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 62f8ffcbbaa3..2b5cc70ac53f 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -29,9 +29,8 @@ struct some_bytes {
};
#define check(instance, v) do { \
- int i; \
BUILD_BUG_ON(sizeof(instance.data) != 32); \
- for (i = 0; i < sizeof(instance.data); i++) { \
+ for (size_t i = 0; i < sizeof(instance.data); i++) { \
KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
"line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
__LINE__, #instance, v, i, instance.data[i]); \
@@ -39,9 +38,8 @@ struct some_bytes {
} while (0)
#define compare(name, one, two) do { \
- int i; \
BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
- for (i = 0; i < sizeof(one); i++) { \
+ for (size_t i = 0; i < sizeof(one); i++) { \
KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
"line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
__LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
@@ -272,10 +270,63 @@ static void memset_test(struct kunit *test)
#undef TEST_OP
}
+static void strtomem_test(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+	strtomem_pad(wrap.output, truncate, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
KUNIT_CASE(memmove_test),
+ KUNIT_CASE(strtomem_test),
{}
};
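
For reference, the semantics being verified above: strtomem() copies a NUL-terminated source into a fixed-size destination array without writing a trailing NUL, and strtomem_pad() additionally fills the tail with a pad byte. A simplified userspace model of the padded variant (the real macros in include/linux/string.h are built on __builtin_object_size() checks plus memcpy_and_pad(), and additionally enforce at compile time that the destination size is known):

    #include <stdio.h>
    #include <string.h>

    /*
     * Simplified model of strtomem_pad(): copy at most sizeof(dest)
     * string bytes (no trailing NUL), pad the remainder. This sketch
     * just shows the behavior the canary tests above expect.
     */
    #define strtomem_pad(dest, src, pad) do {                          \
        size_t _dest_len = sizeof(dest);                               \
        size_t _src_len = strnlen(src, _dest_len);                     \
        memcpy(dest, src, _src_len);                                   \
        memset((char *)(dest) + _src_len, (pad), _dest_len - _src_len);\
    } while (0)

    int main(void)
    {
        unsigned char buf[8];

        strtomem_pad(buf, "hi", 0xAA);  /* 'h' 'i' then six 0xAA bytes */
        for (size_t i = 0; i < sizeof(buf); i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }
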
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 7e3e43679b73..f385ca652b74 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -16,12 +16,15 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
-#define DEFINE_TEST_ARRAY(t) \
- static const struct test_ ## t { \
- t a, b; \
- t sum, diff, prod; \
- bool s_of, d_of, p_of; \
- } t ## _tests[]
+#define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \
+ static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \
+ t1 a; \
+ t2 b; \
+ t sum, diff, prod; \
+ bool s_of, d_of, p_of; \
+ } t1 ## _ ## t2 ## __ ## t ## _tests[]
+
+#define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t)
DEFINE_TEST_ARRAY(u8) = {
{0, 0, 0, 0, 0, false, false, false},
@@ -222,21 +225,27 @@ DEFINE_TEST_ARRAY(s64) = {
};
#endif
-#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
- t _r; \
- bool _of; \
- \
- _of = check_ ## op ## _overflow(a, b, &_r); \
- KUNIT_EXPECT_EQ_MSG(test, _of, of, \
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+ int _a_orig = a, _a_bump = a + 1; \
+ int _b_orig = b, _b_bump = b + 1; \
+ bool _of; \
+ t _r; \
+ \
+ _of = check_ ## op ## _overflow(a, b, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _of, of, \
"expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
- a, b, of ? "" : " not", #t); \
- KUNIT_EXPECT_EQ_MSG(test, _r, r, \
+ a, b, of ? "" : " not", #t); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, r, \
"expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
- a, b, r, _r, #t); \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
} while (0)
-#define DEFINE_TEST_FUNC(t, fmt) \
-static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
+#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
+static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
{ \
check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
@@ -245,15 +254,18 @@ static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \
check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
} \
\
-static void t ## _overflow_test(struct kunit *test) { \
+static void n ## _overflow_test(struct kunit *test) { \
unsigned i; \
\
- for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
- do_test_ ## t(test, &t ## _tests[i]); \
+ for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \
+ do_test_ ## n(test, &n ## _tests[i]); \
kunit_info(test, "%zu %s arithmetic tests finished\n", \
- ARRAY_SIZE(t ## _tests), #t); \
+ ARRAY_SIZE(n ## _tests), #n); \
}
+#define DEFINE_TEST_FUNC(t, fmt) \
+ DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt)
+
DEFINE_TEST_FUNC(u8, "%d");
DEFINE_TEST_FUNC(s8, "%d");
DEFINE_TEST_FUNC(u16, "%d");
@@ -265,9 +277,32 @@ DEFINE_TEST_FUNC(u64, "%llu");
DEFINE_TEST_FUNC(s64, "%lld");
#endif
-static void overflow_shift_test(struct kunit *test)
-{
- int count = 0;
+DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true},
+ {U8_MAX + 1, 0, 0, 0, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U32_MAX, 0, -1, -1, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false},
+ {1, 2, 3, -1, 2, false, false, false},
+};
+DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(int, int, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 2, 3, U8_MAX, 2, false, true, false},
+ {-1, 0, U8_MAX, U8_MAX, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d");
/* Args are: value, shift, type, expected result, overflow expected */
#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
@@ -292,6 +327,10 @@ static void overflow_shift_test(struct kunit *test)
count++; \
} while (0)
+static void shift_sane_test(struct kunit *test)
+{
+ int count = 0;
+
/* Sane shifts. */
TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
@@ -334,6 +373,13 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0, 30, s32, 0, false);
TEST_ONE_SHIFT(0, 62, s64, 0, false);
+ kunit_info(test, "%d sane shift tests finished\n", count);
+}
+
+static void shift_overflow_test(struct kunit *test)
+{
+ int count = 0;
+
/* Overflow: shifted the bit off the end. */
TEST_ONE_SHIFT(1, 8, u8, 0, true);
TEST_ONE_SHIFT(1, 16, u16, 0, true);
@@ -381,6 +427,13 @@ static void overflow_shift_test(struct kunit *test)
/* 0100000100001000001000000010000001000010000001000100010001001011 */
TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+ kunit_info(test, "%d overflow shift tests finished\n", count);
+}
+
+static void shift_truncate_test(struct kunit *test)
+{
+ int count = 0;
+
/* Overflow: values larger than destination type. */
TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
@@ -392,6 +445,33 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ TEST_ONE_SHIFT(0, 32, int, 0, true);
+ TEST_ONE_SHIFT(0, 33, int, 0, true);
+ TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ kunit_info(test, "%d truncate shift tests finished\n", count);
+}
+
+static void shift_nonsense_test(struct kunit *test)
+{
+ int count = 0;
+
/* Nonsense: negative initial value. */
TEST_ONE_SHIFT(-1, 0, s8, 0, true);
TEST_ONE_SHIFT(-1, 0, u8, 0, true);
@@ -416,26 +496,6 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0, -30, s64, 0, true);
TEST_ONE_SHIFT(0, -30, u64, 0, true);
- /* Overflow: shifted at or beyond entire type's bit width. */
- TEST_ONE_SHIFT(0, 8, u8, 0, true);
- TEST_ONE_SHIFT(0, 9, u8, 0, true);
- TEST_ONE_SHIFT(0, 8, s8, 0, true);
- TEST_ONE_SHIFT(0, 9, s8, 0, true);
- TEST_ONE_SHIFT(0, 16, u16, 0, true);
- TEST_ONE_SHIFT(0, 17, u16, 0, true);
- TEST_ONE_SHIFT(0, 16, s16, 0, true);
- TEST_ONE_SHIFT(0, 17, s16, 0, true);
- TEST_ONE_SHIFT(0, 32, u32, 0, true);
- TEST_ONE_SHIFT(0, 33, u32, 0, true);
- TEST_ONE_SHIFT(0, 32, int, 0, true);
- TEST_ONE_SHIFT(0, 33, int, 0, true);
- TEST_ONE_SHIFT(0, 32, s32, 0, true);
- TEST_ONE_SHIFT(0, 33, s32, 0, true);
- TEST_ONE_SHIFT(0, 64, u64, 0, true);
- TEST_ONE_SHIFT(0, 65, u64, 0, true);
- TEST_ONE_SHIFT(0, 64, s64, 0, true);
- TEST_ONE_SHIFT(0, 65, s64, 0, true);
-
/*
* Corner case: for unsigned types, we fail when we've shifted
* through the entire width of bits. For signed types, we might
@@ -451,9 +511,9 @@ static void overflow_shift_test(struct kunit *test)
TEST_ONE_SHIFT(0, 31, s32, 0, false);
TEST_ONE_SHIFT(0, 63, s64, 0, false);
- kunit_info(test, "%d shift tests finished\n", count);
-#undef TEST_ONE_SHIFT
+ kunit_info(test, "%d nonsense shift tests finished\n", count);
}
+#undef TEST_ONE_SHIFT
/*
* Deal with the various forms of allocator arguments. See comments above
@@ -649,18 +709,25 @@ static void overflow_size_helpers_test(struct kunit *test)
}
static struct kunit_case overflow_test_cases[] = {
- KUNIT_CASE(u8_overflow_test),
- KUNIT_CASE(s8_overflow_test),
- KUNIT_CASE(u16_overflow_test),
- KUNIT_CASE(s16_overflow_test),
- KUNIT_CASE(u32_overflow_test),
- KUNIT_CASE(s32_overflow_test),
+ KUNIT_CASE(u8_u8__u8_overflow_test),
+ KUNIT_CASE(s8_s8__s8_overflow_test),
+ KUNIT_CASE(u16_u16__u16_overflow_test),
+ KUNIT_CASE(s16_s16__s16_overflow_test),
+ KUNIT_CASE(u32_u32__u32_overflow_test),
+ KUNIT_CASE(s32_s32__s32_overflow_test),
/* Clang 13 and earlier generate unwanted libcalls on 32-bit. */
#if BITS_PER_LONG == 64
- KUNIT_CASE(u64_overflow_test),
- KUNIT_CASE(s64_overflow_test),
+ KUNIT_CASE(u64_u64__u64_overflow_test),
+ KUNIT_CASE(s64_s64__s64_overflow_test),
#endif
- KUNIT_CASE(overflow_shift_test),
+ KUNIT_CASE(u32_u32__u8_overflow_test),
+ KUNIT_CASE(u32_u32__int_overflow_test),
+ KUNIT_CASE(u8_u8__int_overflow_test),
+ KUNIT_CASE(int_int__u8_overflow_test),
+ KUNIT_CASE(shift_sane_test),
+ KUNIT_CASE(shift_overflow_test),
+ KUNIT_CASE(shift_truncate_test),
+ KUNIT_CASE(shift_nonsense_test),
KUNIT_CASE(overflow_allocation_test),
KUNIT_CASE(overflow_size_helpers_test),
{}
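
The new mixed-type arrays work because check_add_overflow() and friends are thin wrappers around the compiler's __builtin_*_overflow() primitives, which compute the operation in infinite precision and report whether the result fits the destination type. A quick standalone illustration of the u32 + u32 -> u8 case from u32_u32__u8_tests:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t a = UINT8_MAX, b = 2;
        uint8_t r8;
        int ri;
        bool of;

        /* 255 + 2 = 257 does not fit u8: result truncates to 1. */
        of = __builtin_add_overflow(a, b, &r8);
        printf("u8:  r=%u of=%d\n", r8, of);   /* r=1 of=1 */

        /* The same operands fit an int destination just fine. */
        of = __builtin_add_overflow(a, b, &ri);
        printf("int: r=%d of=%d\n", ri, of);   /* r=257 of=0 */
        return 0;
    }
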
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 6ae482158bc4..52bd7df84fd6 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -64,6 +64,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict)
endif
endif
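
Context for the new flag: newer Clang releases add -Wcast-function-type-strict, which warns about any function-pointer cast to a non-identical prototype, even an ABI-compatible one, and the kernel still contains such casts around generic callbacks. A hypothetical standalone file showing the kind of cast the stricter warning flags:

    /* clang -Wcast-function-type-strict warns on this cast even though
     * the two prototypes are ABI-compatible on common targets. Calling
     * through the mismatched pointer is undefined behavior per the C
     * standard (and is what kernel CFI schemes trap on). */
    typedef void (*generic_cb)(void *arg);

    static void bump_int(int *p)
    {
        (*p)++;
    }

    int main(void)
    {
        generic_cb cb = (generic_cb)bump_int;  /* warning here */
        int x = 0;

        cb(&x);
        return x - 1;  /* exits 0 if the call behaved as expected */
    }
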
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index bd2aabb2c60f..995bc42003e6 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -22,11 +22,17 @@ menu "Memory initialization"
config CC_HAS_AUTO_VAR_INIT_PATTERN
def_bool $(cc-option,-ftrivial-auto-var-init=pattern)
-config CC_HAS_AUTO_VAR_INIT_ZERO
- # GCC ignores the -enable flag, so we can test for the feature with
- # a single invocation using the flag, but drop it as appropriate in
- # the Makefile, depending on the presence of Clang.
+config CC_HAS_AUTO_VAR_INIT_ZERO_BARE
+ def_bool $(cc-option,-ftrivial-auto-var-init=zero)
+
+config CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
+ # Clang 16 and later warn about using the -enable flag, but it
+ # is required before then.
def_bool $(cc-option,-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang)
+ depends on !CC_HAS_AUTO_VAR_INIT_ZERO_BARE
+
+config CC_HAS_AUTO_VAR_INIT_ZERO
+ def_bool CC_HAS_AUTO_VAR_INIT_ZERO_BARE || CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
choice
prompt "Initialize kernel stack variables at function entry"
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
index 70e7985b2561..6724eaba3d36 100644
--- a/security/loadpin/Kconfig
+++ b/security/loadpin/Kconfig
@@ -33,4 +33,9 @@ config SECURITY_LOADPIN_VERITY
on the LoadPin securityfs entry 'dm-verity'. The ioctl
expects a file descriptor of a file with verity digests as
parameter. The file must be located on the pinned root and
- contain a comma separated list of digests.
+ start with the line:
+
+ # LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS
+
+ This is followed by the verity digests, with one digest per
+ line.
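
Putting the two help-text requirements together, a digest file accepted by the new code looks like this (the digest values below are placeholders, not real measurements):

    # LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS
    3c248fc77cbdb5e86d64ff8b9d9a6a913f5a7b568b6b0e3f70fba3cbc3f0fa9a
    1f2d3c4b5a69788796a5b4c3d2e1f00112233445566778899aabbccddeeff00
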
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 44521582dcba..de41621f4998 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -21,6 +21,8 @@
#include <linux/dm-verity-loadpin.h>
#include <uapi/linux/loadpin.h>
+#define VERITY_DIGEST_FILE_HEADER "# LOADPIN_TRUSTED_VERITY_ROOT_DIGESTS"
+
static void report_load(const char *origin, struct file *file, char *operation)
{
char *cmdline, *pathname;
@@ -292,9 +294,21 @@ static int read_trusted_verity_root_digests(unsigned int fd)
p = strim(data);
while ((d = strsep(&p, "\n")) != NULL) {
- int len = strlen(d);
+ int len;
struct dm_verity_loadpin_trusted_root_digest *trd;
+ if (d == data) {
+ /* first line, validate header */
+ if (strcmp(d, VERITY_DIGEST_FILE_HEADER)) {
+ rc = -EPROTO;
+ goto err;
+ }
+
+ continue;
+ }
+
+ len = strlen(d);
+
if (len % 2) {
rc = -EPROTO;
goto err;
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
index 65e53eb0840b..607b8d7e3ea3 100644
--- a/tools/testing/selftests/lkdtm/tests.txt
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -75,7 +75,9 @@ USERCOPY_KERNEL
STACKLEAK_ERASING OK: the rest of the thread stack is properly erased
CFI_FORWARD_PROTO
CFI_BACKWARD call trace:|ok: control flow unchanged
-FORTIFIED_STRSCPY
-FORTIFIED_OBJECT
-FORTIFIED_SUBOBJECT
+FORTIFY_STRSCPY detected buffer overflow
+FORTIFY_STR_OBJECT detected buffer overflow
+FORTIFY_STR_MEMBER detected buffer overflow
+FORTIFY_MEM_OBJECT detected buffer overflow
+FORTIFY_MEM_MEMBER detected field-spanning write
PPC_SLB_MULTIHIT Recovered