Diffstat (limited to 'mm/kmsan')
-rw-r--r--  mm/kmsan/core.c       |   2
-rw-r--r--  mm/kmsan/hooks.c      |  55
-rw-r--r--  mm/kmsan/init.c       |   6
-rw-r--r--  mm/kmsan/kmsan_test.c | 119
-rw-r--r--  mm/kmsan/shadow.c     |  27
5 files changed, 162 insertions, 47 deletions
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index f710257d6867..7d1e4aa30bae 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -73,7 +73,7 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
 
 	nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
 
-	/* Don't sleep (see might_sleep_if() in __alloc_pages_nodemask()). */
+	/* Don't sleep. */
 	flags &= ~__GFP_DIRECT_RECLAIM;
 
 	handle = __stack_depot_save(entries, nr_entries, flags, true);
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 3807502766a3..ec0da72e65aa 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
  * into the virtual memory. If those physical pages already had shadow/origin,
  * those are ignored.
  */
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
-			      phys_addr_t phys_addr, pgprot_t prot,
-			      unsigned int page_shift)
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+			     phys_addr_t phys_addr, pgprot_t prot,
+			     unsigned int page_shift)
 {
 	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
 	struct page *shadow, *origin;
 	unsigned long off = 0;
-	int nr;
+	int nr, err = 0, clean = 0, mapped;
 
 	if (!kmsan_enabled || kmsan_in_runtime())
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	kmsan_enter_runtime();
-	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
 		shadow = alloc_pages(gfp_mask, 1);
 		origin = alloc_pages(gfp_mask, 1);
-		__vmap_pages_range_noflush(
+		if (!shadow || !origin) {
+			err = -ENOMEM;
+			goto ret;
+		}
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_shadow(start + off),
 			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
 			PAGE_SHIFT);
-		__vmap_pages_range_noflush(
+		if (mapped) {
+			err = mapped;
+			goto ret;
+		}
+		shadow = NULL;
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_origin(start + off),
 			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
 			PAGE_SHIFT);
+		if (mapped) {
+			__vunmap_range_noflush(
+				vmalloc_shadow(start + off),
+				vmalloc_shadow(start + off + PAGE_SIZE));
+			err = mapped;
+			goto ret;
+		}
+		origin = NULL;
+	}
+	/* Page mapping loop finished normally, nothing to clean up. */
+	clean = 0;
+
+ret:
+	if (clean > 0) {
+		/*
+		 * Something went wrong. Clean up shadow/origin pages allocated
+		 * on the last loop iteration, then delete mappings created
+		 * during the previous iterations.
+		 */
+		if (shadow)
+			__free_pages(shadow, 1);
+		if (origin)
+			__free_pages(origin, 1);
+		__vunmap_range_noflush(
+			vmalloc_shadow(start),
+			vmalloc_shadow(start + clean * PAGE_SIZE));
+		__vunmap_range_noflush(
+			vmalloc_origin(start),
+			vmalloc_origin(start + clean * PAGE_SIZE));
 	}
 	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
 	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
 	kmsan_leave_runtime();
+	return err;
 }
 
 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
diff --git a/mm/kmsan/init.c b/mm/kmsan/init.c
index 7fb794242fad..ffedf4dbc49d 100644
--- a/mm/kmsan/init.c
+++ b/mm/kmsan/init.c
@@ -96,7 +96,7 @@ void __init kmsan_init_shadow(void)
 struct metadata_page_pair {
 	struct page *shadow, *origin;
 };
-static struct metadata_page_pair held_back[MAX_ORDER] __initdata;
+static struct metadata_page_pair held_back[MAX_ORDER + 1] __initdata;
 
 /*
  * Eager metadata allocation. When the memblock allocator is freeing pages to
@@ -211,8 +211,8 @@ static void kmsan_memblock_discard(void)
 	 *   order=N-1,
 	 * - repeat.
 	 */
-	collect.order = MAX_ORDER - 1;
-	for (int i = MAX_ORDER - 1; i >= 0; i--) {
+	collect.order = MAX_ORDER;
+	for (int i = MAX_ORDER; i >= 0; i--) {
 		if (held_back[i].shadow)
 			smallstack_push(&collect, held_back[i].shadow);
 		if (held_back[i].origin)
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 088e21a48dc4..312989aa2865 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -408,6 +408,37 @@ static void test_printk(struct kunit *test)
 }
 
 /*
+ * Prevent the compiler from optimizing @var away. Without this, Clang may
+ * notice that @var is uninitialized and drop memcpy() calls that use it.
+ *
+ * There is OPTIMIZER_HIDE_VAR() in linux/compiler.h that we cannot use here,
+ * because it is implemented as inline assembly receiving @var as a parameter
+ * and will enforce a KMSAN check. Same is true for e.g. barrier_data(var).
+ */
+#define DO_NOT_OPTIMIZE(var) barrier()
+
+/*
+ * Test case: ensure that memcpy() correctly copies initialized values.
+ * Also serves as a regression test to ensure DO_NOT_OPTIMIZE() does not cause
+ * extra checks.
+ */
+static void test_init_memcpy(struct kunit *test)
+{
+	EXPECTATION_NO_REPORT(expect);
+	volatile int src;
+	volatile int dst = 0;
+
+	DO_NOT_OPTIMIZE(src);
+	src = 1;
+	kunit_info(
+		test,
+		"memcpy()ing aligned initialized src to aligned dst (no reports)\n");
+	memcpy((void *)&dst, (void *)&src, sizeof(src));
+	kmsan_check_memory((void *)&dst, sizeof(dst));
+	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
  * Test case: ensure that memcpy() correctly copies uninitialized values between
  * aligned `src` and `dst`.
  */
@@ -420,7 +451,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
-	OPTIMIZER_HIDE_VAR(uninit_src);
+	DO_NOT_OPTIMIZE(uninit_src);
 	memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
 	kmsan_check_memory((void *)&dst, sizeof(dst));
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
@@ -443,7 +474,7 @@ static void test_memcpy_aligned_to_unaligned(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
-	OPTIMIZER_HIDE_VAR(uninit_src);
+	DO_NOT_OPTIMIZE(uninit_src);
 	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
 	kmsan_check_memory((void *)dst, 4);
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
@@ -467,13 +498,33 @@ static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
 	kunit_info(
 		test,
 		"memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
-	OPTIMIZER_HIDE_VAR(uninit_src);
+	DO_NOT_OPTIMIZE(uninit_src);
 	memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
 	kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
-static noinline void fibonacci(int *array, int size, int start) {
+/* Generate test cases for memset16(), memset32(), memset64(). */
+#define DEFINE_TEST_MEMSETXX(size) \
+	static void test_memset##size(struct kunit *test) \
+	{ \
+		EXPECTATION_NO_REPORT(expect); \
+		volatile uint##size##_t uninit; \
+ \
+		kunit_info(test, \
+			   "memset" #size "() should initialize memory\n"); \
+		DO_NOT_OPTIMIZE(uninit); \
+		memset##size((uint##size##_t *)&uninit, 0, 1); \
+		kmsan_check_memory((void *)&uninit, sizeof(uninit)); \
+		KUNIT_EXPECT_TRUE(test, report_matches(&expect)); \
+	}
+
+DEFINE_TEST_MEMSETXX(16)
+DEFINE_TEST_MEMSETXX(32)
+DEFINE_TEST_MEMSETXX(64)
+
+static noinline void fibonacci(int *array, int size, int start)
+{
 	if (start < 2 || (start == size))
 		return;
 	array[start] = array[start - 1] + array[start - 2];
@@ -482,8 +533,7 @@ static void test_long_origin_chain(struct kunit *test)
 {
-	EXPECTATION_UNINIT_VALUE_FN(expect,
-				    "test_long_origin_chain");
+	EXPECTATION_UNINIT_VALUE_FN(expect, "test_long_origin_chain");
 	/* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */
 	volatile int accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2];
 	int last = ARRAY_SIZE(accum) - 1;
@@ -501,6 +551,36 @@ static void test_long_origin_chain(struct kunit *test)
 	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
+/*
+ * Test case: ensure that saving/restoring/printing stacks to/from stackdepot
+ * does not trigger errors.
+ *
+ * KMSAN uses stackdepot to store origin stack traces, that's why we do not
+ * instrument lib/stackdepot.c. Yet it must properly mark its outputs as
+ * initialized because other kernel features (e.g. netdev tracker) may also
+ * access stackdepot from instrumented code.
+ */
+static void test_stackdepot_roundtrip(struct kunit *test)
+{
+	unsigned long src_entries[16], *dst_entries;
+	unsigned int src_nentries, dst_nentries;
+	EXPECTATION_NO_REPORT(expect);
+	depot_stack_handle_t handle;
+
+	kunit_info(test, "testing stackdepot roundtrip (no reports)\n");
+
+	src_nentries =
+		stack_trace_save(src_entries, ARRAY_SIZE(src_entries), 1);
+	handle = stack_depot_save(src_entries, src_nentries, GFP_KERNEL);
+	stack_depot_print(handle);
+	dst_nentries = stack_depot_fetch(handle, &dst_entries);
+	KUNIT_EXPECT_TRUE(test, src_nentries == dst_nentries);
+
+	kmsan_check_memory((void *)dst_entries,
+			   sizeof(*dst_entries) * dst_nentries);
+	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
 static struct kunit_case kmsan_test_cases[] = {
 	KUNIT_CASE(test_uninit_kmalloc),
 	KUNIT_CASE(test_init_kmalloc),
@@ -515,10 +595,15 @@ static struct kunit_case kmsan_test_cases[] = {
 	KUNIT_CASE(test_uaf),
 	KUNIT_CASE(test_percpu_propagate),
 	KUNIT_CASE(test_printk),
+	KUNIT_CASE(test_init_memcpy),
 	KUNIT_CASE(test_memcpy_aligned_to_aligned),
 	KUNIT_CASE(test_memcpy_aligned_to_unaligned),
 	KUNIT_CASE(test_memcpy_aligned_to_unaligned2),
+	KUNIT_CASE(test_memset16),
+	KUNIT_CASE(test_memset32),
+	KUNIT_CASE(test_memset64),
 	KUNIT_CASE(test_long_origin_chain),
+	KUNIT_CASE(test_stackdepot_roundtrip),
 	{},
 };
 
@@ -541,33 +626,15 @@ static void test_exit(struct kunit *test)
 {
 }
 
-static void register_tracepoints(struct tracepoint *tp, void *ignore)
-{
-	check_trace_callback_type_console(probe_console);
-	if (!strcmp(tp->name, "console"))
-		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
-}
-
-static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
-{
-	if (!strcmp(tp->name, "console"))
-		tracepoint_probe_unregister(tp, probe_console, NULL);
-}
-
 static int kmsan_suite_init(struct kunit_suite *suite)
 {
-	/*
-	 * Because we want to be able to build the test as a module, we need to
-	 * iterate through all known tracepoints, since the static registration
-	 * won't work here.
-	 */
-	for_each_kernel_tracepoint(register_tracepoints, NULL);
+	register_trace_console(probe_console, NULL);
 	return 0;
 }
 
 static void kmsan_suite_exit(struct kunit_suite *suite)
 {
-	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
+	unregister_trace_console(probe_console, NULL);
 	tracepoint_synchronize_unregister();
 }
 
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index a787c04e9583..b8bb95eea5e3 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
 	kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-				    pgprot_t prot, struct page **pages,
-				    unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages,
+				   unsigned int page_shift)
 {
 	unsigned long shadow_start, origin_start, shadow_end, origin_end;
 	struct page **s_pages, **o_pages;
-	int nr, mapped;
+	int nr, mapped, err = 0;
 
 	if (!kmsan_enabled)
-		return;
+		return 0;
 
 	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
 	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
 	if (!shadow_start)
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
 	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-	if (!s_pages || !o_pages)
+	if (!s_pages || !o_pages) {
+		err = -ENOMEM;
 		goto ret;
+	}
 	for (int i = 0; i < nr; i++) {
 		s_pages[i] = shadow_page_for(pages[i]);
 		o_pages[i] = origin_page_for(pages[i]);
@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 	kmsan_enter_runtime();
 	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
 					    s_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
 					    o_pages, page_shift);
-	KMSAN_WARN_ON(mapped);
+	if (mapped) {
+		err = mapped;
+		goto ret;
+	}
 	kmsan_leave_runtime();
 	flush_tlb_kernel_range(shadow_start, shadow_end);
 	flush_tlb_kernel_range(origin_start, origin_end);
@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
 ret:
 	kfree(s_pages);
 	kfree(o_pages);
+	return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
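Note: with this change kmsan_vmap_pages_range_noflush() and kmsan_ioremap_page_range() return an int instead of void, so callers are expected to propagate the error rather than proceed with unmapped shadow/origin metadata. The diff above is limited to mm/kmsan; the sketch below only illustrates the caller-side contract and assumes the companion mm/vmalloc.c change, it is not part of this diff.

/*
 * Illustrative sketch (assumption, not shown in this diff): how a caller
 * such as vmap_pages_range_noflush() in mm/vmalloc.c can honour the new
 * int return value before creating the actual mapping.
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
			     pgprot_t prot, struct page **pages,
			     unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	/* Fail the whole request if KMSAN could not map shadow/origin. */
	if (ret)
		return ret;

	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}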