Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug          | 10
-rw-r--r--	lib/Kconfig.kasan          |  7
-rw-r--r--	lib/checksum.c             | 11
-rw-r--r--	lib/dim/dim.c              |  3
-rw-r--r--	lib/dim/net_dim.c          | 10
-rw-r--r--	lib/dynamic_queue_limits.c |  2
-rw-r--r--	lib/iov_iter.c             | 25
-rw-r--r--	lib/objpool.c              | 18
-rw-r--r--	lib/slub_kunit.c           |  2
9 files changed, 49 insertions(+), 39 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7312ae7c3cc5..67b669d2e70e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2115,6 +2115,16 @@ config FAIL_SUNRPC
 	  Provide fault-injection capability for SunRPC and
 	  its consumers.
 
+config FAIL_SKB_REALLOC
+	bool "Fault-injection capability forcing skb to reallocate"
+	depends on FAULT_INJECTION_DEBUG_FS
+	help
+	  Provide fault-injection capability that forces the skb to be
+	  reallocated, catching possible invalid pointers to the skb.
+
+	  For more information, check
+	  Documentation/dev-tools/fault-injection/fault-injection.rst
+
 config FAULT_INJECTION_CONFIGFS
 	bool "Configfs interface for fault-injection capabilities"
 	depends on FAULT_INJECTION
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 233ab2096924..98016e137b7f 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -22,11 +22,8 @@ config ARCH_DISABLE_KASAN_INLINE
 config CC_HAS_KASAN_GENERIC
 	def_bool $(cc-option, -fsanitize=kernel-address)
 
-# GCC appears to ignore no_sanitize_address when -fsanitize=kernel-hwaddress
-# is passed. See https://bugzilla.kernel.org/show_bug.cgi?id=218854 (and
-# the linked LKML thread) for more details.
 config CC_HAS_KASAN_SW_TAGS
-	def_bool !CC_IS_GCC && $(cc-option, -fsanitize=kernel-hwaddress)
+	def_bool $(cc-option, -fsanitize=kernel-hwaddress)
 
 # This option is only required for software KASAN modes.
 # Old GCC versions do not have proper support for no_sanitize_address.
@@ -101,7 +98,7 @@ config KASAN_SW_TAGS
 	help
 	  Enables Software Tag-Based KASAN.
 
-	  Requires Clang.
+	  Requires GCC 11+ or Clang.
 
 	  Supported only on arm64 CPUs and relies on Top Byte Ignore.
 
diff --git a/lib/checksum.c b/lib/checksum.c
index 6860d6b05a17..025ba546e1ec 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -34,15 +34,6 @@
 #include <asm/byteorder.h>
 
 #ifndef do_csum
-static inline unsigned short from32to16(unsigned int x)
-{
-	/* add up 16-bit and 16-bit for 16+c bit */
-	x = (x & 0xffff) + (x >> 16);
-	/* add up carry.. */
-	x = (x & 0xffff) + (x >> 16);
-	return x;
-}
-
 static unsigned int do_csum(const unsigned char *buff, int len)
 {
 	int odd;
@@ -90,7 +81,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
 #else
 		result += (*buff << 8);
 #endif
-	result = from32to16(result);
+	result = csum_from32to16(result);
 	if (odd)
 		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
 out:
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index 83b65ac74d73..97c3d084ebf0 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -54,7 +54,8 @@ void dim_park_tired(struct dim *dim)
 }
 EXPORT_SYMBOL(dim_park_tired);
 
-bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(const struct dim_sample *start,
+		    const struct dim_sample *end,
 		    struct dim_stats *curr_stats)
 {
 	/* u32 holds up to 71 minutes, should be enough */
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index d7e7028e9b19..d6aa09a979b3 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -347,7 +347,7 @@ static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
 	return dim->profile_ix != prev_ix;
 }
 
-void net_dim(struct dim *dim, struct dim_sample end_sample)
+void net_dim(struct dim *dim, const struct dim_sample *end_sample)
 {
 	struct dim_stats curr_stats;
 	u16 nevents;
@@ -355,11 +355,11 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
 	switch (dim->state) {
 	case DIM_MEASURE_IN_PROGRESS:
 		nevents = BIT_GAP(BITS_PER_TYPE(u16),
-				  end_sample.event_ctr,
+				  end_sample->event_ctr,
 				  dim->start_sample.event_ctr);
 		if (nevents < DIM_NEVENTS)
 			break;
-		if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
+		if (!dim_calc_stats(&dim->start_sample, end_sample, &curr_stats))
 			break;
 		if (net_dim_decision(&curr_stats, dim)) {
 			dim->state = DIM_APPLY_NEW_PROFILE;
@@ -368,8 +368,8 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
 		}
 		fallthrough;
 	case DIM_START_MEASURE:
-		dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
-				  end_sample.byte_ctr, &dim->start_sample);
+		dim_update_sample(end_sample->event_ctr, end_sample->pkt_ctr,
+				  end_sample->byte_ctr, &dim->start_sample);
 		dim->state = DIM_MEASURE_IN_PROGRESS;
 		break;
 	case DIM_APPLY_NEW_PROFILE:
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index e49deddd3de9..c1b7638a594a 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -179,7 +179,7 @@ void dql_completed(struct dql *dql, unsigned int count)
 	dql->adj_limit = limit + completed;
 	dql->prev_ovlimit = ovlimit;
-	dql->prev_last_obj_cnt = dql->last_obj_cnt;
+	dql->prev_last_obj_cnt = READ_ONCE(dql->last_obj_cnt);
 	dql->num_completed = completed;
 	dql->prev_num_queued = num_queued;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 1abb32c0da50..908e75a28d90 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -461,6 +461,8 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 				  size_t bytes, struct iov_iter *i)
 {
 	size_t n, copied = 0;
+	bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) ||
+			 PageHighMem(page);
 
 	if (!page_copy_sane(page, offset, bytes))
 		return 0;
@@ -471,7 +473,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 		char *p;
 
 		n = bytes - copied;
-		if (PageHighMem(page)) {
+		if (uses_kmap) {
 			page += offset / PAGE_SIZE;
 			offset %= PAGE_SIZE;
 			n = min_t(size_t, n, PAGE_SIZE - offset);
@@ -482,7 +484,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 		kunmap_atomic(p);
 		copied += n;
 		offset += n;
-	} while (PageHighMem(page) && copied != bytes && n > 0);
+	} while (uses_kmap && copied != bytes && n > 0);
 
 	return copied;
 }
@@ -1021,15 +1023,18 @@ static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
 		size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
 		size_t part = PAGE_SIZE - offset % PAGE_SIZE;
 
-		part = umin(part, umin(maxsize - extracted, fsize - offset));
-		count -= part;
-		iov_offset += part;
-		extracted += part;
+		if (offset < fsize) {
+			part = umin(part, umin(maxsize - extracted, fsize - offset));
+			count -= part;
+			iov_offset += part;
+			extracted += part;
+
+			*pages = folio_page(folio, offset / PAGE_SIZE);
+			get_page(*pages);
+			pages++;
+			maxpages--;
+		}
 
-		*pages = folio_page(folio, offset / PAGE_SIZE);
-		get_page(*pages);
-		pages++;
-		maxpages--;
 		if (maxpages == 0 || extracted >= maxsize)
 			break;
diff --git a/lib/objpool.c b/lib/objpool.c
index fd108fe0d095..b998b720c732 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -74,15 +74,21 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
 		 * warm caches and TLB hits. in default vmalloc is used to
 		 * reduce the pressure of kernel slab system. as we know,
 		 * mimimal size of vmalloc is one page since vmalloc would
-		 * always align the requested size to page size
+		 * always align the requested size to page size.
+		 * but if vmalloc fails or it is not available (e.g. GFP_ATOMIC)
+		 * allocate percpu slot with kmalloc.
 		 */
-		if ((pool->gfp & GFP_ATOMIC) == GFP_ATOMIC)
-			slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
-		else
+		slot = NULL;
+
+		if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
 			slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
 				cpu_to_node(i), __builtin_return_address(0));
-		if (!slot)
-			return -ENOMEM;
+
+		if (!slot) {
+			slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+			if (!slot)
+				return -ENOMEM;
+		}
 		memset(slot, 0, size);
 		pool->cpu_slots[i] = slot;
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 80e39f003344..33564f965958 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -141,7 +141,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
 {
 	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
 						      SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
-	u8 *p = __kmalloc_cache_noprof(s, GFP_KERNEL, 18);
+	u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
 
 	kasan_disable_current();
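Note on the lib/checksum.c hunk: the local from32to16() helper is removed and its caller now uses csum_from32to16(), whose definition is not part of this diff. Going by the removed helper, the shared function is expected to fold a 32-bit ones'-complement accumulator into 16 bits; a minimal sketch under that assumption (the name fold32to16_sketch is illustrative, not a kernel symbol):

/*
 * Sketch mirroring the removed from32to16(): add the two 16-bit
 * halves, then fold in the remaining carry. csum_from32to16() is
 * assumed to have the same semantics.
 */
static inline unsigned short fold32to16_sketch(unsigned int x)
{
	x = (x & 0xffff) + (x >> 16);	/* add upper and lower 16-bit halves */
	x = (x & 0xffff) + (x >> 16);	/* add any carry produced above */
	return x;
}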
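Note on the lib/dim hunks: net_dim() now takes the end sample as a const pointer rather than by value, so callers only change the call site. A minimal caller sketch, assuming a hypothetical driver receive queue (struct example_rx_queue and its fields are illustrative and not part of this diff):

/*
 * Hypothetical NAPI completion path: build a dim_sample as before,
 * then pass its address to net_dim(), which previously took the
 * sample by value.
 */
static void example_rx_dim_update(struct example_rx_queue *rq)
{
	struct dim_sample sample;

	dim_update_sample(rq->event_ctr, rq->rx_packets, rq->rx_bytes,
			  &sample);
	net_dim(&rq->dim, &sample);	/* was: net_dim(&rq->dim, sample) */
}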