Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug           33
-rw-r--r--  lib/Kconfig.kcsan            5
-rw-r--r--  lib/bitmap.c                49
-rw-r--r--  lib/bug.c                   54
-rw-r--r--  lib/devres.c                22
-rw-r--r--  lib/iov_iter.c             318
-rw-r--r--  lib/kobject_uevent.c         9
-rw-r--r--  lib/kunit/.kunitconfig       3
-rw-r--r--  lib/kunit/assert.c          61
-rw-r--r--  lib/kunit/test.c            39
-rw-r--r--  lib/list_sort.c             17
-rw-r--r--  lib/math/Makefile            2
-rw-r--r--  lib/math/test_div64.c      249
-rw-r--r--  lib/sbitmap.c              210
-rw-r--r--  lib/test_bitmap.c           46
-rw-r--r--  lib/test_kasan.c            59
-rw-r--r--  lib/test_list_sort.c         3
-rw-r--r--  lib/test_printf.c          108
-rw-r--r--  lib/test_rhashtable.c        9
-rw-r--r--  lib/test_vmalloc.c         128
-rw-r--r--  lib/vdso/gettimeofday.c     31
-rw-r--r--  lib/vsprintf.c             117
22 files changed, 1210 insertions, 362 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2c7f46b366f1..678c13967580 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -284,8 +284,7 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_DWARF5
bool "Generate DWARF Version 5 debuginfo"
- depends on GCC_VERSION >= 50000 || CC_IS_CLANG
- depends on CC_IS_GCC || $(success,$(srctree)/scripts/test_dwarf5_support.sh $(CC) $(CLANG_FLAGS))
+ depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
depends on !DEBUG_INFO_BTF
help
Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
@@ -449,6 +448,16 @@ config VMLINUX_VALIDATION
depends on STACK_VALIDATION && DEBUG_ENTRY && !PARAVIRT
default y
+config VMLINUX_MAP
+ bool "Generate vmlinux.map file when linking"
+ depends on EXPERT
+ help
+ Selecting this option will pass "-Map=vmlinux.map" to ld
+ when linking vmlinux. That file can be useful for verifying
+ and debugging magic section games, and for seeing which
+ pieces of code get eliminated with
+ CONFIG_LD_DEAD_CODE_DATA_ELIMINATION.
+
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
depends on DEBUG_KERNEL
@@ -1710,7 +1719,6 @@ config LATENCYTOP
select KALLSYMS_ALL
select STACKTRACE
select SCHEDSTATS
- select SCHED_DEBUG
help
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
@@ -2067,6 +2075,16 @@ config TEST_SORT
If unsure, say N.
+config TEST_DIV64
+ tristate "64bit/32bit division and modulo test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on the 'do_div()' function test. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -2555,11 +2573,18 @@ config TEST_FPU
endif # RUNTIME_TESTING_MENU
+config ARCH_USE_MEMTEST
+ bool
+ help
+ An architecture should select this when it uses early_memtest()
+ during the boot process.
+
config MEMTEST
bool "Memtest"
+ depends on ARCH_USE_MEMTEST
help
This option adds a kernel parameter 'memtest', which allows memtest
- to be set.
+ to be set and executed.
memtest=0, mean disabled; -- default
memtest=1, mean do 1 test pattern;
...
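
The new ARCH_USE_MEMTEST symbol above gates MEMTEST on architectures that
actually call early_memtest(). A minimal sketch of that call, as an
architecture's early boot code might issue it (the min_pfn/max_pfn variables
are assumptions, not taken from this patch):

        /* Hand the usable physical range to the memtest= pattern scanner;
         * it only runs if the "memtest" kernel parameter was given.
         */
        early_memtest((phys_addr_t)min_pfn << PAGE_SHIFT,
                      (phys_addr_t)max_pfn << PAGE_SHIFT);
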
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index f271ff5fbb5a..0440f373248e 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -69,8 +69,9 @@ config KCSAN_SELFTEST
panic. Recommended to be enabled, ensuring critical functionality
works as intended.
-config KCSAN_TEST
- tristate "KCSAN test for integrated runtime behaviour"
+config KCSAN_KUNIT_TEST
+ tristate "KCSAN test for integrated runtime behaviour" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
depends on TRACEPOINTS && KUNIT
select TORTURE_TEST
help
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 75006c4036e9..9f4626a4c95f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -487,30 +487,25 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
/*
* Region 9-38:4/10 describes the following bitmap structure:
- * 0 9 12 18 38
- * .........****......****......****......
- * ^ ^ ^ ^
- * start off group_len end
+ * 0 9 12 18 38 N
+ * .........****......****......****..................
+ * ^ ^ ^ ^ ^
+ * start off group_len end nbits
*/
struct region {
unsigned int start;
unsigned int off;
unsigned int group_len;
unsigned int end;
+ unsigned int nbits;
};
-static int bitmap_set_region(const struct region *r,
- unsigned long *bitmap, int nbits)
+static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
{
unsigned int start;
- if (r->end >= nbits)
- return -ERANGE;
-
for (start = r->start; start <= r->end; start += r->group_len)
bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
-
- return 0;
}
static int bitmap_check_region(const struct region *r)
@@ -518,14 +513,23 @@ static int bitmap_check_region(const struct region *r)
if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
return -EINVAL;
+ if (r->end >= r->nbits)
+ return -ERANGE;
+
return 0;
}
-static const char *bitmap_getnum(const char *str, unsigned int *num)
+static const char *bitmap_getnum(const char *str, unsigned int *num,
+ unsigned int lastbit)
{
unsigned long long n;
unsigned int len;
+ if (str[0] == 'N') {
+ *num = lastbit;
+ return str + 1;
+ }
+
len = _parse_integer(str, 10, &n);
if (!len)
return ERR_PTR(-EINVAL);
@@ -573,7 +577,9 @@ static const char *bitmap_find_region_reverse(const char *start, const char *end
static const char *bitmap_parse_region(const char *str, struct region *r)
{
- str = bitmap_getnum(str, &r->start);
+ unsigned int lastbit = r->nbits - 1;
+
+ str = bitmap_getnum(str, &r->start, lastbit);
if (IS_ERR(str))
return str;
@@ -583,7 +589,7 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
if (*str != '-')
return ERR_PTR(-EINVAL);
- str = bitmap_getnum(str + 1, &r->end);
+ str = bitmap_getnum(str + 1, &r->end, lastbit);
if (IS_ERR(str))
return str;
@@ -593,14 +599,14 @@ static const char *bitmap_parse_region(const char *str, struct region *r)
if (*str != ':')
return ERR_PTR(-EINVAL);
- str = bitmap_getnum(str + 1, &r->off);
+ str = bitmap_getnum(str + 1, &r->off, lastbit);
if (IS_ERR(str))
return str;
if (*str != '/')
return ERR_PTR(-EINVAL);
- return bitmap_getnum(str + 1, &r->group_len);
+ return bitmap_getnum(str + 1, &r->group_len, lastbit);
no_end:
r->end = r->start;
@@ -627,6 +633,10 @@ no_pattern:
* From each group will be used only defined amount of bits.
* Syntax: range:used_size/group_size
* Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
+ * The value 'N' can be used as a dynamically substituted token for the
+ * maximum allowed value; i.e. (nmaskbits - 1). Keep in mind that it is
+ * dynamic, so if system changes cause the bitmap width to change, such
+ * as more cores in a CPU list, then any ranges using N will also change.
*
* Returns: 0 on success, -errno on invalid input strings. Error values:
*
@@ -640,7 +650,8 @@ int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
struct region r;
long ret;
- bitmap_zero(maskp, nmaskbits);
+ r.nbits = nmaskbits;
+ bitmap_zero(maskp, r.nbits);
while (buf) {
buf = bitmap_find_region(buf);
@@ -655,9 +666,7 @@ int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
if (ret)
return ret;
- ret = bitmap_set_region(&r, maskp, nmaskbits);
- if (ret)
- return ret;
+ bitmap_set_region(&r, maskp);
}
return 0;
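
The 'N' token documented above lets a region refer to the last valid bit
without hard-coding the bitmap width. A minimal usage sketch, assuming a
64-bit map (the variable names are illustrative):

        DECLARE_BITMAP(mask, 64);
        int err;

        /* "0-N:1/2" sets bits 0,2,4,...,62 here, since N expands to 63 */
        err = bitmap_parselist("0-N:1/2", mask, 64);
        if (err)
                pr_err("bad bitmap list: %d\n", err);
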
diff --git a/lib/bug.c b/lib/bug.c
index 8f9d537bfb2a..45a0584f6541 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -127,6 +127,22 @@ static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
}
#endif
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ *file = bug->file;
+#else
+ *file = (const char *)bug + bug->file_disp;
+#endif
+ *line = bug->line;
+#else
+ *file = NULL;
+ *line = 0;
+#endif
+}
+
struct bug_entry *find_bug(unsigned long bugaddr)
{
struct bug_entry *bug;
@@ -153,32 +169,20 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
disable_trace_on_warning();
- file = NULL;
- line = 0;
- warning = 0;
+ bug_get_file_line(bug, &file, &line);
- if (bug) {
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- file = bug->file;
-#else
- file = (const char *)bug + bug->file_disp;
-#endif
- line = bug->line;
-#endif
- warning = (bug->flags & BUGFLAG_WARNING) != 0;
- once = (bug->flags & BUGFLAG_ONCE) != 0;
- done = (bug->flags & BUGFLAG_DONE) != 0;
-
- if (warning && once) {
- if (done)
- return BUG_TRAP_TYPE_WARN;
-
- /*
- * Since this is the only store, concurrency is not an issue.
- */
- bug->flags |= BUGFLAG_DONE;
- }
+ warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ once = (bug->flags & BUGFLAG_ONCE) != 0;
+ done = (bug->flags & BUGFLAG_DONE) != 0;
+
+ if (warning && once) {
+ if (done)
+ return BUG_TRAP_TYPE_WARN;
+
+ /*
+ * Since this is the only store, concurrency is not an issue.
+ */
+ bug->flags |= BUGFLAG_DONE;
}
/*
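
bug_get_file_line() factors the BUGVERBOSE lookup out of report_bug() so that
other code can resolve a bug_entry to its source location. A hedged sketch of
such a caller (the surrounding handler and 'bugaddr' are assumed context):

        struct bug_entry *bug = find_bug(bugaddr);
        const char *file = NULL;
        unsigned int line = 0;

        if (bug)
                bug_get_file_line(bug, &file, &line);
        if (file)
                pr_crit("kernel BUG at %s:%u!\n", file, line);
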
diff --git a/lib/devres.c b/lib/devres.c
index 2a4ff5d64288..4679dbb1bf5f 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -10,6 +10,7 @@ enum devm_ioremap_type {
DEVM_IOREMAP = 0,
DEVM_IOREMAP_UC,
DEVM_IOREMAP_WC,
+ DEVM_IOREMAP_NP,
};
void devm_ioremap_release(struct device *dev, void *res)
@@ -42,6 +43,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
case DEVM_IOREMAP_WC:
addr = ioremap_wc(offset, size);
break;
+ case DEVM_IOREMAP_NP:
+ addr = ioremap_np(offset, size);
+ break;
}
if (addr) {
@@ -99,6 +103,21 @@ void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap_wc);
/**
+ * devm_ioremap_np - Managed ioremap_np()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap_np(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP);
+}
+EXPORT_SYMBOL(devm_ioremap_np);
+
+/**
* devm_iounmap - Managed iounmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
@@ -128,6 +147,9 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
return IOMEM_ERR_PTR(-EINVAL);
}
+ if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
+ type = DEVM_IOREMAP_NP;
+
size = resource_size(res);
if (res->name)
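
devm_ioremap_np() gives drivers a managed wrapper for non-posted MMIO
mappings, and __devm_ioremap_resource() now selects it automatically for
IORESOURCE_MEM_NONPOSTED resources. A sketch of explicit use from a platform
driver probe (pdev and the resource index are assumptions):

        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        base = devm_ioremap_np(&pdev->dev, res->start, resource_size(res));
        if (!base)
                return -ENOMEM;
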
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f66c62aa7154..61228a6c69f8 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -76,7 +76,44 @@
} \
}
-#define iterate_all_kinds(i, n, v, I, B, K) { \
+#define iterate_xarray(i, n, __v, skip, STEP) { \
+ struct page *head = NULL; \
+ size_t wanted = n, seg, offset; \
+ loff_t start = i->xarray_start + skip; \
+ pgoff_t index = start >> PAGE_SHIFT; \
+ int j; \
+ \
+ XA_STATE(xas, i->xarray, index); \
+ \
+ rcu_read_lock(); \
+ xas_for_each(&xas, head, ULONG_MAX) { \
+ if (xas_retry(&xas, head)) \
+ continue; \
+ if (WARN_ON(xa_is_value(head))) \
+ break; \
+ if (WARN_ON(PageHuge(head))) \
+ break; \
+ for (j = (head->index < index) ? index - head->index : 0; \
+ j < thp_nr_pages(head); j++) { \
+ __v.bv_page = head + j; \
+ offset = (i->xarray_start + skip) & ~PAGE_MASK; \
+ seg = PAGE_SIZE - offset; \
+ __v.bv_offset = offset; \
+ __v.bv_len = min(n, seg); \
+ (void)(STEP); \
+ n -= __v.bv_len; \
+ skip += __v.bv_len; \
+ if (n == 0) \
+ break; \
+ } \
+ if (n == 0) \
+ break; \
+ } \
+ rcu_read_unlock(); \
+ n = wanted - n; \
+}
+
+#define iterate_all_kinds(i, n, v, I, B, K, X) { \
if (likely(n)) { \
size_t skip = i->iov_offset; \
if (unlikely(i->type & ITER_BVEC)) { \
@@ -88,6 +125,9 @@
struct kvec v; \
iterate_kvec(i, n, v, kvec, skip, (K)) \
} else if (unlikely(i->type & ITER_DISCARD)) { \
+ } else if (unlikely(i->type & ITER_XARRAY)) { \
+ struct bio_vec v; \
+ iterate_xarray(i, n, v, skip, (X)); \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -96,7 +136,7 @@
} \
}
-#define iterate_and_advance(i, n, v, I, B, K) { \
+#define iterate_and_advance(i, n, v, I, B, K, X) { \
if (unlikely(i->count < n)) \
n = i->count; \
if (i->count) { \
@@ -121,6 +161,9 @@
i->kvec = kvec; \
} else if (unlikely(i->type & ITER_DISCARD)) { \
skip += n; \
+ } else if (unlikely(i->type & ITER_XARRAY)) { \
+ struct bio_vec v; \
+ iterate_xarray(i, n, v, skip, (X)) \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -622,7 +665,9 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
memcpy_to_page(v.bv_page, v.bv_offset,
(from += v.bv_len) - v.bv_len, v.bv_len),
- memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
+ memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+ memcpy_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len)
)
return bytes;
@@ -738,6 +783,18 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
bytes = curr_addr - s_addr - rem;
return bytes;
}
+ }),
+ ({
+ rem = copy_mc_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len);
+ if (rem) {
+ curr_addr = (unsigned long) from;
+ bytes = curr_addr - s_addr - rem;
+ rcu_read_unlock();
+ i->iov_offset += bytes;
+ i->count -= bytes;
+ return bytes;
+ }
})
)
@@ -759,7 +816,9 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
return bytes;
@@ -785,7 +844,9 @@ bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
iov_iter_advance(i, bytes);
@@ -805,7 +866,9 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
return bytes;
@@ -840,7 +903,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len)
+ v.iov_len),
+ memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
return bytes;
@@ -864,7 +929,9 @@ bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
iov_iter_advance(i, bytes);
@@ -901,7 +968,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
+ if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
@@ -924,7 +991,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
WARN_ON(1);
return 0;
}
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
+ if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
void *kaddr = kmap_atomic(page);
size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
@@ -968,7 +1035,8 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
iterate_and_advance(i, bytes, v,
clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
- memset(v.iov_base, 0, v.iov_len)
+ memset(v.iov_base, 0, v.iov_len),
+ memzero_page(v.bv_page, v.bv_offset, v.bv_len)
)
return bytes;
@@ -992,7 +1060,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
kunmap_atomic(kaddr);
return bytes;
@@ -1078,11 +1148,17 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
i->count -= size;
return;
}
+ if (unlikely(iov_iter_is_xarray(i))) {
+ size = min(size, i->count);
+ i->iov_offset += size;
+ i->count -= size;
+ return;
+ }
if (iov_iter_is_bvec(i)) {
iov_iter_bvec_advance(i, size);
return;
}
- iterate_and_advance(i, size, v, 0, 0, 0)
+ iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -1126,7 +1202,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
return;
}
unroll -= i->iov_offset;
- if (iov_iter_is_bvec(i)) {
+ if (iov_iter_is_xarray(i)) {
+ BUG(); /* We should never go beyond the start of the specified
+ * range since we might then be straying into pages that
+ * aren't pinned.
+ */
+ } else if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
@@ -1163,9 +1244,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
return i->count; // it is a silly place, anyway
if (i->nr_segs == 1)
return i->count;
- if (unlikely(iov_iter_is_discard(i)))
+ if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
return i->count;
- else if (iov_iter_is_bvec(i))
+ if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
else
return min(i->count, i->iov->iov_len - i->iov_offset);
@@ -1214,6 +1295,31 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
EXPORT_SYMBOL(iov_iter_pipe);
/**
+ * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @xarray: The xarray to access.
+ * @start: The start file position.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator to either draw data out of the pages attached to an
+ * inode or to inject data into those pages. The pages *must* be prevented
+ * from evaporation, either by taking a ref on them or locking them by the
+ * caller.
+ */
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
+ struct xarray *xarray, loff_t start, size_t count)
+{
+ BUG_ON(direction & ~1);
+ i->type = ITER_XARRAY | (direction & (READ | WRITE));
+ i->xarray = xarray;
+ i->xarray_start = start;
+ i->count = count;
+ i->iov_offset = 0;
+}
+EXPORT_SYMBOL(iov_iter_xarray);
+
+/**
* iov_iter_discard - Initialise an I/O iterator that discards data
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
@@ -1243,10 +1349,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
return size | i->iov_offset;
return size;
}
+ if (unlikely(iov_iter_is_xarray(i)))
+ return (i->xarray_start + i->iov_offset) | i->count;
iterate_all_kinds(i, size, v,
(res |= (unsigned long)v.iov_base | v.iov_len, 0),
res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len
+ res |= (unsigned long)v.iov_base | v.iov_len,
+ res |= v.bv_offset | v.bv_len
)
return res;
}
@@ -1268,7 +1377,9 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
(size != v.bv_len ? size : 0)),
(res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0))
+ (size != v.iov_len ? size : 0)),
+ (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+ (size != v.bv_len ? size : 0))
);
return res;
}
@@ -1318,6 +1429,75 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}
+static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
+ pgoff_t index, unsigned int nr_pages)
+{
+ XA_STATE(xas, xa, index);
+ struct page *page;
+ unsigned int ret = 0;
+
+ rcu_read_lock();
+ for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ /* Has the page moved or been split? */
+ if (unlikely(page != xas_reload(&xas))) {
+ xas_reset(&xas);
+ continue;
+ }
+
+ pages[ret] = find_subpage(page, xas.xa_index);
+ get_page(pages[ret]);
+ if (++ret == nr_pages)
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static ssize_t iter_xarray_get_pages(struct iov_iter *i,
+ struct page **pages, size_t maxsize,
+ unsigned maxpages, size_t *_start_offset)
+{
+ unsigned nr, offset;
+ pgoff_t index, count;
+ size_t size = maxsize, actual;
+ loff_t pos;
+
+ if (!size || !maxpages)
+ return 0;
+
+ pos = i->xarray_start + i->iov_offset;
+ index = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ *_start_offset = offset;
+
+ count = 1;
+ if (size > PAGE_SIZE - offset) {
+ size -= PAGE_SIZE - offset;
+ count += size >> PAGE_SHIFT;
+ size &= ~PAGE_MASK;
+ if (size)
+ count++;
+ }
+
+ if (count > maxpages)
+ count = maxpages;
+
+ nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
+ if (nr == 0)
+ return 0;
+
+ actual = PAGE_SIZE * nr;
+ actual -= offset;
+ if (nr == count && size > 0) {
+ unsigned last_offset = (nr > 1) ? 0 : offset;
+ actual -= PAGE_SIZE - (last_offset + size);
+ }
+ return actual;
+}
+
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
@@ -1327,6 +1507,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (unlikely(iov_iter_is_xarray(i)))
+ return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
if (unlikely(iov_iter_is_discard(i)))
return -EFAULT;
@@ -1353,7 +1535,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
return v.bv_len;
}),({
return -EFAULT;
- })
+ }),
+ 0
)
return 0;
}
@@ -1397,6 +1580,51 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
return n;
}
+static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ size_t *_start_offset)
+{
+ struct page **p;
+ unsigned nr, offset;
+ pgoff_t index, count;
+ size_t size = maxsize, actual;
+ loff_t pos;
+
+ if (!size)
+ return 0;
+
+ pos = i->xarray_start + i->iov_offset;
+ index = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ *_start_offset = offset;
+
+ count = 1;
+ if (size > PAGE_SIZE - offset) {
+ size -= PAGE_SIZE - offset;
+ count += size >> PAGE_SHIFT;
+ size &= ~PAGE_MASK;
+ if (size)
+ count++;
+ }
+
+ p = get_pages_array(count);
+ if (!p)
+ return -ENOMEM;
+ *pages = p;
+
+ nr = iter_xarray_populate_pages(p, i->xarray, index, count);
+ if (nr == 0)
+ return 0;
+
+ actual = PAGE_SIZE * nr;
+ actual -= offset;
+ if (nr == count && size > 0) {
+ unsigned last_offset = (nr > 1) ? 0 : offset;
+ actual -= PAGE_SIZE - (last_offset + size);
+ }
+ return actual;
+}
+
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
@@ -1408,6 +1636,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (unlikely(iov_iter_is_xarray(i)))
+ return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
if (unlikely(iov_iter_is_discard(i)))
return -EFAULT;
@@ -1440,7 +1670,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
return v.bv_len;
}),({
return -EFAULT;
- })
+ }), 0
)
return 0;
}
@@ -1478,6 +1708,13 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
v.iov_base, v.iov_len,
sum, off);
off += v.iov_len;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
+ kunmap_atomic(p);
+ off += v.bv_len;
})
)
*csum = sum;
@@ -1519,6 +1756,13 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
v.iov_base, v.iov_len,
sum, off);
off += v.iov_len;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
+ kunmap_atomic(p);
+ off += v.bv_len;
})
)
*csum = sum;
@@ -1565,6 +1809,13 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
(from += v.iov_len) - v.iov_len,
v.iov_len, sum, off);
off += v.iov_len;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ sum = csum_and_memcpy(p + v.bv_offset,
+ (from += v.bv_len) - v.bv_len,
+ v.bv_len, sum, off);
+ kunmap_atomic(p);
+ off += v.bv_len;
})
)
csstate->csum = sum;
@@ -1615,6 +1866,21 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
if (npages >= maxpages)
return maxpages;
+ } else if (unlikely(iov_iter_is_xarray(i))) {
+ unsigned offset;
+
+ offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
+
+ npages = 1;
+ if (size > PAGE_SIZE - offset) {
+ size -= PAGE_SIZE - offset;
+ npages += size >> PAGE_SHIFT;
+ size &= ~PAGE_MASK;
+ if (size)
+ npages++;
+ }
+ if (npages >= maxpages)
+ return maxpages;
} else iterate_all_kinds(i, size, v, ({
unsigned long p = (unsigned long)v.iov_base;
npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
@@ -1631,7 +1897,8 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
- p / PAGE_SIZE;
if (npages >= maxpages)
return maxpages;
- })
+ }),
+ 0
)
return npages;
}
@@ -1644,7 +1911,7 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
WARN_ON(1);
return NULL;
}
- if (unlikely(iov_iter_is_discard(new)))
+ if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
return NULL;
if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
@@ -1849,7 +2116,12 @@ int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
kunmap(v.bv_page);
err;}), ({
w = v;
- err = f(&w, context);})
+ err = f(&w, context);}), ({
+ w.iov_base = kmap(v.bv_page) + v.bv_offset;
+ w.iov_len = v.bv_len;
+ err = f(&w, context);
+ kunmap(v.bv_page);
+ err;})
)
return err;
}
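
The new ITER_XARRAY type lets an iterator walk pages held in an xarray
(typically a file's pagecache) instead of an iovec/bvec list. A minimal
sketch of setting one up, assuming the caller already holds references on the
pages ('mapping', 'pos' and 'len' are placeholders):

        struct iov_iter iter;

        /* Describe len bytes of pagecache starting at file position pos... */
        iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);

        /* ...and, for example, clear a hole within that range. */
        iov_iter_zero(len, &iter);
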
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7998affa45d4..c87d5b6a8a55 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
+ int buffer_size = sizeof(env->buf) - env->buflen;
int len;
- len = strlcpy(&env->buf[env->buflen], subsystem,
- sizeof(env->buf) - env->buflen);
- if (len >= (sizeof(env->buf) - env->buflen)) {
- WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+ len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
+ if (len >= buffer_size) {
+ pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
+ buffer_size, len);
return -ENOMEM;
}
diff --git a/lib/kunit/.kunitconfig b/lib/kunit/.kunitconfig
new file mode 100644
index 000000000000..9235b7d42d38
--- /dev/null
+++ b/lib/kunit/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_KUNIT_TEST=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index e0ec7d6fed6f..b972bda61c0c 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -25,7 +25,7 @@ void kunit_base_assert_format(const struct kunit_assert *assert,
}
string_stream_add(stream, "%s FAILED at %s:%d\n",
- expect_or_assert, assert->file, assert->line);
+ expect_or_assert, assert->file, assert->line);
}
EXPORT_SYMBOL_GPL(kunit_base_assert_format);
@@ -48,8 +48,9 @@ EXPORT_SYMBOL_GPL(kunit_fail_assert_format);
void kunit_unary_assert_format(const struct kunit_assert *assert,
struct string_stream *stream)
{
- struct kunit_unary_assert *unary_assert = container_of(
- assert, struct kunit_unary_assert, assert);
+ struct kunit_unary_assert *unary_assert;
+
+ unary_assert = container_of(assert, struct kunit_unary_assert, assert);
kunit_base_assert_format(assert, stream);
if (unary_assert->expected_true)
@@ -67,8 +68,10 @@ EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
struct string_stream *stream)
{
- struct kunit_ptr_not_err_assert *ptr_assert = container_of(
- assert, struct kunit_ptr_not_err_assert, assert);
+ struct kunit_ptr_not_err_assert *ptr_assert;
+
+ ptr_assert = container_of(assert, struct kunit_ptr_not_err_assert,
+ assert);
kunit_base_assert_format(assert, stream);
if (!ptr_assert->value) {
@@ -111,8 +114,10 @@ static bool is_literal(struct kunit *test, const char *text, long long value,
void kunit_binary_assert_format(const struct kunit_assert *assert,
struct string_stream *stream)
{
- struct kunit_binary_assert *binary_assert = container_of(
- assert, struct kunit_binary_assert, assert);
+ struct kunit_binary_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_assert,
+ assert);
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
@@ -137,8 +142,10 @@ EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
struct string_stream *stream)
{
- struct kunit_binary_ptr_assert *binary_assert = container_of(
- assert, struct kunit_binary_ptr_assert, assert);
+ struct kunit_binary_ptr_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_ptr_assert,
+ assert);
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
@@ -156,11 +163,29 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
}
EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
+/* Checks if KUNIT_EXPECT_STREQ() args were string literals.
+ * Note: `text` will have ""s whereas `value` will not.
+ */
+static bool is_str_literal(const char *text, const char *value)
+{
+ int len;
+
+ len = strlen(text);
+ if (len < 2)
+ return false;
+ if (text[0] != '\"' || text[len - 1] != '\"')
+ return false;
+
+ return strncmp(text + 1, value, len - 2) == 0;
+}
+
void kunit_binary_str_assert_format(const struct kunit_assert *assert,
struct string_stream *stream)
{
- struct kunit_binary_str_assert *binary_assert = container_of(
- assert, struct kunit_binary_str_assert, assert);
+ struct kunit_binary_str_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_str_assert,
+ assert);
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
@@ -168,12 +193,14 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
binary_assert->left_text,
binary_assert->operation,
binary_assert->right_text);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s",
- binary_assert->right_text,
- binary_assert->right_value);
+ if (!is_str_literal(binary_assert->left_text, binary_assert->left_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ if (!is_str_literal(binary_assert->right_text, binary_assert->right_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index ec9494e914ef..2f6cc0123232 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -7,6 +7,7 @@
*/
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/sched/debug.h>
@@ -16,6 +17,40 @@
#include "string-stream.h"
#include "try-catch-impl.h"
+#if IS_BUILTIN(CONFIG_KUNIT)
+/*
+ * Fail the current test and print an error message to the log.
+ */
+void __kunit_fail_current_test(const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ char *buffer;
+
+ if (!current->kunit_test)
+ return;
+
+ kunit_set_failure(current->kunit_test);
+
+ /* kunit_err() only accepts literals, so evaluate the args first. */
+ va_start(args, fmt);
+ len = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+
+ buffer = kunit_kmalloc(current->kunit_test, len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(buffer, len, fmt, args);
+ va_end(args);
+
+ kunit_err(current->kunit_test, "%s:%d: %s", file, line, buffer);
+ kunit_kfree(current->kunit_test, buffer);
+}
+EXPORT_SYMBOL_GPL(__kunit_fail_current_test);
+#endif
+
/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
@@ -273,9 +308,7 @@ static void kunit_try_run_case(void *data)
struct kunit_suite *suite = ctx->suite;
struct kunit_case *test_case = ctx->test_case;
-#if (IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT))
current->kunit_test = test;
-#endif /* IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT) */
/*
* kunit_run_case_internal may encounter a fatal error; if it does,
@@ -624,9 +657,7 @@ void kunit_cleanup(struct kunit *test)
spin_unlock(&test->lock);
kunit_remove_resource(test, res);
}
-#if (IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT))
current->kunit_test = NULL;
-#endif /* IS_ENABLED(CONFIG_KASAN) && IS_ENABLED(CONFIG_KUNIT)*/
}
EXPORT_SYMBOL_GPL(kunit_cleanup);
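
__kunit_fail_current_test() lets instrumentation such as KASAN mark the
currently running KUnit test as failed from outside the test itself; it is
normally reached through the kunit_fail_current_test() wrapper in
<kunit/test-bug.h>, which supplies __FILE__ and __LINE__. A hedged sketch of
a caller ('ptr' is a placeholder):

        /* In code built with <kunit/test-bug.h> available; this is a no-op
         * unless a KUnit test is currently running on this task.
         */
        kunit_fail_current_test("unexpected access to freed object %px", ptr);
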
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 52f0c258c895..a926d96ffd44 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -7,16 +7,13 @@
#include <linux/list_sort.h>
#include <linux/list.h>
-typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *,
- struct list_head const *, struct list_head const *);
-
/*
* Returns a list organized in an intermediate format suited
* to chaining of merge() calls: null-terminated, no reserved or
* sentinel head node, "prev" links not maintained.
*/
__attribute__((nonnull(2,3,4)))
-static struct list_head *merge(void *priv, cmp_func cmp,
+static struct list_head *merge(void *priv, list_cmp_func_t cmp,
struct list_head *a, struct list_head *b)
{
struct list_head *head, **tail = &head;
@@ -52,7 +49,7 @@ static struct list_head *merge(void *priv, cmp_func cmp,
* throughout.
*/
__attribute__((nonnull(2,3,4,5)))
-static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
+static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
@@ -185,9 +182,7 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
* 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
*/
__attribute__((nonnull(2,3)))
-void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
{
struct list_head *list = head->next, *pending = NULL;
size_t count = 0; /* Count of pending */
@@ -227,7 +222,7 @@ void list_sort(void *priv, struct list_head *head,
if (likely(bits)) {
struct list_head *a = *tail, *b = a->prev;
- a = merge(priv, (cmp_func)cmp, b, a);
+ a = merge(priv, cmp, b, a);
/* Install the merged result in place of the inputs */
a->prev = b->prev;
*tail = a;
@@ -249,10 +244,10 @@ void list_sort(void *priv, struct list_head *head,
if (!next)
break;
- list = merge(priv, (cmp_func)cmp, pending, list);
+ list = merge(priv, cmp, pending, list);
pending = next;
}
/* The final merge, rebuilding prev links */
- merge_final(priv, (cmp_func)cmp, head, pending, list);
+ merge_final(priv, cmp, head, pending, list);
}
EXPORT_SYMBOL(list_sort);
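
list_sort() now takes a list_cmp_func_t, so comparison callbacks receive
const list_head pointers directly instead of being cast through the old
cmp_func type. A sketch of a conforming comparator (struct my_item and its
fields are illustrative):

        static int my_cmp(void *priv, const struct list_head *a,
                          const struct list_head *b)
        {
                const struct my_item *ia = list_entry(a, struct my_item, list);
                const struct my_item *ib = list_entry(b, struct my_item, list);

                return ia->key - ib->key;       /* <0, 0 or >0 */
        }

        /* ... */
        list_sort(NULL, &my_list, my_cmp);
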
diff --git a/lib/math/Makefile b/lib/math/Makefile
index be6909e943bd..7456edb864fc 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -4,3 +4,5 @@ obj-y += div64.o gcd.o lcm.o int_pow.o int_sqrt.o reciprocal_div.o
obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
+
+obj-$(CONFIG_TEST_DIV64) += test_div64.o
diff --git a/lib/math/test_div64.c b/lib/math/test_div64.c
new file mode 100644
index 000000000000..c15edd688dd2
--- /dev/null
+++ b/lib/math/test_div64.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Maciej W. Rozycki
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+#include <asm/div64.h>
+
+#define TEST_DIV64_N_ITER 1024
+
+static const u64 test_div64_dividends[] = {
+ 0x00000000ab275080,
+ 0x0000000fe73c1959,
+ 0x000000e54c0a74b1,
+ 0x00000d4398ff1ef9,
+ 0x0000a18c2ee1c097,
+ 0x00079fb80b072e4a,
+ 0x0072db27380dd689,
+ 0x0842f488162e2284,
+ 0xf66745411d8ab063,
+};
+#define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends)
+
+#define TEST_DIV64_DIVISOR_0 0x00000009
+#define TEST_DIV64_DIVISOR_1 0x0000007c
+#define TEST_DIV64_DIVISOR_2 0x00000204
+#define TEST_DIV64_DIVISOR_3 0x0000cb5b
+#define TEST_DIV64_DIVISOR_4 0x00010000
+#define TEST_DIV64_DIVISOR_5 0x0008a880
+#define TEST_DIV64_DIVISOR_6 0x003fd3ae
+#define TEST_DIV64_DIVISOR_7 0x0b658fac
+#define TEST_DIV64_DIVISOR_8 0xdc08b349
+
+static const u32 test_div64_divisors[] = {
+ TEST_DIV64_DIVISOR_0,
+ TEST_DIV64_DIVISOR_1,
+ TEST_DIV64_DIVISOR_2,
+ TEST_DIV64_DIVISOR_3,
+ TEST_DIV64_DIVISOR_4,
+ TEST_DIV64_DIVISOR_5,
+ TEST_DIV64_DIVISOR_6,
+ TEST_DIV64_DIVISOR_7,
+ TEST_DIV64_DIVISOR_8,
+};
+#define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors)
+
+static const struct {
+ u64 quotient;
+ u32 remainder;
+} test_div64_results[SIZE_DIV64_DIVISORS][SIZE_DIV64_DIVIDENDS] = {
+ {
+ { 0x0000000013045e47, 0x00000001 },
+ { 0x000000000161596c, 0x00000030 },
+ { 0x000000000054e9d4, 0x00000130 },
+ { 0x000000000000d776, 0x0000278e },
+ { 0x000000000000ab27, 0x00005080 },
+ { 0x00000000000013c4, 0x0004ce80 },
+ { 0x00000000000002ae, 0x001e143c },
+ { 0x000000000000000f, 0x0033e56c },
+ { 0x0000000000000000, 0xab275080 },
+ }, {
+ { 0x00000001c45c02d1, 0x00000000 },
+ { 0x0000000020d5213c, 0x00000049 },
+ { 0x0000000007e3d65f, 0x000001dd },
+ { 0x0000000000140531, 0x000065ee },
+ { 0x00000000000fe73c, 0x00001959 },
+ { 0x000000000001d637, 0x0004e5d9 },
+ { 0x0000000000003fc9, 0x000713bb },
+ { 0x0000000000000165, 0x029abe7d },
+ { 0x0000000000000012, 0x6e9f7e37 },
+ }, {
+ { 0x000000197a3a0cf7, 0x00000002 },
+ { 0x00000001d9632e5c, 0x00000021 },
+ { 0x0000000071c28039, 0x000001cd },
+ { 0x000000000120a844, 0x0000b885 },
+ { 0x0000000000e54c0a, 0x000074b1 },
+ { 0x00000000001a7bb3, 0x00072331 },
+ { 0x00000000000397ad, 0x0002c61b },
+ { 0x000000000000141e, 0x06ea2e89 },
+ { 0x000000000000010a, 0xab002ad7 },
+ }, {
+ { 0x0000017949e37538, 0x00000001 },
+ { 0x0000001b62441f37, 0x00000055 },
+ { 0x0000000694a3391d, 0x00000085 },
+ { 0x0000000010b2a5d2, 0x0000a753 },
+ { 0x000000000d4398ff, 0x00001ef9 },
+ { 0x0000000001882ec6, 0x0005cbf9 },
+ { 0x000000000035333b, 0x0017abdf },
+ { 0x00000000000129f1, 0x0ab4520d },
+ { 0x0000000000000f6e, 0x8ac0ce9b },
+ }, {
+ { 0x000011f321a74e49, 0x00000006 },
+ { 0x0000014d8481d211, 0x0000005b },
+ { 0x0000005025cbd92d, 0x000001e3 },
+ { 0x00000000cb5e71e3, 0x000043e6 },
+ { 0x00000000a18c2ee1, 0x0000c097 },
+ { 0x0000000012a88828, 0x00036c97 },
+ { 0x000000000287f16f, 0x002c2a25 },
+ { 0x00000000000e2cc7, 0x02d581e3 },
+ { 0x000000000000bbf4, 0x1ba08c03 },
+ }, {
+ { 0x0000d8db8f72935d, 0x00000005 },
+ { 0x00000fbd5aed7a2e, 0x00000002 },
+ { 0x000003c84b6ea64a, 0x00000122 },
+ { 0x0000000998fa8829, 0x000044b7 },
+ { 0x000000079fb80b07, 0x00002e4a },
+ { 0x00000000e16b20fa, 0x0002a14a },
+ { 0x000000001e940d22, 0x00353b2e },
+ { 0x0000000000ab40ac, 0x06fba6ba },
+ { 0x000000000008debd, 0x72d98365 },
+ }, {
+ { 0x000cc3045b8fc281, 0x00000000 },
+ { 0x0000ed1f48b5c9fc, 0x00000079 },
+ { 0x000038fb9c63406a, 0x000000e1 },
+ { 0x000000909705b825, 0x00000a62 },
+ { 0x00000072db27380d, 0x0000d689 },
+ { 0x0000000d43fce827, 0x00082b09 },
+ { 0x00000001ccaba11a, 0x0037e8dd },
+ { 0x000000000a13f729, 0x0566dffd },
+ { 0x000000000085a14b, 0x23d36726 },
+ }, {
+ { 0x00eafeb9c993592b, 0x00000001 },
+ { 0x00110e5befa9a991, 0x00000048 },
+ { 0x00041947b4a1d36a, 0x000000dc },
+ { 0x00000a6679327311, 0x0000c079 },
+ { 0x00000842f488162e, 0x00002284 },
+ { 0x000000f4459740fc, 0x00084484 },
+ { 0x0000002122c47bf9, 0x002ca446 },
+ { 0x00000000b9936290, 0x004979c4 },
+ { 0x00000000099ca89d, 0x9db446bf },
+ }, {
+ { 0x1b60cece589da1d2, 0x00000001 },
+ { 0x01fcb42be1453f5b, 0x0000004f },
+ { 0x007a3f2457df0749, 0x0000013f },
+ { 0x0001363130e3ec7b, 0x000017aa },
+ { 0x0000f66745411d8a, 0x0000b063 },
+ { 0x00001c757dfab350, 0x00048863 },
+ { 0x000003dc4979c652, 0x00224ea7 },
+ { 0x000000159edc3144, 0x06409ab3 },
+ { 0x000000011eadfee3, 0xa99c48a8 },
+ },
+};
+
+static inline bool test_div64_verify(u64 quotient, u32 remainder, int i, int j)
+{
+ return (quotient == test_div64_results[i][j].quotient &&
+ remainder == test_div64_results[i][j].remainder);
+}
+
+/*
+ * This needs to be a macro, because we don't want to rely on the compiler
+ * to do constant propagation, and `do_div' may take a different path for
+ * constants, so we do want to verify that as well.
+ */
+#define test_div64_one(dividend, divisor, i, j) ({ \
+ bool result = true; \
+ u64 quotient; \
+ u32 remainder; \
+ \
+ quotient = dividend; \
+ remainder = do_div(quotient, divisor); \
+ if (!test_div64_verify(quotient, remainder, i, j)) { \
+ pr_err("ERROR: %016llx / %08x => %016llx,%08x\n", \
+ dividend, divisor, quotient, remainder); \
+ pr_err("ERROR: expected value => %016llx,%08x\n",\
+ test_div64_results[i][j].quotient, \
+ test_div64_results[i][j].remainder); \
+ result = false; \
+ } \
+ result; \
+})
+
+/*
+ * Run calculation for the same divisor value expressed as a constant
+ * and as a variable, so as to verify the implementation for both cases
+ * should they be handled by different code execution paths.
+ */
+static bool __init test_div64(void)
+{
+ u64 dividend;
+ int i, j;
+
+ for (i = 0; i < SIZE_DIV64_DIVIDENDS; i++) {
+ dividend = test_div64_dividends[i];
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_0, i, 0))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_1, i, 1))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_2, i, 2))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_3, i, 3))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_4, i, 4))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_5, i, 5))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_6, i, 6))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_7, i, 7))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8))
+ return false;
+ for (j = 0; j < SIZE_DIV64_DIVISORS; j++) {
+ if (!test_div64_one(dividend, test_div64_divisors[j],
+ i, j))
+ return false;
+ }
+ }
+ return true;
+}
+
+static int __init test_div64_init(void)
+{
+ struct timespec64 ts, ts0, ts1;
+ int i;
+
+ pr_info("Starting 64bit/32bit division and modulo test\n");
+ ktime_get_ts64(&ts0);
+
+ for (i = 0; i < TEST_DIV64_N_ITER; i++)
+ if (!test_div64())
+ break;
+
+ ktime_get_ts64(&ts1);
+ ts = timespec64_sub(ts1, ts0);
+ pr_info("Completed 64bit/32bit division and modulo test, "
+ "%llu.%09lus elapsed\n", ts.tv_sec, ts.tv_nsec);
+
+ return 0;
+}
+
+static void __exit test_div64_exit(void)
+{
+}
+
+module_init(test_div64_init);
+module_exit(test_div64_exit);
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("64bit/32bit division and modulo test module");
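
The module exercises do_div(), whose calling convention is easy to get wrong:
the 64-bit dividend is updated in place to the quotient and the 32-bit
remainder is the return value. A one-line refresher (values chosen for
illustration):

        u64 ns = 1000000123;
        u32 rem;

        rem = do_div(ns, NSEC_PER_SEC);   /* now ns == 1, rem == 123 */
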
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index d693d9213ceb..47b3691058eb 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -9,6 +9,54 @@
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
+static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
+{
+ unsigned depth = sb->depth;
+
+ sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
+ if (!sb->alloc_hint)
+ return -ENOMEM;
+
+ if (depth && !sb->round_robin) {
+ int i;
+
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
+ }
+ return 0;
+}
+
+static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
+ unsigned int depth)
+{
+ unsigned hint;
+
+ hint = this_cpu_read(*sb->alloc_hint);
+ if (unlikely(hint >= depth)) {
+ hint = depth ? prandom_u32() % depth : 0;
+ this_cpu_write(*sb->alloc_hint, hint);
+ }
+
+ return hint;
+}
+
+static inline void update_alloc_hint_after_get(struct sbitmap *sb,
+ unsigned int depth,
+ unsigned int hint,
+ unsigned int nr)
+{
+ if (nr == -1) {
+ /* If the map is full, a hint won't do us much good. */
+ this_cpu_write(*sb->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sb->round_robin)) {
+ /* Only update the hint if we used it. */
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ this_cpu_write(*sb->alloc_hint, hint);
+ }
+}
+
/*
* See if we have deferred clears that we can batch move
*/
@@ -33,24 +81,15 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node)
+ gfp_t flags, int node, bool round_robin,
+ bool alloc_hint)
{
unsigned int bits_per_word;
unsigned int i;
- if (shift < 0) {
- shift = ilog2(BITS_PER_LONG);
- /*
- * If the bitmap is small, shrink the number of bits per word so
- * we spread over a few cachelines, at least. If less than 4
- * bits, just forget about it, it's not going to work optimally
- * anyway.
- */
- if (depth >= 4) {
- while ((4U << shift) > depth)
- shift--;
- }
- }
+ if (shift < 0)
+ shift = sbitmap_calculate_shift(depth);
+
bits_per_word = 1U << shift;
if (bits_per_word > BITS_PER_LONG)
return -EINVAL;
@@ -58,15 +97,25 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
sb->shift = shift;
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+ sb->round_robin = round_robin;
if (depth == 0) {
sb->map = NULL;
return 0;
}
+ if (alloc_hint) {
+ if (init_alloc_hint(sb, flags))
+ return -ENOMEM;
+ } else {
+ sb->alloc_hint = NULL;
+ }
+
sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
- if (!sb->map)
+ if (!sb->map) {
+ free_percpu(sb->alloc_hint);
return -ENOMEM;
+ }
for (i = 0; i < sb->map_nr; i++) {
sb->map[i].depth = min(depth, bits_per_word);
@@ -129,14 +178,14 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
}
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
- unsigned int alloc_hint, bool round_robin)
+ unsigned int alloc_hint)
{
struct sbitmap_word *map = &sb->map[index];
int nr;
do {
nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
- !round_robin);
+ !sb->round_robin);
if (nr != -1)
break;
if (!sbitmap_deferred_clear(map))
@@ -146,7 +195,7 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
return nr;
}
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
+static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
unsigned int i, index;
int nr = -1;
@@ -158,14 +207,13 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
* alloc_hint to find the right word index. No point in looping
* twice in find_next_zero_bit() for that case.
*/
- if (round_robin)
+ if (sb->round_robin)
alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
else
alloc_hint = 0;
for (i = 0; i < sb->map_nr; i++) {
- nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
- round_robin);
+ nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
if (nr != -1) {
nr += index << sb->shift;
break;
@@ -179,10 +227,27 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
return nr;
}
+
+int sbitmap_get(struct sbitmap *sb)
+{
+ int nr;
+ unsigned int hint, depth;
+
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get(sb, hint);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
+
+ return nr;
+}
EXPORT_SYMBOL_GPL(sbitmap_get);
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
- unsigned long shallow_depth)
+static int __sbitmap_get_shallow(struct sbitmap *sb,
+ unsigned int alloc_hint,
+ unsigned long shallow_depth)
{
unsigned int i, index;
int nr = -1;
@@ -214,6 +279,22 @@ again:
return nr;
}
+
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
+{
+ int nr;
+ unsigned int hint, depth;
+
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
+
+ return nr;
+}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
bool sbitmap_any_bit_set(const struct sbitmap *sb)
@@ -243,20 +324,21 @@ static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
return weight;
}
-static unsigned int sbitmap_weight(const struct sbitmap *sb)
+static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
- return __sbitmap_weight(sb, true);
+ return __sbitmap_weight(sb, false);
}
-static unsigned int sbitmap_cleared(const struct sbitmap *sb)
+unsigned int sbitmap_weight(const struct sbitmap *sb)
{
- return __sbitmap_weight(sb, false);
+ return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
+EXPORT_SYMBOL_GPL(sbitmap_weight);
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
seq_printf(m, "depth=%u\n", sb->depth);
- seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
+ seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
seq_printf(m, "map_nr=%u\n", sb->map_nr);
@@ -350,21 +432,11 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
int ret;
int i;
- ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
+ ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
+ round_robin, true);
if (ret)
return ret;
- sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
- if (!sbq->alloc_hint) {
- sbitmap_free(&sbq->sb);
- return -ENOMEM;
- }
-
- if (depth && !round_robin) {
- for_each_possible_cpu(i)
- *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
- }
-
sbq->min_shallow_depth = UINT_MAX;
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
@@ -372,7 +444,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
- free_percpu(sbq->alloc_hint);
sbitmap_free(&sbq->sb);
return -ENOMEM;
}
@@ -382,7 +453,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
}
- sbq->round_robin = round_robin;
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
@@ -415,60 +485,16 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
- unsigned int hint, depth;
- int nr;
-
- hint = this_cpu_read(*sbq->alloc_hint);
- depth = READ_ONCE(sbq->sb.depth);
- if (unlikely(hint >= depth)) {
- hint = depth ? prandom_u32() % depth : 0;
- this_cpu_write(*sbq->alloc_hint, hint);
- }
- nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
-
- if (nr == -1) {
- /* If the map is full, a hint won't do us much good. */
- this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(sbq->round_robin)) {
- /* Only update the hint if we used it. */
- hint = nr + 1;
- if (hint >= depth - 1)
- hint = 0;
- this_cpu_write(*sbq->alloc_hint, hint);
- }
-
- return nr;
+ return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int shallow_depth)
{
- unsigned int hint, depth;
- int nr;
-
WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
- hint = this_cpu_read(*sbq->alloc_hint);
- depth = READ_ONCE(sbq->sb.depth);
- if (unlikely(hint >= depth)) {
- hint = depth ? prandom_u32() % depth : 0;
- this_cpu_write(*sbq->alloc_hint, hint);
- }
- nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
-
- if (nr == -1) {
- /* If the map is full, a hint won't do us much good. */
- this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(sbq->round_robin)) {
- /* Only update the hint if we used it. */
- hint = nr + 1;
- if (hint >= depth - 1)
- hint = 0;
- this_cpu_write(*sbq->alloc_hint, hint);
- }
-
- return nr;
+ return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
@@ -576,8 +602,8 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq);
- if (likely(!sbq->round_robin && nr < sbq->sb.depth))
- *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
+ if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
+ *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
@@ -615,7 +641,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
if (!first)
seq_puts(m, ", ");
first = false;
- seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
+ seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
}
seq_puts(m, "}\n");
@@ -633,7 +659,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
}
seq_puts(m, "}\n");
- seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+ seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
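
With the per-CPU allocation hint moved into struct sbitmap, plain sbitmap
users get the same hinted allocation that sbitmap_queue already had. A sketch
of the new interface (depth and flags are arbitrary here):

        struct sbitmap sb;
        int ret, nr;

        /* shift < 0 lets sbitmap pick bits_per_word; the last two arguments
         * are round_robin and alloc_hint.
         */
        ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
                                false, true);
        if (ret)
                return ret;

        nr = sbitmap_get(&sb);          /* -1 when no free bit is found */
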
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 0ea0e8258f14..9cd575583180 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -34,6 +34,8 @@ static const unsigned long exp1[] __initconst = {
BITMAP_FROM_U64(0x3333333311111111ULL),
BITMAP_FROM_U64(0xffffffff77777777ULL),
BITMAP_FROM_U64(0),
+ BITMAP_FROM_U64(0x00008000),
+ BITMAP_FROM_U64(0x80000000),
};
static const unsigned long exp2[] __initconst = {
@@ -334,15 +336,47 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{0, " , ,, , , ", &exp1[12 * step], 8, 0},
{0, " , ,, , , \n", &exp1[12 * step], 8, 0},
+ {0, "0-0", &exp1[0], 32, 0},
+ {0, "1-1", &exp1[1 * step], 32, 0},
+ {0, "15-15", &exp1[13 * step], 32, 0},
+ {0, "31-31", &exp1[14 * step], 32, 0},
+
+ {0, "0-0:0/1", &exp1[12 * step], 32, 0},
+ {0, "0-0:1/1", &exp1[0], 32, 0},
+ {0, "0-0:1/31", &exp1[0], 32, 0},
+ {0, "0-0:31/31", &exp1[0], 32, 0},
+ {0, "1-1:1/1", &exp1[1 * step], 32, 0},
+ {0, "0-15:16/31", &exp1[2 * step], 32, 0},
+ {0, "15-15:1/2", &exp1[13 * step], 32, 0},
+ {0, "15-15:31/31", &exp1[13 * step], 32, 0},
+ {0, "15-31:1/31", &exp1[13 * step], 32, 0},
+ {0, "16-31:16/31", &exp1[3 * step], 32, 0},
+ {0, "31-31:31/31", &exp1[14 * step], 32, 0},
+
+ {0, "N-N", &exp1[14 * step], 32, 0},
+ {0, "0-0:1/N", &exp1[0], 32, 0},
+ {0, "0-0:N/N", &exp1[0], 32, 0},
+ {0, "0-15:16/N", &exp1[2 * step], 32, 0},
+ {0, "15-15:N/N", &exp1[13 * step], 32, 0},
+ {0, "15-N:1/N", &exp1[13 * step], 32, 0},
+ {0, "16-N:16/N", &exp1[3 * step], 32, 0},
+ {0, "N-N:N/N", &exp1[14 * step], 32, 0},
+
+ {0, "0-N:1/3,1-N:1/3,2-N:1/3", &exp1[8 * step], 32, 0},
+ {0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0},
+ {0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0},
+
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
{-EINVAL, "10-1", NULL, 8, 0},
- {-EINVAL, "0-31:", NULL, 8, 0},
- {-EINVAL, "0-31:0", NULL, 8, 0},
- {-EINVAL, "0-31:0/", NULL, 8, 0},
- {-EINVAL, "0-31:0/0", NULL, 8, 0},
- {-EINVAL, "0-31:1/0", NULL, 8, 0},
- {-EINVAL, "0-31:10/1", NULL, 8, 0},
+ {-ERANGE, "8-8", NULL, 8, 0},
+ {-ERANGE, "0-31", NULL, 8, 0},
+ {-EINVAL, "0-31:", NULL, 32, 0},
+ {-EINVAL, "0-31:0", NULL, 32, 0},
+ {-EINVAL, "0-31:0/", NULL, 32, 0},
+ {-EINVAL, "0-31:0/0", NULL, 32, 0},
+ {-EINVAL, "0-31:1/0", NULL, 32, 0},
+ {-EINVAL, "0-31:10/1", NULL, 32, 0},
{-EOVERFLOW, "0-98765432123456789:10/1", NULL, 8, 0},
{-EINVAL, "a-31", NULL, 8, 0},
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 785e724ce0d8..dc05cfc2d12f 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -54,6 +54,10 @@ static int kasan_test_init(struct kunit *test)
multishot = kasan_save_enable_multi_shot();
kasan_set_tagging_report_once(false);
+ fail_data.report_found = false;
+ fail_data.report_expected = false;
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "kasan_data", &fail_data);
return 0;
}
@@ -61,6 +65,7 @@ static void kasan_test_exit(struct kunit *test)
{
kasan_set_tagging_report_once(true);
kasan_restore_multi_shot(multishot);
+ KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}
/**
@@ -78,33 +83,31 @@ static void kasan_test_exit(struct kunit *test)
* fields, it can reorder or optimize away the accesses to those fields.
* Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
* expression to prevent that.
+ *
+ * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
+ * false. This allows detecting KASAN reports that happen outside of the checks
+ * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
+ * and in kasan_test_exit.
*/
-#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
- if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
- !kasan_async_mode_enabled()) \
- migrate_disable(); \
- WRITE_ONCE(fail_data.report_expected, true); \
- WRITE_ONCE(fail_data.report_found, false); \
- kunit_add_named_resource(test, \
- NULL, \
- NULL, \
- &resource, \
- "kasan_data", &fail_data); \
- barrier(); \
- expression; \
- barrier(); \
- if (kasan_async_mode_enabled()) \
- kasan_force_async_fault(); \
- barrier(); \
- KUNIT_EXPECT_EQ(test, \
- READ_ONCE(fail_data.report_expected), \
- READ_ONCE(fail_data.report_found)); \
- if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
- !kasan_async_mode_enabled()) { \
- if (READ_ONCE(fail_data.report_found)) \
- kasan_enable_tagging_sync(); \
- migrate_enable(); \
- } \
+#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
+ !kasan_async_mode_enabled()) \
+ migrate_disable(); \
+ KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
+ WRITE_ONCE(fail_data.report_expected, true); \
+ barrier(); \
+ expression; \
+ barrier(); \
+ KUNIT_EXPECT_EQ(test, \
+ READ_ONCE(fail_data.report_expected), \
+ READ_ONCE(fail_data.report_found)); \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
+ if (READ_ONCE(fail_data.report_found)) \
+ kasan_enable_tagging_sync(); \
+ migrate_enable(); \
+ } \
+ WRITE_ONCE(fail_data.report_found, false); \
+ WRITE_ONCE(fail_data.report_expected, false); \
} while (0)
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
@@ -1049,14 +1052,14 @@ static void match_all_mem_tag(struct kunit *test)
continue;
/* Mark the first memory granule with the chosen memory tag. */
- kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
/* This access must cause a KASAN report. */
KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
}
/* Recover the memory tag and free. */
- kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
kfree(ptr);
}
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 1f017d3b610e..00daaf23316f 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -56,7 +56,8 @@ static int __init check(struct debug_el *ela, struct debug_el *elb)
return 0;
}
-static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+static int __init cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
{
struct debug_el *ela, *elb;
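
list_sort() now hands const struct list_head pointers to the comparison callback, so the test's cmp() gains the const qualifiers shown above. A standalone sketch of a const-correct comparator over an intrusive node (userspace; the element type and the local container_of definition are illustrative only):

    /* Userspace sketch: const-correct comparator for an intrusive list node. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head { struct list_head *next, *prev; };

    struct element {
        int value;
        struct list_head list;
    };

    static int cmp(void *priv, const struct list_head *a,
                   const struct list_head *b)
    {
        const struct element *ea = container_of(a, struct element, list);
        const struct element *eb = container_of(b, struct element, list);

        (void)priv;
        return ea->value - eb->value;   /* <0, 0, >0, like the kernel callback */
    }

    int main(void)
    {
        struct element x = { .value = 1 }, y = { .value = 2 };

        printf("%d\n", cmp(NULL, &x.list, &y.list));    /* negative */
        return 0;
    }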
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 95a2f82427c7..ec0d5976bb69 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -577,24 +577,98 @@ netdev_features(void)
{
}
+struct page_flags_test {
+ int width;
+ int shift;
+ int mask;
+ unsigned long value;
+ const char *fmt;
+ const char *name;
+};
+
+static struct page_flags_test pft[] = {
+ {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
+ 0, "%d", "section"},
+ {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
+ 0, "%d", "node"},
+ {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
+ 0, "%d", "zone"},
+ {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
+ 0, "%#x", "lastcpupid"},
+ {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
+ 0, "%#x", "kasantag"},
+};
+
+static void __init
+page_flags_test(int section, int node, int zone, int last_cpupid,
+ int kasan_tag, int flags, const char *name, char *cmp_buf)
+{
+ unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
+ unsigned long page_flags = 0;
+ unsigned long size = 0;
+ bool append = false;
+ int i;
+
+ flags &= BIT(NR_PAGEFLAGS) - 1;
+ if (flags) {
+ page_flags |= flags;
+ snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+ size = strlen(cmp_buf);
+#if SECTIONS_WIDTH || NODES_WIDTH || ZONES_WIDTH || \
+ LAST_CPUPID_WIDTH || KASAN_TAG_WIDTH
+ /* Other information also included in page flags */
+ snprintf(cmp_buf + size, BUF_SIZE - size, "|");
+ size = strlen(cmp_buf);
+#endif
+ }
+
+ /* Set the test value */
+ for (i = 0; i < ARRAY_SIZE(pft); i++)
+ pft[i].value = values[i];
+
+ for (i = 0; i < ARRAY_SIZE(pft); i++) {
+ if (!pft[i].width)
+ continue;
+
+ if (append) {
+ snprintf(cmp_buf + size, BUF_SIZE - size, "|");
+ size = strlen(cmp_buf);
+ }
+
+ page_flags |= (pft[i].value & pft[i].mask) << pft[i].shift;
+ snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name);
+ size = strlen(cmp_buf);
+ snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
+ pft[i].value & pft[i].mask);
+ size = strlen(cmp_buf);
+ append = true;
+ }
+
+ test(cmp_buf, "%pGp", &page_flags);
+}
+
static void __init
flags(void)
{
unsigned long flags;
- gfp_t gfp;
char *cmp_buffer;
+ gfp_t gfp;
+
+ cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!cmp_buffer)
+ return;
flags = 0;
- test("", "%pGp", &flags);
+ page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
- /* Page flags should filter the zone id */
flags = 1UL << NR_PAGEFLAGS;
- test("", "%pGp", &flags);
+ page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru
| 1UL << PG_active | 1UL << PG_swapbacked;
- test("uptodate|dirty|lru|active|swapbacked", "%pGp", &flags);
-
+ page_flags_test(1, 1, 1, 0x1fffff, 1, flags,
+ "uptodate|dirty|lru|active|swapbacked",
+ cmp_buffer);
flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC
| VM_DENYWRITE;
@@ -609,10 +683,6 @@ flags(void)
gfp = __GFP_ATOMIC;
test("__GFP_ATOMIC", "%pGg", &gfp);
- cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
- if (!cmp_buffer)
- return;
-
/* Any flags not translated by the table should remain numeric */
gfp = ~__GFP_BITS_MASK;
snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp);
@@ -655,6 +725,23 @@ static void __init fwnode_pointer(void)
software_node_unregister_nodes(softnodes);
}
+static void __init fourcc_pointer(void)
+{
+ struct {
+ u32 code;
+ char *str;
+ } const try[] = {
+ { 0x3231564e, "NV12 little-endian (0x3231564e)", },
+ { 0xb231564e, "NV12 big-endian (0xb231564e)", },
+ { 0x10111213, ".... little-endian (0x10111213)", },
+ { 0x20303159, "Y10 little-endian (0x20303159)", },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(try); i++)
+ test(try[i].str, "%p4cc", &try[i].code);
+}
+
static void __init
errptr(void)
{
@@ -700,6 +787,7 @@ test_pointer(void)
flags();
errptr();
fwnode_pointer();
+ fourcc_pointer();
}
static void __init selftest(void)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76c607ee6db5..5a1dd4736b56 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -487,6 +487,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
struct rhashtable *ht;
const struct bucket_table *tbl;
char buff[512] = "";
+ int offset = 0;
unsigned int i, cnt = 0;
ht = &rhlt->ht;
@@ -501,18 +502,18 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
if (!rht_is_a_nulls(pos)) {
- sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+ offset += sprintf(buff + offset, "\nbucket[%d] -> ", i);
}
while (!rht_is_a_nulls(pos)) {
struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
- sprintf(buff, "%s[[", buff);
+ offset += sprintf(buff + offset, "[[");
do {
pos = &list->rhead;
list = rht_dereference(list->next, ht);
p = rht_obj(ht, pos);
- sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+ offset += sprintf(buff + offset, " val %d (tid=%d)%s", p->value.id, p->value.tid,
list? ", " : " ");
cnt++;
} while (list);
@@ -521,7 +522,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
- sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+ offset += sprintf(buff + offset, "]]%s", !rht_is_a_nulls(pos) ? " -> " : "");
}
}
printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
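
The print_ht() change drops the sprintf(buff, "%s...", buff) pattern, which passes the destination buffer as one of its own source arguments (overlapping copies are undefined behaviour for sprintf), in favour of an explicit running offset. A tiny sketch of the append-by-offset idiom (userspace snprintf; the buffer is sized so truncation cannot occur here):

    /* Userspace sketch of appending into one buffer with a running offset. */
    #include <stdio.h>

    int main(void)
    {
        char buf[128] = "";
        int off = 0, i;

        for (i = 0; i < 3; i++)
            off += snprintf(buf + off, sizeof(buf) - off,
                            "bucket[%d] -> [[ val %d ]] ", i, i * 10);

        printf("%s\n", buf);
        return 0;
    }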
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 5cf2fe9aab9e..01e9543de566 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -23,8 +23,8 @@
module_param(name, type, 0444); \
MODULE_PARM_DESC(name, msg) \
-__param(bool, single_cpu_test, false,
- "Use single first online CPU to run tests");
+__param(int, nr_threads, 0,
+ "Number of workers to perform tests(min: 1 max: USHRT_MAX)");
__param(bool, sequential_test_order, false,
"Use sequential stress tests order");
@@ -47,19 +47,10 @@ __param(int, run_test_mask, INT_MAX,
"\t\tid: 128, name: pcpu_alloc_test\n"
"\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n"
"\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n"
- "\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n"
- "\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n"
/* Add a new test case description here. */
);
/*
- * Depends on single_cpu_test parameter. If it is true, then
- * use first online CPU to trigger a test on, otherwise go with
- * all online CPUs.
- */
-static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
-
-/*
* Read write semaphore for synchronization of setup
* phase that is done in main thread and workers.
*/
@@ -363,42 +354,6 @@ kvfree_rcu_2_arg_vmalloc_test(void)
return 0;
}
-static int
-kvfree_rcu_1_arg_slab_test(void)
-{
- struct test_kvfree_rcu *p;
- int i;
-
- for (i = 0; i < test_loop_count; i++) {
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -1;
-
- p->array[0] = 'a';
- kvfree_rcu(p);
- }
-
- return 0;
-}
-
-static int
-kvfree_rcu_2_arg_slab_test(void)
-{
- struct test_kvfree_rcu *p;
- int i;
-
- for (i = 0; i < test_loop_count; i++) {
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -1;
-
- p->array[0] = 'a';
- kvfree_rcu(p, rcu);
- }
-
- return 0;
-}
-
struct test_case_desc {
const char *test_name;
int (*test_func)(void);
@@ -415,8 +370,6 @@ static struct test_case_desc test_case_array[] = {
{ "pcpu_alloc_test", pcpu_alloc_test },
{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
- { "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test },
- { "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test },
/* Add a new test case here. */
};
@@ -426,16 +379,13 @@ struct test_case_data {
u64 time;
};
-/* Split it to get rid of: WARNING: line over 80 characters */
-static struct test_case_data
- per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
-
static struct test_driver {
struct task_struct *task;
+ struct test_case_data data[ARRAY_SIZE(test_case_array)];
+
unsigned long start;
unsigned long stop;
- int cpu;
-} per_cpu_test_driver[NR_CPUS];
+} *tdriver;
static void shuffle_array(int *arr, int n)
{
@@ -463,9 +413,6 @@ static int test_func(void *private)
ktime_t kt;
u64 delta;
- if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
- pr_err("Failed to set affinity to %d CPU\n", t->cpu);
-
for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
random_array[i] = i;
@@ -490,9 +437,9 @@ static int test_func(void *private)
kt = ktime_get();
for (j = 0; j < test_repeat_count; j++) {
if (!test_case_array[index].test_func())
- per_cpu_test_data[t->cpu][index].test_passed++;
+ t->data[index].test_passed++;
else
- per_cpu_test_data[t->cpu][index].test_failed++;
+ t->data[index].test_failed++;
}
/*
@@ -501,7 +448,7 @@ static int test_func(void *private)
delta = (u64) ktime_us_delta(ktime_get(), kt);
do_div(delta, (u32) test_repeat_count);
- per_cpu_test_data[t->cpu][index].time = delta;
+ t->data[index].time = delta;
}
t->stop = get_cycles();
@@ -517,53 +464,56 @@ static int test_func(void *private)
return 0;
}
-static void
+static int
init_test_configurtion(void)
{
/*
- * Reset all data of all CPUs.
+ * A maximum number of workers is defined as hard-coded
+ * value and set to USHRT_MAX. We add such gap just in
+ * case and for potential heavy stressing.
*/
- memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+ nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
- if (single_cpu_test)
- cpumask_set_cpu(cpumask_first(cpu_online_mask),
- &cpus_run_test_mask);
- else
- cpumask_and(&cpus_run_test_mask, cpu_online_mask,
- cpu_online_mask);
+ /* Allocate the space for test instances. */
+ tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);
+ if (tdriver == NULL)
+ return -1;
if (test_repeat_count <= 0)
test_repeat_count = 1;
if (test_loop_count <= 0)
test_loop_count = 1;
+
+ return 0;
}
static void do_concurrent_test(void)
{
- int cpu, ret;
+ int i, ret;
/*
* Set some basic configurations plus sanity check.
*/
- init_test_configurtion();
+ ret = init_test_configurtion();
+ if (ret < 0)
+ return;
/*
* Put on hold all workers.
*/
down_write(&prepare_for_test_rwsem);
- for_each_cpu(cpu, &cpus_run_test_mask) {
- struct test_driver *t = &per_cpu_test_driver[cpu];
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
- t->cpu = cpu;
- t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
+ t->task = kthread_run(test_func, t, "vmalloc_test/%d", i);
if (!IS_ERR(t->task))
/* Success. */
atomic_inc(&test_n_undone);
else
- pr_err("Failed to start kthread for %d CPU\n", cpu);
+ pr_err("Failed to start %d kthread\n", i);
}
/*
@@ -581,29 +531,31 @@ static void do_concurrent_test(void)
ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
} while (!ret);
- for_each_cpu(cpu, &cpus_run_test_mask) {
- struct test_driver *t = &per_cpu_test_driver[cpu];
- int i;
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
+ int j;
if (!IS_ERR(t->task))
kthread_stop(t->task);
- for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
- if (!((run_test_mask & (1 << i)) >> i))
+ for (j = 0; j < ARRAY_SIZE(test_case_array); j++) {
+ if (!((run_test_mask & (1 << j)) >> j))
continue;
pr_info(
"Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
- test_case_array[i].test_name,
- per_cpu_test_data[cpu][i].test_passed,
- per_cpu_test_data[cpu][i].test_failed,
+ test_case_array[j].test_name,
+ t->data[j].test_passed,
+ t->data[j].test_failed,
test_repeat_count, test_loop_count,
- per_cpu_test_data[cpu][i].time);
+ t->data[j].time);
}
- pr_info("All test took CPU%d=%lu cycles\n",
- cpu, t->stop - t->start);
+ pr_info("All test took worker%d=%lu cycles\n",
+ i, t->stop - t->start);
}
+
+ kvfree(tdriver);
}
static int vmalloc_test_init(void)
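
test_vmalloc now runs a caller-chosen number of workers (nr_threads, clamped to [1, USHRT_MAX]) instead of one pinned kthread per online CPU, and each worker keeps its results in its own slot of the kvcalloc()'d tdriver array. A userspace pthread sketch of that per-worker bookkeeping (the case names and pass/fail rule are invented):

    /* Userspace sketch: N workers, each with private result slots. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_TESTS 2

    struct test_case_data {
        int test_passed;
        int test_failed;
    };

    struct test_driver {
        pthread_t task;
        struct test_case_data data[NR_TESTS];
    };

    static int run_case(int index)
    {
        return index % 2;   /* pretend: even-numbered cases pass */
    }

    static void *test_func(void *private)
    {
        struct test_driver *t = private;
        int i;

        for (i = 0; i < NR_TESTS; i++) {
            if (!run_case(i))
                t->data[i].test_passed++;
            else
                t->data[i].test_failed++;
        }
        return NULL;
    }

    int main(void)
    {
        int nr_threads = 4, i, j;
        struct test_driver *tdriver = calloc(nr_threads, sizeof(*tdriver));

        if (!tdriver)
            return 1;
        for (i = 0; i < nr_threads; i++)
            pthread_create(&tdriver[i].task, NULL, test_func, &tdriver[i]);
        for (i = 0; i < nr_threads; i++) {
            pthread_join(tdriver[i].task, NULL);
            for (j = 0; j < NR_TESTS; j++)
                printf("worker%d case%d passed=%d failed=%d\n", i, j,
                       tdriver[i].data[j].test_passed,
                       tdriver[i].data[j].test_failed);
        }
        free(tdriver);
        return 0;
    }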
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 2919f1698140..ce2f69552003 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -46,16 +46,18 @@ static inline bool vdso_cycles_ok(u64 cycles)
#endif
#ifdef CONFIG_TIME_NS
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_timens_vdso_data();
+ const struct vdso_data *vd;
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
u64 cycles, last, ns;
u32 seq;
s64 sec;
+ vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
+ vd = __arch_get_timens_vdso_data(vd);
if (clk != CLOCK_MONOTONIC_RAW)
vd = &vd[CS_HRES_COARSE];
else
@@ -92,13 +94,14 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
return 0;
}
#else
-static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
return NULL;
}
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
return -EINVAL;
}
@@ -159,10 +162,10 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
}
#ifdef CONFIG_TIME_NS
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_timens_vdso_data();
+ const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
const struct timens_offset *offs = &vdns->offset[clk];
u64 nsec;
@@ -188,8 +191,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
return 0;
}
#else
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
{
return -1;
}
@@ -310,7 +313,7 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd,
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data();
+ vd = __arch_get_timens_vdso_data(vd);
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
@@ -333,7 +336,7 @@ __cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data();
+ vd = __arch_get_timens_vdso_data(vd);
t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
@@ -363,7 +366,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data();
+ vd = __arch_get_timens_vdso_data(vd);
/*
* Convert the clockid to a bitmask and use it to check which
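
__arch_get_timens_vdso_data() now receives the regular vdso_data pointer so architectures with more than one vDSO data page can locate the matching time-namespace page. Before calling it, do_hres_timens() steps back from the per-clock element it was handed to the start of the array: vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE). A small sketch of that element-to-base pointer arithmetic (plain C, dummy types):

    /* Sketch: recover an array's base pointer from a pointer to one element. */
    #include <assert.h>
    #include <stdio.h>

    enum { CS_HRES_COARSE, CS_RAW, CS_BASES };

    struct vdso_data { int dummy; };

    int main(void)
    {
        struct vdso_data vdso[CS_BASES];
        /* A caller may be handed &vdso[CS_RAW] for CLOCK_MONOTONIC_RAW ... */
        const struct vdso_data *vdns = &vdso[CS_RAW];
        /* ... and can step back to the array start by subtracting the index. */
        const struct vdso_data *vd = vdns - CS_RAW;

        assert(vd == &vdso[0]);
        printf("recovered base at index %td\n", vd - vdso);    /* 0 */
        return 0;
    }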
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 41ddc353ebb8..f0c35d9b65bf 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1734,6 +1734,42 @@ char *netdev_bits(char *buf, char *end, const void *addr,
}
static noinline_for_stack
+char *fourcc_string(char *buf, char *end, const u32 *fourcc,
+ struct printf_spec spec, const char *fmt)
+{
+ char output[sizeof("0123 little-endian (0x01234567)")];
+ char *p = output;
+ unsigned int i;
+ u32 val;
+
+ if (fmt[1] != 'c' || fmt[2] != 'c')
+ return error_string(buf, end, "(%p4?)", spec);
+
+ if (check_pointer(&buf, end, fourcc, spec))
+ return buf;
+
+ val = *fourcc & ~BIT(31);
+
+ for (i = 0; i < sizeof(*fourcc); i++) {
+ unsigned char c = val >> (i * 8);
+
+ /* Print non-control ASCII characters as-is, dot otherwise */
+ *p++ = isascii(c) && isprint(c) ? c : '.';
+ }
+
+ strcpy(p, *fourcc & BIT(31) ? " big-endian" : " little-endian");
+ p += strlen(p);
+
+ *p++ = ' ';
+ *p++ = '(';
+ p = special_hex_number(p, output + sizeof(output) - 2, *fourcc, sizeof(u32));
+ *p++ = ')';
+ *p = '\0';
+
+ return string(buf, end, output, spec);
+}
+
+static noinline_for_stack
char *address_val(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
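
fourcc_string() implements the new %p4cc specifier: it masks off bit 31 (the big-endian flag), renders each of the four payload bytes as printable ASCII or '.', then appends the endianness word and the raw value in hex. A standalone sketch of the same decoding (userspace; the kernel's printf_spec and bounds handling are omitted):

    /* Sketch of %p4cc decoding: fourcc -> "NV12 little-endian (0x3231564e)". */
    #include <ctype.h>
    #include <stdint.h>
    #include <stdio.h>

    static void show_fourcc(uint32_t fourcc)
    {
        uint32_t val = fourcc & ~(UINT32_C(1) << 31);
        char tag[5];
        unsigned int i;

        for (i = 0; i < 4; i++) {
            unsigned char c = val >> (i * 8);

            /* Printable ASCII stays, everything else becomes a dot. */
            tag[i] = (c < 0x80 && isprint(c)) ? c : '.';
        }
        tag[4] = '\0';

        printf("%s %s-endian (0x%08x)\n", tag,
               fourcc & (UINT32_C(1) << 31) ? "big" : "little", fourcc);
    }

    int main(void)
    {
        show_fourcc(0x3231564e);    /* NV12 little-endian (0x3231564e) */
        show_fourcc(0xb231564e);    /* NV12 big-endian (0xb231564e) */
        show_fourcc(0x10111213);    /* .... little-endian (0x10111213) */
        return 0;
    }

The expected strings in the fourcc_pointer() test above follow directly from this layout.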
@@ -1916,6 +1952,66 @@ char *format_flags(char *buf, char *end, unsigned long flags,
return buf;
}
+struct page_flags_fields {
+ int width;
+ int shift;
+ int mask;
+ const struct printf_spec *spec;
+ const char *name;
+};
+
+static const struct page_flags_fields pff[] = {
+ {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
+ &default_dec_spec, "section"},
+ {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
+ &default_dec_spec, "node"},
+ {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
+ &default_dec_spec, "zone"},
+ {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
+ &default_flag_spec, "lastcpupid"},
+ {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
+ &default_flag_spec, "kasantag"},
+};
+
+static
+char *format_page_flags(char *buf, char *end, unsigned long flags)
+{
+ unsigned long main_flags = flags & (BIT(NR_PAGEFLAGS) - 1);
+ bool append = false;
+ int i;
+
+ /* Page flags from the main area. */
+ if (main_flags) {
+ buf = format_flags(buf, end, main_flags, pageflag_names);
+ append = true;
+ }
+
+ /* Page flags from the fields area */
+ for (i = 0; i < ARRAY_SIZE(pff); i++) {
+ /* Skip undefined fields. */
+ if (!pff[i].width)
+ continue;
+
+ /* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */
+ if (append) {
+ if (buf < end)
+ *buf = '|';
+ buf++;
+ }
+
+ buf = string(buf, end, pff[i].name, default_str_spec);
+ if (buf < end)
+ *buf = '=';
+ buf++;
+ buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask,
+ *pff[i].spec);
+
+ append = true;
+ }
+
+ return buf;
+}
+
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
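
format_page_flags() splits page->flags into the named bits below NR_PAGEFLAGS (printed via format_flags()) and the packed fields above them, emitting name=value for every field whose configured width is non-zero, with '|' between entries; fields compiled out (width 0) are skipped. A userspace sketch of packing and then decoding such shift/mask fields (the layout, names and values here are invented for illustration):

    /* Sketch: pack fields into the high bits of a word, then print name=value|... */
    #include <stdio.h>

    struct field {
        int width;
        int shift;
        unsigned long mask;
        const char *name;
    };

    /* Invented layout: 2-bit zone at bit 28, 4-bit node at bit 24. */
    static const struct field fields[] = {
        { 2, 28, 0x3, "zone" },
        { 4, 24, 0xf, "node" },
        { 0, 0,  0x0, "unused" },   /* width 0: skipped, like !pff[i].width */
    };

    int main(void)
    {
        unsigned long flags = 0;
        int i, append = 0;

        flags |= (2UL & fields[0].mask) << fields[0].shift;    /* zone = 2 */
        flags |= (5UL & fields[1].mask) << fields[1].shift;    /* node = 5 */

        for (i = 0; i < (int)(sizeof(fields) / sizeof(fields[0])); i++) {
            if (!fields[i].width)
                continue;
            printf("%s%s=%lu", append ? "|" : "", fields[i].name,
                   (flags >> fields[i].shift) & fields[i].mask);
            append = 1;
        }
        printf("\n");    /* prints: zone=2|node=5 */
        return 0;
    }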
@@ -1928,11 +2024,7 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
switch (fmt[1]) {
case 'p':
- flags = *(unsigned long *)flags_ptr;
- /* Remove zone id */
- flags &= (1UL << NR_PAGEFLAGS) - 1;
- names = pageflag_names;
- break;
+ return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
@@ -2096,6 +2188,9 @@ EXPORT_SYMBOL_GPL(no_hash_pointers);
static int __init no_hash_pointers_enable(char *str)
{
+ if (no_hash_pointers)
+ return 0;
+
no_hash_pointers = true;
pr_warn("**********************************************************\n");
@@ -2186,8 +2281,11 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* Implements a "recursive vsnprintf".
* Do not use this feature without some mechanism to verify the
* correctness of the format string and va_list arguments.
- * - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'K' For a kernel pointer that should be hidden from unprivileged users.
+ * Use only for procfs, sysfs and similar files, not printk(); please
+ * read the documentation (path below) first.
* - 'NF' For a netdev_features_t
+ * - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
* a certain separator (' ' by default):
* C colon
@@ -2225,7 +2323,8 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* Without an option prints the full name of the node
* f full name
* P node name, including a possible unit address
- * - 'x' For printing the address. Equivalent to "%lx".
+ * - 'x' For printing the address unmodified. Equivalent to "%lx".
+ * Please read the documentation (path below) before using!
* - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of
* bpf_trace_printk() where [ku] prefix specifies either kernel (k)
* or user (u) memory to probe, and:
@@ -2285,6 +2384,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, spec, fmt);
+ case '4':
+ return fourcc_string(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, spec, fmt);
case 'd':
@@ -3135,8 +3236,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
switch (*fmt) {
case 'S':
case 's':
- case 'F':
- case 'f':
case 'x':
case 'K':
case 'e':