Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig | 3
-rw-r--r-- | lib/Kconfig.debug | 10
-rw-r--r-- | lib/Makefile | 2
-rw-r--r-- | lib/cordic.c | 23
-rw-r--r-- | lib/debugobjects.c | 5
-rw-r--r-- | lib/iov_iter.c | 38
-rw-r--r-- | lib/objagg.c | 501
-rw-r--r-- | lib/raid6/test/Makefile | 4
-rw-r--r-- | lib/rhashtable.c | 8
-rw-r--r-- | lib/test_bpf.c | 14
-rw-r--r-- | lib/test_firmware.c | 1
-rw-r--r-- | lib/test_hexdump.c | 2
-rw-r--r-- | lib/test_kmod.c | 1
-rw-r--r-- | lib/test_objagg.c | 836
-rw-r--r-- | lib/test_xarray.c | 50
-rw-r--r-- | lib/ubsan.c | 3
-rw-r--r-- | lib/xarray.c | 139
17 files changed, 1523 insertions(+), 117 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index a9965f4af4dd..7dbbcfe9cd90 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -624,3 +624,6 @@ config GENERIC_LIB_CMPDI2
 
 config GENERIC_LIB_UCMPDI2
 	bool
+
+config OBJAGG
+	tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af29b8224fd..b3c91b9e32f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1976,6 +1976,16 @@ config TEST_MEMCAT_P
 
 	  If unsure, say N.
 
+config TEST_OBJAGG
+	tristate "Perform selftest on object aggregation manager"
+	default n
+	depends on OBJAGG
+	help
+	  Enable this option to test object aggregation manager on boot
+	  (or module load).
+
+	  If unsure, say N.
+
 endif # RUNTIME_TESTING_MENU
 
 config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index db06d1237898..f5262d30bfe6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
 obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
+obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -274,3 +275,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
 obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
 obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
 obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_OBJAGG) += objagg.o
diff --git a/lib/cordic.c b/lib/cordic.c
index 6cf477839ebd..8ef27c12956f 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -16,15 +16,6 @@
 #include <linux/module.h>
 #include <linux/cordic.h>
 
-#define CORDIC_ANGLE_GEN	39797
-#define CORDIC_PRECISION_SHIFT	16
-#define CORDIC_NUM_ITER		(CORDIC_PRECISION_SHIFT + 2)
-
-#define FIXED(X)	((s32)((X) << CORDIC_PRECISION_SHIFT))
-#define FLOAT(X)	(((X) >= 0) \
-		? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
-		: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
-
 static const s32 arctan_table[] = {
 	2949120,
 	1740967,
@@ -64,16 +55,16 @@ struct cordic_iq cordic_calc_iq(s32 theta)
 	coord.q = 0;
 	angle = 0;
 
-	theta = FIXED(theta);
+	theta = CORDIC_FIXED(theta);
 	signtheta = (theta < 0) ? -1 : 1;
-	theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
-		FIXED(180) * signtheta;
+	theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
+		CORDIC_FIXED(180) * signtheta;
 
-	if (FLOAT(theta) > 90) {
-		theta -= FIXED(180);
+	if (CORDIC_FLOAT(theta) > 90) {
+		theta -= CORDIC_FIXED(180);
 		signx = -1;
-	} else if (FLOAT(theta) < -90) {
-		theta += FIXED(180);
+	} else if (CORDIC_FLOAT(theta) < -90) {
+		theta += CORDIC_FIXED(180);
 		signx = -1;
 	}
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed91125..14afeeb7d6ef 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
 		if (!new)
 			return;
 
-		kmemleak_ignore(new);
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
 		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
 		if (!obj)
 			goto free;
-		kmemleak_ignore(obj);
 		hlist_add_head(&obj->node, &objects);
 	}
 
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
 
 	obj_cache = kmem_cache_create("debug_objects_cache",
 				      sizeof (struct debug_obj), 0,
-				      SLAB_DEBUG_OBJECTS, NULL);
+				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+				      NULL);
 
 	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7ebccb5c1637..54c248526b55 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -560,6 +560,38 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 	return bytes;
 }
 
+static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
+					 __wsum *csum, struct iov_iter *i)
+{
+	struct pipe_inode_info *pipe = i->pipe;
+	size_t n, r;
+	size_t off = 0;
+	__wsum sum = *csum, next;
+	int idx;
+
+	if (!sanity(i))
+		return 0;
+
+	bytes = n = push_pipe(i, bytes, &idx, &r);
+	if (unlikely(!n))
+		return 0;
+	for ( ; n; idx = next_idx(idx, pipe), r = 0) {
+		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
+		char *p = kmap_atomic(pipe->bufs[idx].page);
+		next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
+		sum = csum_block_add(sum, next, off);
+		kunmap_atomic(p);
+		i->idx = idx;
+		i->iov_offset = r + chunk;
+		n -= chunk;
+		off += chunk;
+		addr += chunk;
+	}
+	i->count -= bytes;
+	*csum = sum;
+	return bytes;
+}
+
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
@@ -1438,8 +1470,12 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 	const char *from = addr;
 	__wsum sum, next;
 	size_t off = 0;
+
+	if (unlikely(iov_iter_is_pipe(i)))
+		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+
 	sum = *csum;
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+	if (unlikely(iov_iter_is_discard(i))) {
 		WARN_ON(1);	/* for now */
 		return 0;
 	}
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 000000000000..c9b457a91153
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies.
All rights reserved */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/rhashtable.h> +#include <linux/list.h> +#include <linux/sort.h> +#include <linux/objagg.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/objagg.h> + +struct objagg { + const struct objagg_ops *ops; + void *priv; + struct rhashtable obj_ht; + struct rhashtable_params ht_params; + struct list_head obj_list; + unsigned int obj_count; +}; + +struct objagg_obj { + struct rhash_head ht_node; /* member of objagg->obj_ht */ + struct list_head list; /* member of objagg->obj_list */ + struct objagg_obj *parent; /* if the object is nested, this + * holds pointer to parent, otherwise NULL + */ + union { + void *delta_priv; /* user delta private */ + void *root_priv; /* user root private */ + }; + unsigned int refcount; /* counts number of users of this object + * including nested objects + */ + struct objagg_obj_stats stats; + unsigned long obj[0]; +}; + +static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj) +{ + return ++objagg_obj->refcount; +} + +static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj) +{ + return --objagg_obj->refcount; +} + +static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj) +{ + objagg_obj->stats.user_count++; + objagg_obj->stats.delta_user_count++; + if (objagg_obj->parent) + objagg_obj->parent->stats.delta_user_count++; +} + +static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj) +{ + objagg_obj->stats.user_count--; + objagg_obj->stats.delta_user_count--; + if (objagg_obj->parent) + objagg_obj->parent->stats.delta_user_count--; +} + +static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj) +{ + /* Nesting is not supported, so we can use ->parent + * to figure out if the object is root. + */ + return !objagg_obj->parent; +} + +/** + * objagg_obj_root_priv - obtains root private for an object + * @objagg_obj: objagg object instance + * + * Note: all locking must be provided by the caller. + * + * Either the object is root itself when the private is returned + * directly, or the parent is root and its private is returned + * instead. + * + * Returns a user private root pointer. + */ +const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj) +{ + if (objagg_obj_is_root(objagg_obj)) + return objagg_obj->root_priv; + WARN_ON(!objagg_obj_is_root(objagg_obj->parent)); + return objagg_obj->parent->root_priv; +} +EXPORT_SYMBOL(objagg_obj_root_priv); + +/** + * objagg_obj_delta_priv - obtains delta private for an object + * @objagg_obj: objagg object instance + * + * Note: all locking must be provided by the caller. + * + * Returns user private delta pointer or NULL in case the passed + * object is root. + */ +const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj) +{ + if (objagg_obj_is_root(objagg_obj)) + return NULL; + return objagg_obj->delta_priv; +} +EXPORT_SYMBOL(objagg_obj_delta_priv); + +/** + * objagg_obj_raw - obtains object user private pointer + * @objagg_obj: objagg object instance + * + * Note: all locking must be provided by the caller. + * + * Returns user private pointer as was passed to objagg_obj_get() by "obj" arg. 
+ */ +const void *objagg_obj_raw(const struct objagg_obj *objagg_obj) +{ + return objagg_obj->obj; +} +EXPORT_SYMBOL(objagg_obj_raw); + +static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj) +{ + return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params); +} + +static int objagg_obj_parent_assign(struct objagg *objagg, + struct objagg_obj *objagg_obj, + struct objagg_obj *parent) +{ + void *delta_priv; + + delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj, + objagg_obj->obj); + if (IS_ERR(delta_priv)) + return PTR_ERR(delta_priv); + + /* User returned a delta private, that means that + * our object can be aggregated into the parent. + */ + objagg_obj->parent = parent; + objagg_obj->delta_priv = delta_priv; + objagg_obj_ref_inc(objagg_obj->parent); + trace_objagg_obj_parent_assign(objagg, objagg_obj, + parent, + parent->refcount); + return 0; +} + +static int objagg_obj_parent_lookup_assign(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + struct objagg_obj *objagg_obj_cur; + int err; + + list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) { + /* Nesting is not supported. In case the object + * is not root, it cannot be assigned as parent. + */ + if (!objagg_obj_is_root(objagg_obj_cur)) + continue; + err = objagg_obj_parent_assign(objagg, objagg_obj, + objagg_obj_cur); + if (!err) + return 0; + } + return -ENOENT; +} + +static void __objagg_obj_put(struct objagg *objagg, + struct objagg_obj *objagg_obj); + +static void objagg_obj_parent_unassign(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_parent_unassign(objagg, objagg_obj, + objagg_obj->parent, + objagg_obj->parent->refcount); + objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv); + __objagg_obj_put(objagg, objagg_obj->parent); +} + +static int objagg_obj_root_create(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + objagg_obj->root_priv = objagg->ops->root_create(objagg->priv, + objagg_obj->obj); + if (IS_ERR(objagg_obj->root_priv)) + return PTR_ERR(objagg_obj->root_priv); + + trace_objagg_obj_root_create(objagg, objagg_obj); + return 0; +} + +static void objagg_obj_root_destroy(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_root_destroy(objagg, objagg_obj); + objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv); +} + +static int objagg_obj_init(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + int err; + + /* Try to find if the object can be aggregated under an existing one. */ + err = objagg_obj_parent_lookup_assign(objagg, objagg_obj); + if (!err) + return 0; + /* If aggregation is not possible, make the object a root. 
 */
+	return objagg_obj_root_create(objagg, objagg_obj);
+}
+
+static void objagg_obj_fini(struct objagg *objagg,
+			    struct objagg_obj *objagg_obj)
+{
+	if (!objagg_obj_is_root(objagg_obj))
+		objagg_obj_parent_unassign(objagg, objagg_obj);
+	else
+		objagg_obj_root_destroy(objagg, objagg_obj);
+}
+
+static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+	int err;
+
+	objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
+			     GFP_KERNEL);
+	if (!objagg_obj)
+		return ERR_PTR(-ENOMEM);
+	objagg_obj_ref_inc(objagg_obj);
+	memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
+
+	err = objagg_obj_init(objagg, objagg_obj);
+	if (err)
+		goto err_obj_init;
+
+	err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+				     objagg->ht_params);
+	if (err)
+		goto err_ht_insert;
+	list_add(&objagg_obj->list, &objagg->obj_list);
+	objagg->obj_count++;
+	trace_objagg_obj_create(objagg, objagg_obj);
+
+	return objagg_obj;
+
+err_ht_insert:
+	objagg_obj_fini(objagg, objagg_obj);
+err_obj_init:
+	kfree(objagg_obj);
+	return ERR_PTR(err);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+
+	/* First, try to find the object exactly as user passed it,
+	 * perhaps it is already in use.
+	 */
+	objagg_obj = objagg_obj_lookup(objagg, obj);
+	if (objagg_obj) {
+		objagg_obj_ref_inc(objagg_obj);
+		return objagg_obj;
+	}
+
+	return objagg_obj_create(objagg, obj);
+}
+
+/**
+ * objagg_obj_get - gets an object within objagg instance
+ * @objagg: objagg instance
+ * @obj: user-specific private object pointer
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Size of the "obj" memory is specified in "objagg->ops".
+ *
+ * There are 3 main options this function wraps:
+ * 1) The object according to "obj" already exists. In that case
+ *    the reference counter is incremented and the object is returned.
+ * 2) The object does not exist, but it can be aggregated within
+ *    another object. In that case, user ops->delta_create() is called
+ *    to obtain delta data and a new object is created with returned
+ *    user-delta private pointer.
+ * 3) The object does not exist and cannot be aggregated into
+ *    any of the existing objects. In that case, user ops->root_create()
+ *    is called to create the root and a new object is created with
+ *    returned user-root private pointer.
+ *
+ * Returns a pointer to objagg object instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+
+	objagg_obj = __objagg_obj_get(objagg, obj);
+	if (IS_ERR(objagg_obj))
+		return objagg_obj;
+	objagg_obj_stats_inc(objagg_obj);
+	trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
+	return objagg_obj;
+}
+EXPORT_SYMBOL(objagg_obj_get);
+
+static void objagg_obj_destroy(struct objagg *objagg,
+			       struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_destroy(objagg, objagg_obj);
+	--objagg->obj_count;
+	list_del(&objagg_obj->list);
+	rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+			       objagg->ht_params);
+	objagg_obj_fini(objagg, objagg_obj);
+	kfree(objagg_obj);
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+			     struct objagg_obj *objagg_obj)
+{
+	if (!objagg_obj_ref_dec(objagg_obj))
+		objagg_obj_destroy(objagg, objagg_obj);
+}
+
+/**
+ * objagg_obj_put - puts an object within objagg instance
+ * @objagg: objagg instance
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Symmetric to objagg_obj_get().
+ */
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
+	objagg_obj_stats_dec(objagg_obj);
+	__objagg_obj_put(objagg, objagg_obj);
+}
+EXPORT_SYMBOL(objagg_obj_put);
+
+/**
+ * objagg_create - creates a new objagg instance
+ * @ops: user-specific callbacks
+ * @priv: pointer to private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The purpose of the library is to provide an infrastructure to
+ * aggregate user-specified objects. The library does not care about the
+ * type of the object. The user fills up ops which take care of the
+ * specific user object manipulation.
+ *
+ * As a very simple example, consider integer numbers: number 8 as
+ * a root object can aggregate number 9 with delta 1, number 10 with
+ * delta 2, etc. This example is implemented as a part of a testing
+ * module in the test_objagg.c file.
+ *
+ * Each objagg instance contains multiple trees. Each tree node is
+ * represented by "an object". In the current implementation there can be
+ * only root and leaf nodes. Leaf nodes are called deltas.
+ * But in general, this can be easily extended for intermediate nodes.
+ * In that extension, a delta would be associated with all non-root
+ * nodes.
+ *
+ * Returns a pointer to newly created objagg instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
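+ *
+ * A minimal usage sketch (illustrative only; "my_ops", "my_obj" and the
+ * callbacks are hypothetical stand-ins for user-provided implementations,
+ * in the spirit of lib/test_objagg.c below):
+ *
+ *	static const struct objagg_ops my_ops = {
+ *		.obj_size = sizeof(struct my_obj),
+ *		.delta_create = my_delta_create,
+ *		.delta_destroy = my_delta_destroy,
+ *		.root_create = my_root_create,
+ *		.root_destroy = my_root_destroy,
+ *	};
+ *
+ *	objagg = objagg_create(&my_ops, priv);
+ *	if (IS_ERR(objagg))
+ *		return PTR_ERR(objagg);
+ *	objagg_obj = objagg_obj_get(objagg, &my_obj_instance);
+ *	...
+ *	objagg_obj_put(objagg, objagg_obj);
+ *	objagg_destroy(objagg);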
+ */
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
+{
+	struct objagg *objagg;
+	int err;
+
+	if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
+		    !ops->delta_create || !ops->delta_destroy))
+		return ERR_PTR(-EINVAL);
+	objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+	if (!objagg)
+		return ERR_PTR(-ENOMEM);
+	objagg->ops = ops;
+	objagg->priv = priv;
+	INIT_LIST_HEAD(&objagg->obj_list);
+
+	objagg->ht_params.key_len = ops->obj_size;
+	objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
+	objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
+
+	err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
+	if (err)
+		goto err_rhashtable_init;
+
+	trace_objagg_create(objagg);
+	return objagg;
+
+err_rhashtable_init:
+	kfree(objagg);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_create);
+
+/**
+ * objagg_destroy - destroys an objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_destroy(struct objagg *objagg)
+{
+	trace_objagg_destroy(objagg);
+	WARN_ON(!list_empty(&objagg->obj_list));
+	rhashtable_destroy(&objagg->obj_ht);
+	kfree(objagg);
+}
+EXPORT_SYMBOL(objagg_destroy);
+
+static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
+{
+	const struct objagg_obj_stats_info *stats_info1 = a;
+	const struct objagg_obj_stats_info *stats_info2 = b;
+
+	if (stats_info1->is_root != stats_info2->is_root)
+		return stats_info2->is_root - stats_info1->is_root;
+	if (stats_info1->stats.delta_user_count !=
+	    stats_info2->stats.delta_user_count)
+		return stats_info2->stats.delta_user_count -
+		       stats_info1->stats.delta_user_count;
+	return stats_info2->stats.user_count - stats_info1->stats.user_count;
+}
+
+/**
+ * objagg_stats_get - obtains stats of the objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ *    indexes.
+ * 3) In case multiple objects have the same delta user count,
+ *    the objects are ordered by user count.
+ *
+ * Returns a pointer to stats instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
+{
+	struct objagg_stats *objagg_stats;
+	struct objagg_obj *objagg_obj;
+	size_t alloc_size;
+	int i;
+
+	alloc_size = sizeof(*objagg_stats) +
+		     sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
+	objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+	if (!objagg_stats)
+		return ERR_PTR(-ENOMEM);
+
+	i = 0;
+	list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+		memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
+		       sizeof(objagg_stats->stats_info[0].stats));
+		objagg_stats->stats_info[i].objagg_obj = objagg_obj;
+		objagg_stats->stats_info[i].is_root =
+					objagg_obj_is_root(objagg_obj);
+		i++;
+	}
+	objagg_stats->stats_info_count = i;
+
+	sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+	     sizeof(struct objagg_obj_stats_info),
+	     objagg_stats_info_sort_cmp_func, NULL);
+
+	return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_stats_get);
+
+/**
+ * objagg_stats_put - puts stats of the objagg instance
+ * @objagg_stats: objagg instance stats
+ *
+ * Note: all locking must be provided by the caller.
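+ *
+ * A short get/iterate/put sketch (illustrative only, not part of the
+ * original patch; assumes "objagg" was returned by objagg_create()):
+ *
+ *	const struct objagg_stats *stats;
+ *	int i;
+ *
+ *	stats = objagg_stats_get(objagg);
+ *	if (IS_ERR(stats))
+ *		return PTR_ERR(stats);
+ *	for (i = 0; i < stats->stats_info_count; i++)
+ *		pr_info("root: %d users: %u\n",
+ *			stats->stats_info[i].is_root,
+ *			stats->stats_info[i].stats.user_count);
+ *	objagg_stats_put(stats);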
+ */ +void objagg_stats_put(const struct objagg_stats *objagg_stats) +{ + kfree(objagg_stats); +} +EXPORT_SYMBOL(objagg_stats_put); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Object aggregation manager"); diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index 5d73f5cb4d8a..79777645cac9 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -27,7 +27,7 @@ ifeq ($(ARCH),arm) CFLAGS += -I../../../arch/arm/include -mfpu=neon HAS_NEON = yes endif -ifeq ($(ARCH),arm64) +ifeq ($(ARCH),aarch64) CFLAGS += -I../../../arch/arm64/include HAS_NEON = yes endif @@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes) gcc -c -x assembler - >&/dev/null && \ rm ./-.o && echo -DCONFIG_AS_AVX512=1) else ifeq ($(HAS_NEON),yes) - OBJS += neon.o neon1.o neon2.o neon4.o neon8.o + OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 else HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\ diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 30526afa8343..852ffa5160f1 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -1179,8 +1179,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); - static struct rhash_head __rcu *rhnull = - (struct rhash_head __rcu *)NULLS_MARKER(0); + static struct rhash_head __rcu *rhnull; unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; unsigned int subhash = hash; @@ -1198,8 +1197,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, subhash >>= shift; } - if (!ntbl) + if (!ntbl) { + if (!rhnull) + INIT_RHT_NULLS_HEAD(rhnull); return &rhnull; + } return &ntbl[subhash].bucket; diff --git a/lib/test_bpf.c b/lib/test_bpf.c index aa22bcaec1dc..f3e570722a7e 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -39,6 +39,7 @@ #define SKB_HASH 0x1234aaab #define SKB_QUEUE_MAP 123 #define SKB_VLAN_TCI 0xffff +#define SKB_VLAN_PRESENT 1 #define SKB_DEV_IFINDEX 577 #define SKB_DEV_TYPE 588 @@ -725,8 +726,8 @@ static struct bpf_test tests[] = { CLASSIC, { }, { - { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }, - { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT } + { 1, SKB_VLAN_TCI }, + { 10, SKB_VLAN_TCI } }, }, { @@ -739,8 +740,8 @@ static struct bpf_test tests[] = { CLASSIC, { }, { - { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, - { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } + { 1, SKB_VLAN_PRESENT }, + { 10, SKB_VLAN_PRESENT } }, }, { @@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = { #endif { }, { - { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, - { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } + { 1, SKB_VLAN_PRESENT }, + { 10, SKB_VLAN_PRESENT } }, .fill_helper = bpf_fill_maxinsns6, .expected_errcode = -ENOTSUPP, @@ -6493,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size) skb->hash = SKB_HASH; skb->queue_mapping = SKB_QUEUE_MAP; skb->vlan_tci = SKB_VLAN_TCI; + skb->vlan_present = SKB_VLAN_PRESENT; skb->vlan_proto = htons(ETH_P_IP); dev_net_set(&dev, &init_net); skb->dev = &dev; diff --git a/lib/test_firmware.c b/lib/test_firmware.c index b984806d7d7b..7cab9a9869ac 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev, if (req->fw->size > PAGE_SIZE) { pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); rc = -EINVAL; + goto out; } memcpy(buf, req->fw->data, 
req->fw->size);
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 626f580b4ff7..5144899d3c6b 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
 		const char *q = *result++;
 		size_t amount = strlen(q);
 
-		strncpy(p, q, amount);
+		memcpy(p, q, amount);
 		p += amount;
 
 		*p++ = ' ';
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e3ddd836491f..d82d022111e0 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
 	dev_info(test_dev->dev, "removing interface\n");
 	misc_deregister(&test_dev->misc_dev);
-	kfree(&test_dev->misc_dev.name);
 
 	mutex_unlock(&test_dev->config_mutex);
 	mutex_unlock(&test_dev->trigger_mutex);
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 000000000000..ab57144bb0cd
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/objagg.h>
+
+struct tokey {
+	unsigned int id;
+};
+
+#define NUM_KEYS 32
+
+static int key_id_index(unsigned int key_id)
+{
+	if (key_id >= NUM_KEYS) {
+		WARN_ON(1);
+		return 0;
+	}
+	return key_id;
+}
+
+#define BUF_LEN 128
+
+struct world {
+	unsigned int root_count;
+	unsigned int delta_count;
+	char next_root_buf[BUF_LEN];
+	struct objagg_obj *objagg_objs[NUM_KEYS];
+	unsigned int key_refs[NUM_KEYS];
+};
+
+struct root {
+	struct tokey key;
+	char buf[BUF_LEN];
+};
+
+struct delta {
+	unsigned int key_id_diff;
+};
+
+static struct objagg_obj *world_obj_get(struct world *world,
+					struct objagg *objagg,
+					unsigned int key_id)
+{
+	struct objagg_obj *objagg_obj;
+	struct tokey key;
+	int err;
+
+	key.id = key_id;
+	objagg_obj = objagg_obj_get(objagg, &key);
+	if (IS_ERR(objagg_obj)) {
+		pr_err("Key %u: Failed to get object.\n", key_id);
+		return objagg_obj;
+	}
+	if (!world->key_refs[key_id_index(key_id)]) {
+		world->objagg_objs[key_id_index(key_id)] = objagg_obj;
+	} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
+		pr_err("Key %u: Got another object for the same key.\n",
+		       key_id);
+		err = -EINVAL;
+		goto err_key_id_check;
+	}
+	world->key_refs[key_id_index(key_id)]++;
+	return objagg_obj;
+
+err_key_id_check:
+	objagg_obj_put(objagg, objagg_obj);
+	return ERR_PTR(err);
+}
+
+static void world_obj_put(struct world *world, struct objagg *objagg,
+			  unsigned int key_id)
+{
+	struct objagg_obj *objagg_obj;
+
+	if (!world->key_refs[key_id_index(key_id)])
+		return;
+	objagg_obj = world->objagg_objs[key_id_index(key_id)];
+	objagg_obj_put(objagg, objagg_obj);
+	world->key_refs[key_id_index(key_id)]--;
+}
+
+#define MAX_KEY_ID_DIFF 5
+
+static void *delta_create(void *priv, void *parent_obj, void *obj)
+{
+	struct tokey *parent_key = parent_obj;
+	struct world *world = priv;
+	struct tokey *key = obj;
+	int diff = key->id - parent_key->id;
+	struct delta *delta;
+
+	if (diff < 0 || diff > MAX_KEY_ID_DIFF)
+		return ERR_PTR(-EINVAL);
+
+	delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+	if (!delta)
+		return ERR_PTR(-ENOMEM);
+	delta->key_id_diff = diff;
+	world->delta_count++;
+	return delta;
+}
+
+static void delta_destroy(void *priv, void *delta_priv)
+{
+	struct delta *delta = delta_priv;
+	struct world *world = priv;
+
+	world->delta_count--;
+
kfree(delta); +} + +static void *root_create(void *priv, void *obj) +{ + struct world *world = priv; + struct tokey *key = obj; + struct root *root; + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + return ERR_PTR(-ENOMEM); + memcpy(&root->key, key, sizeof(root->key)); + memcpy(root->buf, world->next_root_buf, sizeof(root->buf)); + world->root_count++; + return root; +} + +static void root_destroy(void *priv, void *root_priv) +{ + struct root *root = root_priv; + struct world *world = priv; + + world->root_count--; + kfree(root); +} + +static int test_nodelta_obj_get(struct world *world, struct objagg *objagg, + unsigned int key_id, bool should_create_root) +{ + unsigned int orig_root_count = world->root_count; + struct objagg_obj *objagg_obj; + const struct root *root; + int err; + + if (should_create_root) + prandom_bytes(world->next_root_buf, + sizeof(world->next_root_buf)); + + objagg_obj = world_obj_get(world, objagg, key_id); + if (IS_ERR(objagg_obj)) { + pr_err("Key %u: Failed to get object.\n", key_id); + return PTR_ERR(objagg_obj); + } + if (should_create_root) { + if (world->root_count != orig_root_count + 1) { + pr_err("Key %u: Root was not created\n", key_id); + err = -EINVAL; + goto err_check_root_count; + } + } else { + if (world->root_count != orig_root_count) { + pr_err("Key %u: Root was incorrectly created\n", + key_id); + err = -EINVAL; + goto err_check_root_count; + } + } + root = objagg_obj_root_priv(objagg_obj); + if (root->key.id != key_id) { + pr_err("Key %u: Root has unexpected key id\n", key_id); + err = -EINVAL; + goto err_check_key_id; + } + if (should_create_root && + memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) { + pr_err("Key %u: Buffer does not match the expected content\n", + key_id); + err = -EINVAL; + goto err_check_buf; + } + return 0; + +err_check_buf: +err_check_key_id: +err_check_root_count: + objagg_obj_put(objagg, objagg_obj); + return err; +} + +static int test_nodelta_obj_put(struct world *world, struct objagg *objagg, + unsigned int key_id, bool should_destroy_root) +{ + unsigned int orig_root_count = world->root_count; + + world_obj_put(world, objagg, key_id); + + if (should_destroy_root) { + if (world->root_count != orig_root_count - 1) { + pr_err("Key %u: Root was not destroyed\n", key_id); + return -EINVAL; + } + } else { + if (world->root_count != orig_root_count) { + pr_err("Key %u: Root was incorrectly destroyed\n", + key_id); + return -EINVAL; + } + } + return 0; +} + +static int check_stats_zero(struct objagg *objagg) +{ + const struct objagg_stats *stats; + int err = 0; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + + if (stats->stats_info_count != 0) { + pr_err("Stats: Object count is not zero while it should be\n"); + err = -EINVAL; + } + + objagg_stats_put(stats); + return err; +} + +static int check_stats_nodelta(struct objagg *objagg) +{ + const struct objagg_stats *stats; + int i; + int err; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + + if (stats->stats_info_count != NUM_KEYS) { + pr_err("Stats: Unexpected object count (%u expected, %u returned)\n", + NUM_KEYS, stats->stats_info_count); + err = -EINVAL; + goto stats_put; + } + + for (i = 0; i < stats->stats_info_count; i++) { + if (stats->stats_info[i].stats.user_count != 2) { + pr_err("Stats: incorrect user count\n"); + err = -EINVAL; + goto stats_put; + } + if (stats->stats_info[i].stats.delta_user_count != 2) { + pr_err("Stats: incorrect delta user count\n"); + err = 
-EINVAL; + goto stats_put; + } + } + err = 0; + +stats_put: + objagg_stats_put(stats); + return err; +} + +static void *delta_create_dummy(void *priv, void *parent_obj, void *obj) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static void delta_destroy_dummy(void *priv, void *delta_priv) +{ +} + +static const struct objagg_ops nodelta_ops = { + .obj_size = sizeof(struct tokey), + .delta_create = delta_create_dummy, + .delta_destroy = delta_destroy_dummy, + .root_create = root_create, + .root_destroy = root_destroy, +}; + +static int test_nodelta(void) +{ + struct world world = {}; + struct objagg *objagg; + int i; + int err; + + objagg = objagg_create(&nodelta_ops, &world); + if (IS_ERR(objagg)) + return PTR_ERR(objagg); + + err = check_stats_zero(objagg); + if (err) + goto err_stats_first_zero; + + /* First round of gets, the root objects should be created */ + for (i = 0; i < NUM_KEYS; i++) { + err = test_nodelta_obj_get(&world, objagg, i, true); + if (err) + goto err_obj_first_get; + } + + /* Do the second round of gets, all roots are already created, + * make sure that no new root is created + */ + for (i = 0; i < NUM_KEYS; i++) { + err = test_nodelta_obj_get(&world, objagg, i, false); + if (err) + goto err_obj_second_get; + } + + err = check_stats_nodelta(objagg); + if (err) + goto err_stats_nodelta; + + for (i = NUM_KEYS - 1; i >= 0; i--) { + err = test_nodelta_obj_put(&world, objagg, i, false); + if (err) + goto err_obj_first_put; + } + for (i = NUM_KEYS - 1; i >= 0; i--) { + err = test_nodelta_obj_put(&world, objagg, i, true); + if (err) + goto err_obj_second_put; + } + + err = check_stats_zero(objagg); + if (err) + goto err_stats_second_zero; + + objagg_destroy(objagg); + return 0; + +err_stats_nodelta: +err_obj_first_put: +err_obj_second_get: + for (i--; i >= 0; i--) + world_obj_put(&world, objagg, i); + + i = NUM_KEYS; +err_obj_first_get: +err_obj_second_put: + for (i--; i >= 0; i--) + world_obj_put(&world, objagg, i); +err_stats_first_zero: +err_stats_second_zero: + objagg_destroy(objagg); + return err; +} + +static const struct objagg_ops delta_ops = { + .obj_size = sizeof(struct tokey), + .delta_create = delta_create, + .delta_destroy = delta_destroy, + .root_create = root_create, + .root_destroy = root_destroy, +}; + +enum action { + ACTION_GET, + ACTION_PUT, +}; + +enum expect_delta { + EXPECT_DELTA_SAME, + EXPECT_DELTA_INC, + EXPECT_DELTA_DEC, +}; + +enum expect_root { + EXPECT_ROOT_SAME, + EXPECT_ROOT_INC, + EXPECT_ROOT_DEC, +}; + +struct expect_stats_info { + struct objagg_obj_stats stats; + bool is_root; + unsigned int key_id; +}; + +struct expect_stats { + unsigned int info_count; + struct expect_stats_info info[NUM_KEYS]; +}; + +struct action_item { + unsigned int key_id; + enum action action; + enum expect_delta expect_delta; + enum expect_root expect_root; + struct expect_stats expect_stats; +}; + +#define EXPECT_STATS(count, ...) 
\ +{ \ + .info_count = count, \ + .info = { __VA_ARGS__ } \ +} + +#define ROOT(key_id, user_count, delta_user_count) \ + {{user_count, delta_user_count}, true, key_id} + +#define DELTA(key_id, user_count) \ + {{user_count, user_count}, false, key_id} + +static const struct action_item action_items[] = { + { + 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(1, ROOT(1, 1, 1)), + }, /* r: 1 d: */ + { + 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)), + }, /* r: 1, 7 d: */ + { + 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1), + DELTA(3, 1)), + }, /* r: 1, 7 d: 3^1 */ + { + 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1), + DELTA(3, 1), DELTA(5, 1)), + }, /* r: 1, 7 d: 3^1, 5^1 */ + { + 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */ + { + 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */ + { + 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */ + { + 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */ + { + 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */ + { + 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */ + { + 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */ + { + 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */ + { + 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */ + { + 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC, + EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2)), + }, /* r: 7, 30 d: 8^7, 8^7 */ + { + 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1), + DELTA(8, 2)), + }, /* r: 7, 30, 5 d: 8^7, 8^7 */ + { + 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */ + { + 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 3), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_SAME, 
EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 1), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 6^5 */ + { + 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 7, 30, 5 d: 6^5, 8^5 */ + { + 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC, + EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 30, 5 d: 6^5, 8^5 */ + { + 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC, + EXPECT_STATS(3, ROOT(5, 1, 3), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 5 d: 6^5, 8^5 */ + { + 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(3, ROOT(5, 0, 2), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: d: 6^5, 8^5 */ + { + 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(2, ROOT(5, 0, 1), + DELTA(8, 1)), + }, /* r: d: 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC, + EXPECT_STATS(0, ), + }, /* r: d: */ +}; + +static int check_expect(struct world *world, + const struct action_item *action_item, + unsigned int orig_delta_count, + unsigned int orig_root_count) +{ + unsigned int key_id = action_item->key_id; + + switch (action_item->expect_delta) { + case EXPECT_DELTA_SAME: + if (orig_delta_count != world->delta_count) { + pr_err("Key %u: Delta count changed while expected to remain the same.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_DELTA_INC: + if (WARN_ON(action_item->action == ACTION_PUT)) + return -EINVAL; + if (orig_delta_count + 1 != world->delta_count) { + pr_err("Key %u: Delta count was not incremented.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_DELTA_DEC: + if (WARN_ON(action_item->action == ACTION_GET)) + return -EINVAL; + if (orig_delta_count - 1 != world->delta_count) { + pr_err("Key %u: Delta count was not decremented.\n", + key_id); + return -EINVAL; + } + break; + } + + switch (action_item->expect_root) { + case EXPECT_ROOT_SAME: + if (orig_root_count != world->root_count) { + pr_err("Key %u: Root count changed while expected to remain the same.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_ROOT_INC: + if (WARN_ON(action_item->action == ACTION_PUT)) + return -EINVAL; + if (orig_root_count + 1 != world->root_count) { + pr_err("Key %u: Root count was not incremented.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_ROOT_DEC: + if (WARN_ON(action_item->action == ACTION_GET)) + return -EINVAL; + if (orig_root_count - 1 != world->root_count) { + pr_err("Key %u: Root count was not decremented.\n", + key_id); + return -EINVAL; + } + } + + return 0; +} + +static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj) +{ + const struct tokey *root_key; + const struct delta *delta; + unsigned int key_id; + + root_key = objagg_obj_root_priv(objagg_obj); + key_id = root_key->id; + delta = objagg_obj_delta_priv(objagg_obj); + if (delta) + key_id += delta->key_id_diff; + return key_id; +} + +static int +check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info, + const struct expect_stats_info *expect_stats_info, + const char **errmsg) +{ + if (stats_info->is_root != expect_stats_info->is_root) { + if (errmsg) + *errmsg = "Incorrect root/delta indication"; + return -EINVAL; + } + if (stats_info->stats.user_count != + expect_stats_info->stats.user_count) { + 
if (errmsg) + *errmsg = "Incorrect user count"; + return -EINVAL; + } + if (stats_info->stats.delta_user_count != + expect_stats_info->stats.delta_user_count) { + if (errmsg) + *errmsg = "Incorrect delta user count"; + return -EINVAL; + } + return 0; +} + +static int +check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info, + const struct expect_stats_info *expect_stats_info, + const char **errmsg) +{ + if (obj_to_key_id(stats_info->objagg_obj) != + expect_stats_info->key_id) { + if (errmsg) + *errmsg = "incorrect key id"; + return -EINVAL; + } + return 0; +} + +static int check_expect_stats_neigh(const struct objagg_stats *stats, + const struct expect_stats *expect_stats, + int pos) +{ + int i; + int err; + + for (i = pos - 1; i >= 0; i--) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (err) + break; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (!err) + return 0; + } + for (i = pos + 1; i < stats->stats_info_count; i++) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (err) + break; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (!err) + return 0; + } + return -EINVAL; +} + +static int __check_expect_stats(const struct objagg_stats *stats, + const struct expect_stats *expect_stats, + const char **errmsg) +{ + int i; + int err; + + if (stats->stats_info_count != expect_stats->info_count) { + *errmsg = "Unexpected object count"; + return -EINVAL; + } + + for (i = 0; i < stats->stats_info_count; i++) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[i], errmsg); + if (err) + return err; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[i], errmsg); + if (err) { + /* It is possible that one of the neighbor stats with + * same numbers have the correct key id, so check it + */ + err = check_expect_stats_neigh(stats, expect_stats, i); + if (err) + return err; + } + } + return 0; +} + +static int check_expect_stats(struct objagg *objagg, + const struct expect_stats *expect_stats, + const char **errmsg) +{ + const struct objagg_stats *stats; + int err; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + err = __check_expect_stats(stats, expect_stats, errmsg); + objagg_stats_put(stats); + return err; +} + +static int test_delta_action_item(struct world *world, + struct objagg *objagg, + const struct action_item *action_item, + bool inverse) +{ + unsigned int orig_delta_count = world->delta_count; + unsigned int orig_root_count = world->root_count; + unsigned int key_id = action_item->key_id; + enum action action = action_item->action; + struct objagg_obj *objagg_obj; + const char *errmsg; + int err; + + if (inverse) + action = action == ACTION_GET ? ACTION_PUT : ACTION_GET; + + switch (action) { + case ACTION_GET: + objagg_obj = world_obj_get(world, objagg, key_id); + if (IS_ERR(objagg_obj)) + return PTR_ERR(objagg_obj); + break; + case ACTION_PUT: + world_obj_put(world, objagg, key_id); + break; + } + + if (inverse) + return 0; + err = check_expect(world, action_item, + orig_delta_count, orig_root_count); + if (err) + goto errout; + + errmsg = NULL; + err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg); + if (err) { + pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg); + goto errout; + } + + return 0; + +errout: + /* This can only happen when action is not inversed. 
+ * So in case of an error, cleanup by doing inverse action. + */ + test_delta_action_item(world, objagg, action_item, true); + return err; +} + +static int test_delta(void) +{ + struct world world = {}; + struct objagg *objagg; + int i; + int err; + + objagg = objagg_create(&delta_ops, &world); + if (IS_ERR(objagg)) + return PTR_ERR(objagg); + + for (i = 0; i < ARRAY_SIZE(action_items); i++) { + err = test_delta_action_item(&world, objagg, + &action_items[i], false); + if (err) + goto err_do_action_item; + } + + objagg_destroy(objagg); + return 0; + +err_do_action_item: + for (i--; i >= 0; i--) + test_delta_action_item(&world, objagg, &action_items[i], true); + + objagg_destroy(objagg); + return err; +} + +static int __init test_objagg_init(void) +{ + int err; + + err = test_nodelta(); + if (err) + return err; + return test_delta(); +} + +static void __exit test_objagg_exit(void) +{ +} + +module_init(test_objagg_init); +module_exit(test_objagg_exit); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Test module for objagg"); diff --git a/lib/test_xarray.c b/lib/test_xarray.c index aa47754150ce..0598e86af8fc 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -208,15 +208,19 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); /* We should see two elements in the array */ + rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) seen++; + rcu_read_unlock(); XA_BUG_ON(xa, seen != 2); /* One of which is marked */ xas_set(&xas, 0); seen = 0; + rcu_read_lock(); xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) seen++; + rcu_read_unlock(); XA_BUG_ON(xa, seen != 1); } XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); @@ -373,6 +377,12 @@ static noinline void check_reserve(struct xarray *xa) xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); + /* And so does xa_insert */ + xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0); + xa_erase_index(xa, 12345678); + XA_BUG_ON(xa, !xa_empty(xa)); + /* Can iterate through a reserved entry */ xa_store_index(xa, 5, GFP_KERNEL); xa_reserve(xa, 6, GFP_KERNEL); @@ -436,7 +446,9 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, XA_BUG_ON(xa, xa_load(xa, max) != NULL); XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); + xas_lock(&xas); XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index)); + xas_unlock(&xas); XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min)); XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min)); XA_BUG_ON(xa, xa_load(xa, max) != NULL); @@ -452,9 +464,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, XA_STATE(xas, xa, index); xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); + xas_lock(&xas); XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); XA_BUG_ON(xa, xas.xa_index != index); XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); + xas_unlock(&xas); XA_BUG_ON(xa, !xa_empty(xa)); } #endif @@ -498,7 +512,7 @@ static noinline void check_multi_store(struct xarray *xa) rcu_read_unlock(); /* We can erase multiple values with a single store */ - xa_store_order(xa, 0, 63, NULL, GFP_KERNEL); + xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); XA_BUG_ON(xa, !xa_empty(xa)); /* Even when the first slot is empty but the others aren't */ @@ -702,7 +716,7 @@ static noinline void check_multi_find_2(struct xarray *xa) } } -static noinline 
void check_find(struct xarray *xa) +static noinline void check_find_1(struct xarray *xa) { unsigned long i, j, k; @@ -748,6 +762,34 @@ static noinline void check_find(struct xarray *xa) XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); } XA_BUG_ON(xa, !xa_empty(xa)); +} + +static noinline void check_find_2(struct xarray *xa) +{ + void *entry; + unsigned long i, j, index = 0; + + xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + XA_BUG_ON(xa, true); + } + + for (i = 0; i < 1024; i++) { + xa_store_index(xa, index, GFP_KERNEL); + j = 0; + index = 0; + xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { + XA_BUG_ON(xa, xa_mk_value(index) != entry); + XA_BUG_ON(xa, index != j++); + } + } + + xa_destroy(xa); +} + +static noinline void check_find(struct xarray *xa) +{ + check_find_1(xa); + check_find_2(xa); check_multi_find(xa); check_multi_find_2(xa); } @@ -1067,7 +1109,7 @@ static noinline void check_store_range(struct xarray *xa) __check_store_range(xa, 4095 + i, 4095 + j); __check_store_range(xa, 4096 + i, 4096 + j); __check_store_range(xa, 123456 + i, 123456 + j); - __check_store_range(xa, UINT_MAX + i, UINT_MAX + j); + __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); } } } @@ -1146,10 +1188,12 @@ static noinline void check_account(struct xarray *xa) XA_STATE(xas, xa, 1 << order); xa_store_order(xa, 0, order, xa, GFP_KERNEL); + rcu_read_lock(); xas_load(&xas); XA_BUG_ON(xa, xas.xa_node->count == 0); XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); XA_BUG_ON(xa, xas.xa_node->nr_values != 0); + rcu_read_unlock(); xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), GFP_KERNEL); diff --git a/lib/ubsan.c b/lib/ubsan.c index 59fee96c29a0..e4162f59a81c 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); -void __noreturn -__ubsan_handle_builtin_unreachable(struct unreachable_data *data) +void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) { unsigned long flags; diff --git a/lib/xarray.c b/lib/xarray.c index 8b176f009c08..bbacca576593 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -610,8 +610,8 @@ static int xas_expand(struct xa_state *xas, void *head) * (see the xa_cmpxchg() implementation for an example). * * Return: If the slot already existed, returns the contents of this slot. - * If the slot was newly created, returns NULL. If it failed to create the - * slot, returns NULL and indicates the error in @xas. + * If the slot was newly created, returns %NULL. If it failed to create the + * slot, returns %NULL and indicates the error in @xas. */ static void *xas_create(struct xa_state *xas) { @@ -1334,44 +1334,31 @@ void *__xa_erase(struct xarray *xa, unsigned long index) XA_STATE(xas, xa, index); return xas_result(&xas, xas_store(&xas, NULL)); } -EXPORT_SYMBOL_GPL(__xa_erase); +EXPORT_SYMBOL(__xa_erase); /** - * xa_store() - Store this entry in the XArray. + * xa_erase() - Erase this entry from the XArray. * @xa: XArray. - * @index: Index into array. - * @entry: New entry. - * @gfp: Memory allocation flags. + * @index: Index of entry. * - * After this function returns, loads from this index will return @entry. - * Storing into an existing multislot entry updates the entry of every index. - * The marks associated with @index are unaffected unless @entry is %NULL. + * This function is the equivalent of calling xa_store() with %NULL as + * the third argument. 
The XArray does not need to allocate memory, so + * the user does not need to provide GFP flags. * - * Context: Process context. Takes and releases the xa_lock. May sleep - * if the @gfp flags permit. - * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry - * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation - * failed. + * Context: Any context. Takes and releases the xa_lock. + * Return: The entry which used to be at this index. */ -void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) +void *xa_erase(struct xarray *xa, unsigned long index) { - XA_STATE(xas, xa, index); - void *curr; - - if (WARN_ON_ONCE(xa_is_internal(entry))) - return XA_ERROR(-EINVAL); + void *entry; - do { - xas_lock(&xas); - curr = xas_store(&xas, entry); - if (xa_track_free(xa) && entry) - xas_clear_mark(&xas, XA_FREE_MARK); - xas_unlock(&xas); - } while (xas_nomem(&xas, gfp)); + xa_lock(xa); + entry = __xa_erase(xa, index); + xa_unlock(xa); - return xas_result(&xas, curr); + return entry; } -EXPORT_SYMBOL(xa_store); +EXPORT_SYMBOL(xa_erase); /** * __xa_store() - Store this entry in the XArray. @@ -1395,10 +1382,12 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) if (WARN_ON_ONCE(xa_is_internal(entry))) return XA_ERROR(-EINVAL); + if (xa_track_free(xa) && !entry) + entry = XA_ZERO_ENTRY; do { curr = xas_store(&xas, entry); - if (xa_track_free(xa) && entry) + if (xa_track_free(xa)) xas_clear_mark(&xas, XA_FREE_MARK); } while (__xas_nomem(&xas, gfp)); @@ -1407,45 +1396,33 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) EXPORT_SYMBOL(__xa_store); /** - * xa_cmpxchg() - Conditionally replace an entry in the XArray. + * xa_store() - Store this entry in the XArray. * @xa: XArray. * @index: Index into array. - * @old: Old value to test against. - * @entry: New value to place in array. + * @entry: New entry. * @gfp: Memory allocation flags. * - * If the entry at @index is the same as @old, replace it with @entry. - * If the return value is equal to @old, then the exchange was successful. + * After this function returns, loads from this index will return @entry. + * Storing into an existing multislot entry updates the entry of every index. + * The marks associated with @index are unaffected unless @entry is %NULL. * - * Context: Process context. Takes and releases the xa_lock. May sleep - * if the @gfp flags permit. - * Return: The old value at this index or xa_err() if an error happened. + * Context: Any context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry + * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation + * failed. 
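+ *
+ * A brief usage sketch (illustrative only, not part of the original
+ * patch; assumes an XArray defined with DEFINE_XARRAY(xa)):
+ *
+ *	void *old;
+ *
+ *	old = xa_store(&xa, 7, xa_mk_value(7), GFP_KERNEL);
+ *	if (xa_is_err(old))
+ *		return xa_err(old);
+ *	...
+ *	xa_erase(&xa, 7);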
*/ -void *xa_cmpxchg(struct xarray *xa, unsigned long index, - void *old, void *entry, gfp_t gfp) +void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) { - XA_STATE(xas, xa, index); void *curr; - if (WARN_ON_ONCE(xa_is_internal(entry))) - return XA_ERROR(-EINVAL); - - do { - xas_lock(&xas); - curr = xas_load(&xas); - if (curr == XA_ZERO_ENTRY) - curr = NULL; - if (curr == old) { - xas_store(&xas, entry); - if (xa_track_free(xa) && entry) - xas_clear_mark(&xas, XA_FREE_MARK); - } - xas_unlock(&xas); - } while (xas_nomem(&xas, gfp)); + xa_lock(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock(xa); - return xas_result(&xas, curr); + return curr; } -EXPORT_SYMBOL(xa_cmpxchg); +EXPORT_SYMBOL(xa_store); /** * __xa_cmpxchg() - Store this entry in the XArray. @@ -1471,6 +1448,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, if (WARN_ON_ONCE(xa_is_internal(entry))) return XA_ERROR(-EINVAL); + if (xa_track_free(xa) && !entry) + entry = XA_ZERO_ENTRY; do { curr = xas_load(&xas); @@ -1478,7 +1457,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, curr = NULL; if (curr == old) { xas_store(&xas, entry); - if (xa_track_free(xa) && entry) + if (xa_track_free(xa)) xas_clear_mark(&xas, XA_FREE_MARK); } } while (__xas_nomem(&xas, gfp)); @@ -1488,7 +1467,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, EXPORT_SYMBOL(__xa_cmpxchg); /** - * xa_reserve() - Reserve this index in the XArray. + * __xa_reserve() - Reserve this index in the XArray. * @xa: XArray. * @index: Index into array. * @gfp: Memory allocation flags. @@ -1496,33 +1475,32 @@ EXPORT_SYMBOL(__xa_cmpxchg); * Ensures there is somewhere to store an entry at @index in the array. * If there is already something stored at @index, this function does * nothing. If there was nothing there, the entry is marked as reserved. - * Loads from @index will continue to see a %NULL pointer until a - * subsequent store to @index. + * Loading from a reserved entry returns a %NULL pointer. * * If you do not use the entry that you have reserved, call xa_release() * or xa_erase() to free any unnecessary memory. * - * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe - * if specified in XArray flags. May sleep if the @gfp flags permit. + * Context: Any context. Expects the xa_lock to be held on entry. May + * release the lock, sleep and reacquire the lock if the @gfp flags permit. * Return: 0 if the reservation succeeded or -ENOMEM if it failed. */ -int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) +int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) { XA_STATE(xas, xa, index); - unsigned int lock_type = xa_lock_type(xa); void *curr; do { - xas_lock_type(&xas, lock_type); curr = xas_load(&xas); - if (!curr) + if (!curr) { xas_store(&xas, XA_ZERO_ENTRY); - xas_unlock_type(&xas, lock_type); - } while (xas_nomem(&xas, gfp)); + if (xa_track_free(xa)) + xas_clear_mark(&xas, XA_FREE_MARK); + } + } while (__xas_nomem(&xas, gfp)); return xas_error(&xas); } -EXPORT_SYMBOL(xa_reserve); +EXPORT_SYMBOL(__xa_reserve); #ifdef CONFIG_XARRAY_MULTI static void xas_set_range(struct xa_state *xas, unsigned long first, @@ -1587,8 +1565,9 @@ void *xa_store_range(struct xarray *xa, unsigned long first, do { xas_lock(&xas); if (entry) { - unsigned int order = (last == ~0UL) ? 
64 : - ilog2(last + 1); + unsigned int order = BITS_PER_LONG; + if (last + 1) + order = __ffs(last + 1); xas_set_order(&xas, last, order); xas_create(&xas); if (xas_error(&xas)) @@ -1662,7 +1641,7 @@ EXPORT_SYMBOL(__xa_alloc); * @index: Index of entry. * @mark: Mark number. * - * Attempting to set a mark on a NULL entry does not succeed. + * Attempting to set a mark on a %NULL entry does not succeed. * * Context: Any context. Expects xa_lock to be held on entry. */ @@ -1674,7 +1653,7 @@ void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) if (entry) xas_set_mark(&xas, mark); } -EXPORT_SYMBOL_GPL(__xa_set_mark); +EXPORT_SYMBOL(__xa_set_mark); /** * __xa_clear_mark() - Clear this mark on this entry while locked. @@ -1692,7 +1671,7 @@ void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) if (entry) xas_clear_mark(&xas, mark); } -EXPORT_SYMBOL_GPL(__xa_clear_mark); +EXPORT_SYMBOL(__xa_clear_mark); /** * xa_get_mark() - Inquire whether this mark is set on this entry. @@ -1732,7 +1711,7 @@ EXPORT_SYMBOL(xa_get_mark); * @index: Index of entry. * @mark: Mark number. * - * Attempting to set a mark on a NULL entry does not succeed. + * Attempting to set a mark on a %NULL entry does not succeed. * * Context: Process context. Takes and releases the xa_lock. */ @@ -1829,6 +1808,8 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp, entry = xas_find_marked(&xas, max, filter); else entry = xas_find(&xas, max); + if (xas.xa_node == XAS_BOUNDS) + break; if (xas.xa_shift) { if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) continue; @@ -1899,7 +1880,7 @@ static unsigned int xas_extract_marked(struct xa_state *xas, void **dst, * * The @filter may be an XArray mark value, in which case entries which are * marked with that mark will be copied. It may also be %XA_PRESENT, in - * which case all entries which are not NULL will be copied. + * which case all entries which are not %NULL will be copied. * * The entries returned may not represent a snapshot of the XArray at a * moment in time. For example, if another thread stores to index 5, then |