From 7e936bd73483eb1873b9ab6f375c9a1d781bc5cb Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Wed, 20 Sep 2017 01:12:11 +0200
Subject: test_rhashtable: don't allocate huge static array

Signed-off-by: Florian Westphal
Signed-off-by: David S. Miller
---
 lib/test_rhashtable.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

(limited to 'lib')

diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 0ffca990a833..c40d6e636f33 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -72,8 +72,6 @@ struct thread_data {
 	struct test_obj *objs;
 };
 
-static struct test_obj array[MAX_ENTRIES];
-
 static struct rhashtable_params test_rht_params = {
 	.head_offset = offsetof(struct test_obj, node),
 	.key_offset = offsetof(struct test_obj, value),
@@ -85,7 +83,7 @@ static struct rhashtable_params test_rht_params = {
 static struct semaphore prestart_sem;
 static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
 
-static int insert_retry(struct rhashtable *ht, struct rhash_head *obj,
+static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
 			const struct rhashtable_params params)
 {
 	int err, retries = -1, enomem_retries = 0;
@@ -93,7 +91,7 @@ static int insert_retry(struct rhashtable *ht, struct rhash_head *obj,
 	do {
 		retries++;
 		cond_resched();
-		err = rhashtable_insert_fast(ht, obj, params);
+		err = rhashtable_insert_fast(ht, &obj->node, params);
 		if (err == -ENOMEM && enomem_retry) {
 			enomem_retries++;
 			err = -EBUSY;
@@ -107,7 +105,7 @@ static int insert_retry(struct rhashtable *ht, struct rhash_head *obj,
 	return err ? : retries;
 }
 
-static int __init test_rht_lookup(struct rhashtable *ht)
+static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array)
 {
 	unsigned int i;
 
@@ -186,7 +184,7 @@ static void test_bucket_stats(struct rhashtable *ht)
 		pr_warn("Test failed: Total count mismatch ^^^");
 }
 
-static s64 __init test_rhashtable(struct rhashtable *ht)
+static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array)
 {
 	struct test_obj *obj;
 	int err;
@@ -203,7 +201,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 		struct test_obj *obj = &array[i];
 
 		obj->value.id = i * 2;
-		err = insert_retry(ht, &obj->node, test_rht_params);
+		err = insert_retry(ht, obj, test_rht_params);
 		if (err > 0)
 			insert_retries += err;
 		else if (err)
@@ -216,7 +214,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 	test_bucket_stats(ht);
 
 	rcu_read_lock();
-	test_rht_lookup(ht);
+	test_rht_lookup(ht, array);
 	rcu_read_unlock();
 
 	test_bucket_stats(ht);
@@ -286,7 +284,7 @@ static int threadfunc(void *data)
 	for (i = 0; i < entries; i++) {
 		tdata->objs[i].value.id = i;
 		tdata->objs[i].value.tid = tdata->id;
-		err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params);
+		err = insert_retry(&ht, &tdata->objs[i], test_rht_params);
 		if (err > 0) {
 			insert_retries += err;
 		} else if (err) {
@@ -349,6 +347,10 @@ static int __init test_rht_init(void)
 	test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
 	test_rht_params.nelem_hint = size;
 
+	objs = vzalloc((test_rht_params.max_size + 1) * sizeof(struct test_obj));
+	if (!objs)
+		return -ENOMEM;
+
 	pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
 		size, max_size, shrinking);
 
@@ -356,7 +358,8 @@ static int __init test_rht_init(void)
 		s64 time;
 
 		pr_info("Test %02d:\n", i);
-		memset(&array, 0, sizeof(array));
+		memset(objs, 0, test_rht_params.max_size * sizeof(struct test_obj));
+
 		err = rhashtable_init(&ht, &test_rht_params);
 		if (err < 0) {
 			pr_warn("Test failed: Unable to initialize hashtable: %d\n",
@@ -364,9 +367,10 @@ static int __init test_rht_init(void)
 			continue;
 		}
 
-		time = test_rhashtable(&ht);
+		time = test_rhashtable(&ht, objs);
 		rhashtable_destroy(&ht);
 		if (time < 0) {
+			vfree(objs);
 			pr_warn("Test failed: return code %lld\n", time);
 			return -EINVAL;
 		}
@@ -374,6 +378,7 @@ static int __init test_rht_init(void)
 		total_time += time;
 	}
 
+	vfree(objs);
 	do_div(total_time, runs);
 	pr_info("Average test time: %llu\n", total_time);
-- 
cgit v1.2.3

From f651616e799ff01d1e21abc053ee1e23ac1a2344 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Wed, 20 Sep 2017 01:12:12 +0200
Subject: test_rhashtable: don't use global entries variable

pass the entries to test as an argument instead.
Followup patch will add an rhlist test case; rhlist delete operations
are slow so we need to use a smaller number to test it.

Signed-off-by: Florian Westphal
Signed-off-by: David S. Miller
---
 lib/test_rhashtable.c | 37 +++++++++++++++++++++++--------------
 1 file changed, 23 insertions(+), 14 deletions(-)

(limited to 'lib')

diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c40d6e636f33..69f5b3849980 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -28,9 +28,9 @@
 #define MAX_ENTRIES 1000000
 #define TEST_INSERT_FAIL INT_MAX
 
-static int entries = 50000;
-module_param(entries, int, 0);
-MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)");
+static int parm_entries = 50000;
+module_param(parm_entries, int, 0);
+MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");
 
 static int runs = 4;
 module_param(runs, int, 0);
@@ -67,6 +67,7 @@ struct test_obj {
 };
 
 struct thread_data {
+	unsigned int entries;
 	int id;
 	struct task_struct *task;
 	struct test_obj *objs;
@@ -105,11 +106,12 @@ static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
 	return err ? : retries;
 }
 
-static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array)
+static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array,
+				  unsigned int entries)
 {
 	unsigned int i;
 
-	for (i = 0; i < entries * 2; i++) {
+	for (i = 0; i < entries; i++) {
 		struct test_obj *obj;
 		bool expected = !(i % 2);
 		struct test_obj_val key = {
@@ -142,7 +144,7 @@ static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array)
 	return 0;
 }
 
-static void test_bucket_stats(struct rhashtable *ht)
+static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
 {
 	unsigned int err, total = 0, chain_len = 0;
 	struct rhashtable_iter hti;
@@ -184,7 +186,8 @@ static void test_bucket_stats(struct rhashtable *ht)
 		pr_warn("Test failed: Total count mismatch ^^^");
 }
 
-static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array)
+static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
+				  unsigned int entries)
 {
 	struct test_obj *obj;
 	int err;
@@ -212,12 +215,12 @@ static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array)
 		pr_info("  %u insertions retried due to memory pressure\n", insert_retries);
 
-	test_bucket_stats(ht);
+	test_bucket_stats(ht, entries);
 
 	rcu_read_lock();
-	test_rht_lookup(ht, array);
+	test_rht_lookup(ht, array, entries);
 	rcu_read_unlock();
 
-	test_bucket_stats(ht);
+	test_bucket_stats(ht, entries);
 
 	pr_info("  Deleting %d keys\n", entries);
 	for (i = 0; i < entries; i++) {
@@ -245,6 +248,7 @@ static struct rhashtable ht;
 
 static int thread_lookup_test(struct thread_data *tdata)
 {
+	unsigned int entries = tdata->entries;
 	int i, err = 0;
 
 	for (i = 0; i < entries; i++) {
@@ -281,7 +285,7 @@ static int threadfunc(void *data)
 	if (down_interruptible(&startup_sem))
 		pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
 
-	for (i = 0; i < entries; i++) {
+	for (i = 0; i < tdata->entries; i++) {
 		tdata->objs[i].value.id = i;
 		tdata->objs[i].value.tid = tdata->id;
 		err = insert_retry(&ht, &tdata->objs[i], test_rht_params);
@@ -305,7 +309,7 @@ static int threadfunc(void *data)
 	}
 
 	for (step = 10; step > 0; step--) {
-		for (i = 0; i < entries; i += step) {
+		for (i = 0; i < tdata->entries; i += step) {
 			if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
 				continue;
 			err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
@@ -336,12 +340,16 @@ out:
 
 static int __init test_rht_init(void)
 {
+	unsigned int entries;
 	int i, err, started_threads = 0, failed_threads = 0;
 	u64 total_time = 0;
 	struct thread_data *tdata;
 	struct test_obj *objs;
 
-	entries = min(entries, MAX_ENTRIES);
+	if (parm_entries < 0)
+		parm_entries = 1;
+
+	entries = min(parm_entries, MAX_ENTRIES);
 	test_rht_params.automatic_shrinking = shrinking;
 	test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
@@ -367,7 +375,7 @@ static int __init test_rht_init(void)
 			continue;
 		}
 
-		time = test_rhashtable(&ht, objs);
+		time = test_rhashtable(&ht, objs, entries);
 		rhashtable_destroy(&ht);
 		if (time < 0) {
 			vfree(objs);
@@ -409,6 +417,7 @@ static int __init test_rht_init(void)
 	}
 	for (i = 0; i < tcount; i++) {
 		tdata[i].id = i;
+		tdata[i].entries = entries;
 		tdata[i].objs = objs + i * entries;
 		tdata[i].task = kthread_run(threadfunc, &tdata[i],
 					    "rhashtable_thrad[%d]", i);
-- 
cgit v1.2.3

From a6359bd8dd1c3a15c09e7dbb533bf89d13b42acd Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Wed, 20 Sep 2017 01:12:13 +0200
Subject: test_rhashtable: add a check for max_size

add a test that tries to insert more than max_size elements.
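In sketch form, the behaviour the new test expects (illustrative only, not
part of the applied diff; it reuses the module's own insert_retry() helper
and assumes an array of at least ht.max_elems + 1 zeroed objects):

	unsigned int i;
	int err;

	for (i = 0; i < ht.max_elems; i++) {
		array[i].value.id = i * 2;	/* distinct keys */
		err = insert_retry(&ht, &array[i], test_rht_params);
		if (err < 0)
			return err;		/* unexpected failure */
	}

	/* table is now at capacity: one more insert must fail with -E2BIG */
	err = insert_retry(&ht, &array[ht.max_elems], test_rht_params);
	WARN_ON(err != -E2BIG);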
Signed-off-by: Florian Westphal
Signed-off-by: David S. Miller
---
 lib/test_rhashtable.c | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

(limited to 'lib')

diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 69f5b3849980..1eee90e6e394 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -246,6 +246,43 @@ static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
 
 static struct rhashtable ht;
 
+static int __init test_rhashtable_max(struct test_obj *array,
+				      unsigned int entries)
+{
+	unsigned int i, insert_retries = 0;
+	int err;
+
+	test_rht_params.max_size = roundup_pow_of_two(entries / 8);
+	err = rhashtable_init(&ht, &test_rht_params);
+	if (err)
+		return err;
+
+	for (i = 0; i < ht.max_elems; i++) {
+		struct test_obj *obj = &array[i];
+
+		obj->value.id = i * 2;
+		err = insert_retry(&ht, obj, test_rht_params);
+		if (err > 0)
+			insert_retries += err;
+		else if (err)
+			return err;
+	}
+
+	err = insert_retry(&ht, &array[ht.max_elems], test_rht_params);
+	if (err == -E2BIG) {
+		err = 0;
+	} else {
+		pr_info("insert element %u should have failed with %d, got %d\n",
+			ht.max_elems, -E2BIG, err);
+		if (err == 0)
+			err = -1;
+	}
+
+	rhashtable_destroy(&ht);
+
+	return err;
+}
+
 static int thread_lookup_test(struct thread_data *tdata)
 {
 	unsigned int entries = tdata->entries;
@@ -386,7 +423,11 @@ static int __init test_rht_init(void)
 		total_time += time;
 	}
 
+	pr_info("test if its possible to exceed max_size %d: %s\n",
+		test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ?
+		"no, ok" : "YES, failed");
 	vfree(objs);
+
 	do_div(total_time, runs);
 	pr_info("Average test time: %llu\n", total_time);
-- 
cgit v1.2.3

From cdd4de372ea06a18e104100b9f2c442527d26db1 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Wed, 20 Sep 2017 01:12:14 +0200
Subject: test_rhashtable: add test case for rhl_table interface

also test rhltable. rhltable remove operations are slow as deletions
require a list walk, thus test with 1/16th of the given entry count to
get a run duration similar to the rhashtable one.

Signed-off-by: Florian Westphal
Signed-off-by: David S. Miller
---
 lib/test_rhashtable.c | 196 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 194 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 1eee90e6e394..de4d0584631a 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -23,6 +23,7 @@
 #include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 #include <linux/vmalloc.h>
 
 #define MAX_ENTRIES 1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -66,6 +67,11 @@ struct test_obj {
 	struct rhash_head node;
 };
 
+struct test_obj_rhl {
+	struct test_obj_val value;
+	struct rhlist_head list_node;
+};
+
 struct thread_data {
 	unsigned int entries;
 	int id;
 	struct task_struct *task;
 	struct test_obj *objs;
@@ -245,6 +251,186 @@ static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
 }
 
 static struct rhashtable ht;
+static struct rhltable rhlt __initdata;
+
+static int __init test_rhltable(unsigned int entries)
+{
+	struct test_obj_rhl *rhl_test_objects;
+	unsigned long *obj_in_table;
+	unsigned int i, j, k;
+	int ret, err;
+
+	if (entries == 0)
+		entries = 1;
+
+	rhl_test_objects = vzalloc(sizeof(*rhl_test_objects) * entries);
+	if (!rhl_test_objects)
+		return -ENOMEM;
+
+	ret = -ENOMEM;
+	obj_in_table = vzalloc(BITS_TO_LONGS(entries) * sizeof(unsigned long));
+	if (!obj_in_table)
+		goto out_free;
+
+	/* nulls_base not supported in rhlist interface */
+	test_rht_params.nulls_base = 0;
+	err = rhltable_init(&rhlt, &test_rht_params);
+	if (WARN_ON(err))
+		goto out_free;
+
+	k = prandom_u32();
+	ret = 0;
+	for (i = 0; i < entries; i++) {
+		rhl_test_objects[i].value.id = k;
+		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
+				      test_rht_params);
+		if (WARN(err, "error %d on element %d\n", err, i))
+			break;
+		if (err == 0)
+			set_bit(i, obj_in_table);
+	}
+
+	if (err)
+		ret = err;
+
+	pr_info("test %d add/delete pairs into rhlist\n", entries);
+	for (i = 0; i < entries; i++) {
+		struct rhlist_head *h, *pos;
+		struct test_obj_rhl *obj;
+		struct test_obj_val key = {
+			.id = k,
+		};
+		bool found;
+
+		rcu_read_lock();
+		h = rhltable_lookup(&rhlt, &key, test_rht_params);
+		if (WARN(!h, "key not found during iteration %d of %d", i, entries)) {
+			rcu_read_unlock();
+			break;
+		}
+
+		if (i) {
+			j = i - 1;
+			rhl_for_each_entry_rcu(obj, pos, h, list_node) {
+				if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone"))
+					break;
+			}
+		}
+
+		cond_resched_rcu();
+
+		found = false;
+
+		rhl_for_each_entry_rcu(obj, pos, h, list_node) {
+			if (pos == &rhl_test_objects[i].list_node) {
+				found = true;
+				break;
+			}
+		}
+
+		rcu_read_unlock();
+
+		if (WARN(!found, "element %d not found", i))
+			break;
+
+		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+		WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i);
+		if (err == 0)
+			clear_bit(i, obj_in_table);
+	}
+
+	if (ret == 0 && err)
+		ret = err;
+
+	for (i = 0; i < entries; i++) {
+		WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i);
+
+		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
+				      test_rht_params);
+		if (WARN(err, "error %d on element %d\n", err, i))
+			break;
+		if (err == 0)
+			set_bit(i, obj_in_table);
+	}
+
+	pr_info("test %d random rhlist add/delete operations\n", entries);
+	for (j = 0; j < entries; j++) {
+		u32 i = prandom_u32_max(entries);
+		u32 prand = prandom_u32();
+
+		cond_resched();
+
+		if (prand == 0)
+			prand = prandom_u32();
+
+		if (prand & 1) {
+			prand >>= 1;
+			continue;
+		}
+
+		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+		if (test_bit(i, obj_in_table)) {
+			clear_bit(i, obj_in_table);
+			if (WARN(err, "cannot remove element at slot %d", i))
+				continue;
+		} else {
+			if (WARN(err != -ENOENT, "removed non-existant element %d, error %d not %d",
+				 i, err, -ENOENT))
+				continue;
+		}
+
+		if (prand & 1) {
+			prand >>= 1;
+			continue;
+		}
+
+		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+		if (err == 0) {
+			if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
+				continue;
+		} else {
+			if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
+				continue;
+		}
+
+		if (prand & 1) {
+			prand >>= 1;
+			continue;
+		}
+
+		i = prandom_u32_max(entries);
+		if (test_bit(i, obj_in_table)) {
+			err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+			WARN(err, "cannot remove element at slot %d", i);
+			if (err == 0)
+				clear_bit(i, obj_in_table);
+		} else {
+			err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+			WARN(err, "failed to insert object %d", i);
+			if (err == 0)
+				set_bit(i, obj_in_table);
+		}
+	}
+
+	for (i = 0; i < entries; i++) {
+		cond_resched();
+		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+		if (test_bit(i, obj_in_table)) {
+			if (WARN(err, "cannot remove element at slot %d", i))
+				continue;
+		} else {
+			if (WARN(err != -ENOENT, "removed non-existant element, error %d not %d",
+				 err, -ENOENT))
+				continue;
+		}
+	}
+
+	rhltable_destroy(&rhlt);
+out_free:
+	vfree(rhl_test_objects);
+	vfree(obj_in_table);
+	return ret;
+}
 
 static int __init test_rhashtable_max(struct test_obj *array,
 				      unsigned int entries)
@@ -480,11 +666,17 @@ static int __init test_rht_init(void)
 			failed_threads++;
 		}
 	}
-	pr_info("Started %d threads, %d failed\n",
-		started_threads, failed_threads);
 	rhashtable_destroy(&ht);
 	vfree(tdata);
 	vfree(objs);
+
+	/*
+	 * rhltable_remove is very expensive, default values can cause test
+	 * to run for 2 minutes or more, use a smaller number instead.
+	 */
+	err = test_rhltable(entries / 16);
+	pr_info("Started %d threads, %d failed, rhltable test returns %d\n",
+		started_threads, failed_threads, err);
 	return 0;
 }
-- 
cgit v1.2.3

From 16dff336b33d87c15d9cbe933cfd275aae2a8251 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 19 Sep 2017 16:27:03 -0700
Subject: kobject: add kobject_uevent_net_broadcast()

This removes some #ifdef pollution and will ease follow-up patches.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 lib/kobject_uevent.c | 96 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 53 insertions(+), 43 deletions(-)

(limited to 'lib')

diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index e590523ea476..4f48cc3b11d5 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -294,6 +294,57 @@ static void cleanup_uevent_env(struct subprocess_info *info)
 }
 #endif
 
+static int kobject_uevent_net_broadcast(struct kobject *kobj,
+					struct kobj_uevent_env *env,
+					const char *action_string,
+					const char *devpath)
+{
+	int retval = 0;
+#if defined(CONFIG_NET)
+	struct uevent_sock *ue_sk;
+
+	/* send netlink message */
+	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+		struct sock *uevent_sock = ue_sk->sk;
+		struct sk_buff *skb;
+		size_t len;
+
+		if (!netlink_has_listeners(uevent_sock, 1))
+			continue;
+
+		/* allocate message with the maximum possible size */
+		len = strlen(action_string) + strlen(devpath) + 2;
+		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+		if (skb) {
+			char *scratch;
+			int i;
+
+			/* add header */
+			scratch = skb_put(skb, len);
+			sprintf(scratch, "%s@%s", action_string, devpath);
+
+			/* copy keys to our continuous event payload buffer */
+			for (i = 0; i < env->envp_idx; i++) {
+				len = strlen(env->envp[i]) + 1;
+				scratch = skb_put(skb, len);
+				strcpy(scratch, env->envp[i]);
+			}
+
+			NETLINK_CB(skb).dst_group = 1;
+			retval = netlink_broadcast_filtered(uevent_sock, skb,
+							    0, 1, GFP_KERNEL,
+							    kobj_bcast_filter,
+							    kobj);
+			/* ENOBUFS should be handled in userspace */
+			if (retval == -ENOBUFS || retval == -ESRCH)
+				retval = 0;
+		} else
+			retval = -ENOMEM;
+	}
+#endif
+	return retval;
+}
+
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
@@ -316,9 +367,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	const struct kset_uevent_ops *uevent_ops;
 	int i = 0;
 	int retval = 0;
-#ifdef CONFIG_NET
-	struct uevent_sock *ue_sk;
-#endif
 
 	pr_debug("kobject: '%s' (%p): %s\n",
 		 kobject_name(kobj), kobj, __func__);
@@ -427,46 +475,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		mutex_unlock(&uevent_sock_mutex);
 		goto exit;
 	}
-
-#if defined(CONFIG_NET)
-	/* send netlink message */
-	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
-		struct sock *uevent_sock = ue_sk->sk;
-		struct sk_buff *skb;
-		size_t len;
-
-		if (!netlink_has_listeners(uevent_sock, 1))
-			continue;
-
-		/* allocate message with the maximum possible size */
-		len = strlen(action_string) + strlen(devpath) + 2;
-		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
-		if (skb) {
-			char *scratch;
-
-			/* add header */
-			scratch = skb_put(skb, len);
-			sprintf(scratch, "%s@%s", action_string, devpath);
-
-			/* copy keys to our continuous event payload buffer */
-			for (i = 0; i < env->envp_idx; i++) {
-				len = strlen(env->envp[i]) + 1;
-				scratch = skb_put(skb, len);
-				strcpy(scratch, env->envp[i]);
-			}
-
-			NETLINK_CB(skb).dst_group = 1;
-			retval = netlink_broadcast_filtered(uevent_sock, skb,
-							    0, 1, GFP_KERNEL,
-							    kobj_bcast_filter,
-							    kobj);
-			/* ENOBUFS should be handled in userspace */
-			if (retval == -ENOBUFS || retval == -ESRCH)
-				retval = 0;
-		} else
-			retval = -ENOMEM;
-	}
-#endif
+	retval = kobject_uevent_net_broadcast(kobj, env, action_string,
+					      devpath);
 
 	mutex_unlock(&uevent_sock_mutex);
 
 #ifdef CONFIG_UEVENT_HELPER
-- 
cgit v1.2.3

From 4a336a23d619e96aef37d4d054cfadcdd1b581ba Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 19 Sep 2017 16:27:04 -0700
Subject: kobject: copy env blob in one go

No need to iterate over strings, just copy in one efficient memcpy() call.

Tested:
time perf record "(for f in `seq 1 3000` ; do ip netns add tast$f; done)"
[ perf record: Woken up 10 times to write data ]
[ perf record: Captured and wrote 8.224 MB perf.data (~359301 samples) ]

real	0m52.554s  # instead of 1m7.492s
user	0m0.309s
sys	0m51.375s  # instead of 1m6.875s

     9.88%  ip  [kernel.kallsyms]  [k] netlink_broadcast_filtered
     8.86%  ip  [kernel.kallsyms]  [k] string
     7.37%  ip  [kernel.kallsyms]  [k] __ip6addrlbl_add
     5.68%  ip  [kernel.kallsyms]  [k] netlink_has_listeners
     5.52%  ip  [kernel.kallsyms]  [k] memcpy_erms
     4.76%  ip  [kernel.kallsyms]  [k] __alloc_skb
     4.54%  ip  [kernel.kallsyms]  [k] vsnprintf
     3.94%  ip  [kernel.kallsyms]  [k] format_decode
     3.80%  ip  [kernel.kallsyms]  [k] kmem_cache_alloc_node_trace
     3.71%  ip  [kernel.kallsyms]  [k] kmem_cache_alloc_node
     3.66%  ip  [kernel.kallsyms]  [k] kobject_uevent_env
     3.38%  ip  [kernel.kallsyms]  [k] strlen
     2.65%  ip  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
     2.20%  ip  [kernel.kallsyms]  [k] kfree
     2.09%  ip  [kernel.kallsyms]  [k] memset_erms
     2.07%  ip  [kernel.kallsyms]  [k] ___cache_free
     1.95%  ip  [kernel.kallsyms]  [k] kmem_cache_free
     1.91%  ip  [kernel.kallsyms]  [k] _raw_read_lock
     1.45%  ip  [kernel.kallsyms]  [k] ksize
     1.25%  ip  [kernel.kallsyms]  [k] _raw_spin_unlock_irqrestore
     1.00%  ip  [kernel.kallsyms]  [k] widen_string

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 lib/kobject_uevent.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

(limited to 'lib')

diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 4f48cc3b11d5..78b2a7e378c0 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -317,18 +317,12 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
 		if (skb) {
 			char *scratch;
-			int i;
 
 			/* add header */
 			scratch = skb_put(skb, len);
 			sprintf(scratch, "%s@%s", action_string, devpath);
 
-			/* copy keys to our continuous event payload buffer */
-			for (i = 0; i < env->envp_idx; i++) {
-				len = strlen(env->envp[i]) + 1;
-				scratch = skb_put(skb, len);
-				strcpy(scratch, env->envp[i]);
-			}
+			skb_put_data(skb, env->buf, env->buflen);
 
 			NETLINK_CB(skb).dst_group = 1;
 			retval = netlink_broadcast_filtered(uevent_sock, skb,
-- 
cgit v1.2.3

From d464e84eed02993d40ad55fdc19f4523e4deee5b Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 19 Sep 2017 16:27:05 -0700
Subject: kobject: factorize skb setup in kobject_uevent_net_broadcast()

We can build one skb and let it be cloned in netlink.
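The pattern, as a rough sketch (build_uevent_skb() is a made-up name here,
shown only to illustrate the reference counting; the real code fills the
skb inline):

	struct sk_buff *skb = NULL;

	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		if (!skb)
			skb = build_uevent_skb();	/* allocated and filled once */
		/* skb_get() takes an extra reference; the broadcast consumes
		 * one, so our skb stays usable for the next listener. */
		netlink_broadcast(ue_sk->sk, skb_get(skb), 0, 1, GFP_KERNEL);
	}
	consume_skb(skb);	/* drop the reference we kept */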
This is much faster, and uses less memory (all clones will share the
same skb->head).

Tested:
time perf record (for f in `seq 1 3000` ; do ip netns add tast$f; done)
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 4.110 MB perf.data (~179584 samples) ]

real	0m24.227s  # instead of 0m52.554s
user	0m0.329s
sys	0m23.753s  # instead of 0m51.375s

    14.77%  ip  [kernel.kallsyms]  [k] __ip6addrlbl_add
    14.56%  ip  [kernel.kallsyms]  [k] netlink_broadcast_filtered
    11.65%  ip  [kernel.kallsyms]  [k] netlink_has_listeners
     6.19%  ip  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
     5.66%  ip  [kernel.kallsyms]  [k] kobject_uevent_env
     4.97%  ip  [kernel.kallsyms]  [k] memset_erms
     4.67%  ip  [kernel.kallsyms]  [k] refcount_sub_and_test
     4.41%  ip  [kernel.kallsyms]  [k] _raw_read_lock
     3.59%  ip  [kernel.kallsyms]  [k] refcount_inc_not_zero
     3.13%  ip  [kernel.kallsyms]  [k] _raw_spin_unlock_irqrestore
     1.55%  ip  [kernel.kallsyms]  [k] __wake_up
     1.20%  ip  [kernel.kallsyms]  [k] strlen
     1.03%  ip  [kernel.kallsyms]  [k] __wake_up_common
     0.93%  ip  [kernel.kallsyms]  [k] consume_skb
     0.92%  ip  [kernel.kallsyms]  [k] netlink_trim
     0.87%  ip  [kernel.kallsyms]  [k] insert_header
     0.63%  ip  [kernel.kallsyms]  [k] unmap_page_range

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 lib/kobject_uevent.c | 34 +++++++++++++++++++---------------
 1 file changed, 19 insertions(+), 15 deletions(-)

(limited to 'lib')

diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 78b2a7e378c0..147db91c10d0 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -301,23 +301,26 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 {
 	int retval = 0;
 #if defined(CONFIG_NET)
+	struct sk_buff *skb = NULL;
 	struct uevent_sock *ue_sk;
 
 	/* send netlink message */
 	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
 		struct sock *uevent_sock = ue_sk->sk;
-		struct sk_buff *skb;
-		size_t len;
 
 		if (!netlink_has_listeners(uevent_sock, 1))
 			continue;
 
-		/* allocate message with the maximum possible size */
-		len = strlen(action_string) + strlen(devpath) + 2;
-		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
-		if (skb) {
+		if (!skb) {
+			/* allocate message with the maximum possible size */
+			size_t len = strlen(action_string) + strlen(devpath) + 2;
 			char *scratch;
 
+			retval = -ENOMEM;
+			skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+			if (!skb)
+				continue;
+
 			/* add header */
 			scratch = skb_put(skb, len);
 			sprintf(scratch, "%s@%s", action_string, devpath);
@@ -325,16 +328,17 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 			skb_put_data(skb, env->buf, env->buflen);
 
 			NETLINK_CB(skb).dst_group = 1;
-			retval = netlink_broadcast_filtered(uevent_sock, skb,
-							    0, 1, GFP_KERNEL,
-							    kobj_bcast_filter,
-							    kobj);
-			/* ENOBUFS should be handled in userspace */
-			if (retval == -ENOBUFS || retval == -ESRCH)
-				retval = 0;
-		} else
-			retval = -ENOMEM;
+		}
+
+		retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb),
+						    0, 1, GFP_KERNEL,
+						    kobj_bcast_filter,
+						    kobj);
+		/* ENOBUFS should be handled in userspace */
+		if (retval == -ENOBUFS || retval == -ESRCH)
+			retval = 0;
 	}
+	consume_skb(skb);
 #endif
 	return retval;
 }
-- 
cgit v1.2.3

From 411d788a23f7e20b8fc51b548c7204fdecc9d22e Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Thu, 21 Sep 2017 17:36:08 +0200
Subject: test_rhashtable: remove initdata annotation

kbuild test robot reported a section mismatch warning w. gcc 4.x:

WARNING: lib/test_rhashtable.o(.text+0x139e): Section mismatch in
reference from the function rhltable_insert.clone.3() to the variable
.init.data:rhlt

so remove this annotation.

Fixes: cdd4de372ea06 ("test_rhashtable: add test case for rhl_table interface")
Reported-by: kbuild test robot
Signed-off-by: Florian Westphal
Signed-off-by: David S. Miller
---
 lib/test_rhashtable.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index de4d0584631a..8e83cbdc049c 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -251,7 +251,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
 }
 
 static struct rhashtable ht;
-static struct rhltable rhlt __initdata;
+static struct rhltable rhlt;
 
 static int __init test_rhltable(unsigned int entries)
 {
-- 
cgit v1.2.3

From cf4c950b87ee2f547ad3abd3aca6ae3f3eb3443f Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Mon, 9 Oct 2017 14:30:52 -0700
Subject: once: switch to new jump label API

Switch the DO_ONCE() macro from the deprecated jump label API to the
new one.  The new one is more readable, and for DO_ONCE() it also makes
the generated code more icache-friendly: now the one-time
initialization code is placed out-of-line at the jump target, rather
than at the inline fallthrough case.

Acked-by: Hannes Frederic Sowa
Signed-off-by: Eric Biggers
Signed-off-by: David S. Miller
---
 include/linux/once.h | 6 +++---
 lib/once.c           | 8 ++++----
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'lib')

diff --git a/include/linux/once.h b/include/linux/once.h
index 9c98aaa87cbc..724724918e8b 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -5,7 +5,7 @@
 #include <linux/jump_label.h>
 
 bool __do_once_start(bool *done, unsigned long *flags);
-void __do_once_done(bool *done, struct static_key *once_key,
+void __do_once_done(bool *done, struct static_key_true *once_key,
 		    unsigned long *flags);
 
 /* Call a function exactly once. The idea of DO_ONCE() is to perform
@@ -38,8 +38,8 @@ void __do_once_done(bool *done, struct static_key *once_key,
 	({								\
 		bool ___ret = false;					\
 		static bool ___done = false;				\
-		static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
-		if (static_key_true(&___once_key)) {			\
+		static DEFINE_STATIC_KEY_TRUE(___once_key);		\
+		if (static_branch_unlikely(&___once_key)) {		\
 			unsigned long ___flags;				\
 			___ret = __do_once_start(&___done, &___flags);	\
 			if (unlikely(___ret)) {				\
diff --git a/lib/once.c b/lib/once.c
index 05c8604627eb..831c5a6b0bb2 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -5,7 +5,7 @@
 
 struct once_work {
 	struct work_struct work;
-	struct static_key *key;
+	struct static_key_true *key;
 };
 
 static void once_deferred(struct work_struct *w)
@@ -14,11 +14,11 @@ static void once_deferred(struct work_struct *w)
 
 	work = container_of(w, struct once_work, work);
 	BUG_ON(!static_key_enabled(work->key));
-	static_key_slow_dec(work->key);
+	static_branch_disable(work->key);
 	kfree(work);
 }
 
-static void once_disable_jump(struct static_key *key)
+static void once_disable_jump(struct static_key_true *key)
 {
 	struct once_work *w;
 
@@ -51,7 +51,7 @@ bool __do_once_start(bool *done, unsigned long *flags)
 }
 EXPORT_SYMBOL(__do_once_start);
 
-void __do_once_done(bool *done, struct static_key *once_key,
+void __do_once_done(bool *done, struct static_key_true *once_key,
 		    unsigned long *flags)
 	__releases(once_lock)
 {
-- 
cgit v1.2.3

From 7a0947e755084b918e33242fd558e55cb443408e Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Tue, 17 Oct 2017 17:16:52 -0700
Subject: dql: make dql_init return void

dql_init always returned 0, and the only place that uses it in network
core code didn't care about the return value anyway.

Signed-off-by: Stephen Hemminger
Acked-by: Hiroaki SHIMODA
Signed-off-by: David S. Miller
---
 include/linux/dynamic_queue_limits.h | 2 +-
 lib/dynamic_queue_limits.c           | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

(limited to 'lib')

diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index a4be70398ce1..f69f98541953 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -98,7 +98,7 @@ void dql_completed(struct dql *dql, unsigned int count);
 void dql_reset(struct dql *dql);
 
 /* Initialize dql state */
-int dql_init(struct dql *dql, unsigned hold_time);
+void dql_init(struct dql *dql, unsigned int hold_time);
 
 #endif /* _KERNEL_ */
 
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index f346715e2255..dbe61c4c2a97 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -127,12 +127,11 @@ void dql_reset(struct dql *dql)
 }
 EXPORT_SYMBOL(dql_reset);
 
-int dql_init(struct dql *dql, unsigned hold_time)
+void dql_init(struct dql *dql, unsigned int hold_time)
 {
 	dql->max_limit = DQL_MAX_LIMIT;
 	dql->min_limit = 0;
 	dql->slack_hold_time = hold_time;
 	dql_reset(dql);
-	return 0;
 }
 EXPORT_SYMBOL(dql_init);
-- 
cgit v1.2.3

From 28033ae4e0f59782b3f3357d9c28852a7e84a894 Mon Sep 17 00:00:00 2001
From: David Ahern
Date: Tue, 7 Nov 2017 21:59:40 -0800
Subject: net: netlink: Update attr validation to require exact length for some types

Attributes using NLA_U* and NLA_S* (where * is 8, 16, 32 and 64) are
expected to be an exact length. Split these data types from
nla_attr_minlen into nla_attr_len and update validate_nla to require
the attribute to have exact length for them.

Signed-off-by: David Ahern
Signed-off-by: David S. Miller
---
 lib/nlattr.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

(limited to 'lib')

diff --git a/lib/nlattr.c b/lib/nlattr.c
index 3d8295c85505..8bf78b4b78f0 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -15,19 +15,23 @@
 #include <linux/types.h>
 #include <net/netlink.h>
 
-static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+/* for these data types attribute length must be exactly given size */
+static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
 	[NLA_U8]	= sizeof(u8),
 	[NLA_U16]	= sizeof(u16),
 	[NLA_U32]	= sizeof(u32),
 	[NLA_U64]	= sizeof(u64),
-	[NLA_MSECS]	= sizeof(u64),
-	[NLA_NESTED]	= NLA_HDRLEN,
 	[NLA_S8]	= sizeof(s8),
 	[NLA_S16]	= sizeof(s16),
 	[NLA_S32]	= sizeof(s32),
 	[NLA_S64]	= sizeof(s64),
 };
 
+static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+	[NLA_MSECS]	= sizeof(u64),
+	[NLA_NESTED]	= NLA_HDRLEN,
+};
+
 static int validate_nla_bitfield32(const struct nlattr *nla,
 				   u32 *valid_flags_allowed)
 {
@@ -65,6 +69,13 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 
 	BUG_ON(pt->type > NLA_TYPE_MAX);
 
+	/* for data types NLA_U* and NLA_S* require exact length */
+	if (nla_attr_len[pt->type]) {
+		if (attrlen != nla_attr_len[pt->type])
+			return -ERANGE;
+		return 0;
+	}
+
 	switch (pt->type) {
 	case NLA_FLAG:
 		if (attrlen > 0)
@@ -191,6 +202,8 @@ nla_policy_len(const struct nla_policy *p, int n)
 	for (i = 0; i < n; i++, p++) {
 		if (p->len)
 			len += nla_total_size(p->len);
+		else if (nla_attr_len[p->type])
+			len += nla_total_size(nla_attr_len[p->type]);
 		else if (nla_attr_minlen[p->type])
 			len += nla_total_size(nla_attr_minlen[p->type]);
 	}
-- 
cgit v1.2.3
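As an illustration of the stricter check, consider a hypothetical policy
(the names below are invented for this sketch, not taken from the patch):

	static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
		[DEMO_ATTR_PORT] = { .type = NLA_U16 },
	};

With this change a DEMO_ATTR_PORT attribute validates only if its payload
is exactly sizeof(u16) bytes; validate_nla() returns -ERANGE for any other
length, whereas the old nla_attr_minlen check also accepted oversized
payloads.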