author	Rusty Russell <rusty@rustcorp.com.au>	2008-07-22 19:24:28 +1000
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-07-22 19:24:28 +1000
commit	3a642e99babe0617febb6f402e1e063479f489db (patch)
tree	09f7d6c7b0a4e3869d11c739113e5dd5a8ff5a2c /kernel
parent	2f0f2a334bc38b61a9afca951185cd3844ee709d (diff)
modules: Take a shortcut for checking if an address is in a module
This patch keeps track of the boundaries of module allocation, in order to speed up module_text_address().

Inspired by Arjan's version, which required arch-specific defines:

	Various pieces of the kernel (lockdep, latencytop, etc.) tend to store backtraces, sometimes at a relatively high frequency. In itself this isn't a big performance deal (after all, you're using diagnostics features), but there have been some complaints from people who have over 100 modules loaded that this is a tad too slow.

	This is due to the new backtracer code, which looks at every slot on the stack to see if it's a kernel/module text address, so that's 1024 slots. 1024 times 100 modules... that's a lot of list walking.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
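To make the shortcut concrete, here is a minimal userspace sketch of the same idea (not kernel code: plain malloc() stands in for module_alloc(), a fixed array stands in for the module list, and names such as fake_module, alloc_update_bounds and lookup are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static unsigned long module_addr_min = ULONG_MAX;	/* mirrors -1UL */
static unsigned long module_addr_max = 0;

struct fake_module {
	void *base;
	unsigned long size;
};

static struct fake_module mods[4];
static int nr_mods;

static void *alloc_update_bounds(unsigned long size)
{
	void *ret = malloc(size);

	if (ret) {
		/* Widen the global [min, max] window to cover this block. */
		if ((unsigned long)ret < module_addr_min)
			module_addr_min = (unsigned long)ret;
		if ((unsigned long)ret + size > module_addr_max)
			module_addr_max = (unsigned long)ret + size;
	}
	return ret;
}

static struct fake_module *lookup(unsigned long addr)
{
	int i;

	/* The shortcut: one pair of compares instead of a list walk. */
	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	for (i = 0; i < nr_mods; i++)
		if (addr - (unsigned long)mods[i].base < mods[i].size)
			return &mods[i];
	return NULL;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		mods[nr_mods].base = alloc_update_bounds(128);
		mods[nr_mods].size = 128;
		nr_mods++;
	}

	/* An address inside mods[2] is found; address 1 is rejected
	 * by the bounds check without touching the array at all. */
	printf("hit:  %p\n", (void *)lookup((unsigned long)mods[2].base + 10));
	printf("miss: %p\n", (void *)lookup(1UL));
	return 0;
}

As in the patch, the window is only ever widened, never shrunk when a block is freed; a stale window can admit a non-module address into the slow path, but it can never reject an address that belongs to a live module.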
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/module.c	24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 5c7eb0695b3c..d8b5605132a0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -70,6 +70,9 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
+/* Bounds of module allocation, for speeding __module_text_address */
+static unsigned long module_addr_min = -1UL, module_addr_max = 0;
+
 int register_module_notifier(struct notifier_block * nb)
 {
 	return blocking_notifier_chain_register(&module_notify_list, nb);
@@ -1779,6 +1782,20 @@ static inline void add_kallsyms(struct module *mod,
 }
 #endif /* CONFIG_KALLSYMS */
 
+static void *module_alloc_update_bounds(unsigned long size)
+{
+	void *ret = module_alloc(size);
+
+	if (ret) {
+		/* Update module bounds. */
+		if ((unsigned long)ret < module_addr_min)
+			module_addr_min = (unsigned long)ret;
+		if ((unsigned long)ret + size > module_addr_max)
+			module_addr_max = (unsigned long)ret + size;
+	}
+	return ret;
+}
+
 /* Allocate and load the module: note that size of section 0 is always
    zero, and we rely on this for optional sections. */
 static struct module *load_module(void __user *umod,
@@ -1980,7 +1997,7 @@ static struct module *load_module(void __user *umod,
 	layout_sections(mod, hdr, sechdrs, secstrings);
 
 	/* Do the allocs. */
-	ptr = module_alloc(mod->core_size);
+	ptr = module_alloc_update_bounds(mod->core_size);
 	if (!ptr) {
 		err = -ENOMEM;
 		goto free_percpu;
@@ -1988,7 +2005,7 @@ static struct module *load_module(void __user *umod,
 	memset(ptr, 0, mod->core_size);
 	mod->module_core = ptr;
 
-	ptr = module_alloc(mod->init_size);
+	ptr = module_alloc_update_bounds(mod->init_size);
 	if (!ptr && mod->init_size) {
 		err = -ENOMEM;
 		goto free_core;
@@ -2645,6 +2662,9 @@ struct module *__module_text_address(unsigned long addr)
 {
 	struct module *mod;
 
+	if (addr < module_addr_min || addr > module_addr_max)
+		return NULL;
+
 	list_for_each_entry(mod, &modules, list)
 		if (within(addr, mod->module_init, mod->init_text_size)
 		    || within(addr, mod->module_core, mod->core_text_size))
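
For reference, within() is a pre-existing helper in kernel/module.c, not part of this patch; it looks roughly like the sketch below (reconstructed from memory of that era's source, so treat the exact signature as an assumption; the void-pointer arithmetic relies on the GNU C extension the kernel builds with):

static inline int within(unsigned long addr, void *start, unsigned long size)
{
	/* Nonzero iff addr falls in [start, start + size). */
	return ((void *)addr >= start && (void *)addr < start + size);
}

The new bounds test sits in front of this walk: module_addr_min starts at -1UL and module_addr_max at 0, so the first successful module_alloc_update_bounds() call initializes both, and every later allocation can only widen the window.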