From bcd83ea6cbfee54e33d1527b87538dc99ca2137b Mon Sep 17 00:00:00 2001
From: Daniel Walter
Date: Wed, 26 Sep 2012 22:08:38 +0200
Subject: tracing: Replace strict_strto* with kstrto*

 * replace old string conversions with kstrto*

Link: http://lkml.kernel.org/r/20120926200838.GC1244@0x90.at

Signed-off-by: Daniel Walter
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 507a7a9630bf..618dcf8bdb87 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
 	 * We use the callback data field (which is a pointer)
 	 * as our counter.
 	 */
-	ret = strict_strtoul(number, 0, (unsigned long *)&count);
+	ret = kstrtoul(number, 0, (unsigned long *)&count);
 	if (ret)
 		return ret;
-- 
cgit v1.2.3


From 6f4156723c084bfc0c0f72205c541fafb8ad3ded Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Fri, 5 Oct 2012 12:13:07 -0400
Subject: tracing: Allow tracers to start at core initcall

There are times during debugging when it is helpful to see traces of
early boot functions. But the tracers are initialized at
device_initcall(), which is quite late in the boot process. Setting the
kernel command line parameter ftrace=function will not show anything
until the function tracer is initialized. This prevents being able to
trace functions before device_initcall().

There's no reason for the tracers to be initialized so late in the boot
process. Move them up to core_initcall(), as they still need to come
after early_initcall(), which initializes the tracing buffers.

Cc: Thomas Gleixner
Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c                | 4 ++--
 kernel/trace/trace_branch.c          | 2 +-
 kernel/trace/trace_functions.c       | 3 +--
 kernel/trace/trace_functions_graph.c | 2 +-
 kernel/trace/trace_irqsoff.c         | 2 +-
 kernel/trace/trace_sched_wakeup.c    | 2 +-
 6 files changed, 7 insertions(+), 8 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 60ad606dc85f..4451aa3a55a0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
 {
 	return register_ftrace_command(&ftrace_mod_cmd);
 }
-device_initcall(ftrace_mod_cmd_init);
+core_initcall(ftrace_mod_cmd_init);
 
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
 	ftrace_enabled = 1;
 	return 0;
 }
-device_initcall(ftrace_nodyn_init);
+core_initcall(ftrace_nodyn_init);
 
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 8d3538b4ea5f..bd3e0eef4eaa 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
 	}
 	return register_tracer(&branch_trace);
 }
-device_initcall(init_branch_tracer);
+core_initcall(init_branch_tracer);
 
 #else
 
 static inline
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 618dcf8bdb87..bb227e380cb5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -411,5 +411,4 @@ static __init int init_function_trace(void)
 	init_func_cmd_traceon();
 	return register_tracer(&function_trace);
 }
-device_initcall(init_function_trace);
-
+core_initcall(init_function_trace);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 99b4378393d5..a84b55879bc4 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void)
 
 	return register_tracer(&graph_trace);
 }
-device_initcall(init_graph_trace);
+core_initcall(init_graph_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d98ee8283b29..11edebda4548 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void)
 
 	return 0;
 }
-device_initcall(init_irqsoff_tracer);
+core_initcall(init_irqsoff_tracer);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 02170c00c413..2f6af7833694 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void)
 
 	return 0;
 }
-device_initcall(init_wakeup_tracer);
+core_initcall(init_wakeup_tracer);
-- 
cgit v1.2.3


From 6d49e352ae9aed3f599041b0c0389aa924815f14 Mon Sep 17 00:00:00 2001
From: Nadia Yvette Chambers
Date: Thu, 6 Dec 2012 10:39:54 +0100
Subject: propagate name change to comments in kernel source

I've legally changed my name with New York State, the US Social
Security Administration, et al. This patch propagates the name change
and change in initials and login to comments in the kernel source as
well.

Signed-off-by: Nadia Yvette Chambers
Signed-off-by: Jiri Kosina
---
 MAINTAINERS                       | 2 +-
 arch/alpha/include/asm/mmzone.h   | 2 +-
 arch/frv/mm/pgalloc.c             | 2 +-
 arch/mn10300/mm/pgtable.c         | 2 +-
 arch/x86/mm/pgtable.c             | 2 +-
 fs/hugetlbfs/inode.c              | 2 +-
 fs/ncpfs/mmap.c                   | 2 +-
 include/linux/hash.h              | 2 +-
 kernel/pid.c                      | 4 ++--
 kernel/profile.c                  | 7 ++++---
 kernel/trace/ftrace.c             | 2 +-
 kernel/trace/trace.c              | 2 +-
 kernel/trace/trace_functions.c    | 2 +-
 kernel/trace/trace_irqsoff.c      | 2 +-
 kernel/trace/trace_sched_wakeup.c | 2 +-
 kernel/wait.c                     | 2 +-
 lib/bitmap.c                      | 2 +-
 mm/hugetlb.c                      | 2 +-
 mm/page_alloc.c                   | 4 ++--
 19 files changed, 24 insertions(+), 23 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/MAINTAINERS b/MAINTAINERS
index 322db3ae83cf..e16a3f5a1b79 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3576,7 +3576,7 @@ S:	Maintained
 F:	drivers/input/touchscreen/htcpen.c
 
 HUGETLB FILESYSTEM
-M:	William Irwin
+M:	Nadia Yvette Chambers
 S:	Maintained
 F:	fs/hugetlbfs/
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 445dc42e0334..c5b5d6bac9ed 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -66,7 +66,7 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
 	((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn	\
 			  << PAGE_SHIFT))
 
-/* XXX: FIXME -- wli */
+/* XXX: FIXME -- nyc */
 #define kern_addr_valid(kaddr)	(0)
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 4fb63a36bd52..f6084bc524e8 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -77,7 +77,7 @@ void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
  * checks at dup_mmap(), exec(), and other mmlist addition points
  * could be used. The locking scheme was chosen on the basis of
  * manfred's recommendations and having no core impact whatsoever.
- * -- wli
+ * -- nyc
  */
 DEFINE_SPINLOCK(pgd_lock);
 struct page *pgd_list;
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 4ebf117c3285..bd9ada693f95 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -95,7 +95,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * checks at dup_mmap(), exec(), and other mmlist addition points
  * could be used. The locking scheme was chosen on the basis of
  * manfred's recommendations and having no core impact whatsoever.
- * -- wli
+ * -- nyc
  */
 DEFINE_SPINLOCK(pgd_lock);
 struct page *pgd_list;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8573b83a63d0..217eb705fac0 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -137,7 +137,7 @@ static void pgd_dtor(pgd_t *pgd)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
- * -- wli
+ * -- nyc
  */
 
 #ifdef CONFIG_X86_PAE
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c5bc355d8243..c98d0665fa5c 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1,7 +1,7 @@
 /*
  * hugetlbpage-backed filesystem. Based on ramfs.
  *
- * William Irwin, 2002
+ * Nadia Yvette Chambers, 2002
  *
  * Copyright (C) 2002 Linus Torvalds.
  */
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index be20a7e171a0..63d14a99483d 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -89,7 +89,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
 	/*
 	 * If I understand ncp_read_kernel() properly, the above always
 	 * fetches from the network, here the analogue of disk.
-	 * -- wli
+	 * -- nyc
 	 */
 	count_vm_event(PGMAJFAULT);
 	mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 24df9e70406f..61c97ae22e01 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -1,7 +1,7 @@
 #ifndef _LINUX_HASH_H
 #define _LINUX_HASH_H
 /* Fast hashing routine for ints, longs and pointers.
-   (C) 2002 William Lee Irwin III, IBM */
+   (C) 2002 Nadia Yvette Chambers, IBM */
 
 /*
  * Knuth recommends primes in approximately golden ratio to the maximum
diff --git a/kernel/pid.c b/kernel/pid.c
index aebd4f5aaf41..fd996c1ed9f8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -1,8 +1,8 @@
 /*
  * Generic pidhash and scalable, time-bounded PID allocator
  *
- * (C) 2002-2003 William Irwin, IBM
- * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2003 Nadia Yvette Chambers, IBM
+ * (C) 2004 Nadia Yvette Chambers, Oracle
  * (C) 2002-2004 Ingo Molnar, Red Hat
  *
  * pid-structures are backing objects for tasks sharing a given ID to chain
diff --git a/kernel/profile.c b/kernel/profile.c
index 76b8e77773ee..1f391819c42f 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -8,9 +8,10 @@
  *	Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
  *	Red Hat, July 2004
  *	Consolidation of architecture support code for profiling,
- *	William Irwin, Oracle, July 2004
+ *	Nadia Yvette Chambers, Oracle, July 2004
  *	Amortized hit count accounting via per-cpu open-addressed hashtables
- *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
+ *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
+ *	Oracle, 2004
  */
 
 #include
@@ -256,7 +257,7 @@ EXPORT_SYMBOL_GPL(unregister_timer_hook);
  * pagetable hash functions, but uses a full hashtable full of finite
  * collision chains, not just pairs of them.
  *
- * -- wli
+ * -- nyc
  */
 static void __profile_flip_buffers(void *unused)
 {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9dcf15d38380..b1a817c14ed5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -10,7 +10,7 @@
  * Based on code in the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 
 #include
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 31e4f55773f1..3cff052715fe 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9,7 +9,7 @@
  *
  * Based on code from the latency_tracer, that is:
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include
 #include
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 507a7a9630bf..c7b83d03ff68 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -7,7 +7,7 @@
  * Based on code from the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include
 #include
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d98ee8283b29..cddb3b40bf7b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -7,7 +7,7 @@
  * From code in the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include
 #include
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 02170c00c413..45464947a5ff 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -7,7 +7,7 @@
  * Based on code from the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include
 #include
diff --git a/kernel/wait.c b/kernel/wait.c
index 7fdd9eaca2c3..6698e0c04ead 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -1,7 +1,7 @@
 /*
  * Generic waiting primitives.
  *
- * (C) 2004 William Irwin, Oracle
+ * (C) 2004 Nadia Yvette Chambers, Oracle
  */
 #include
 #include
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06fdfa1aeba7..06f7e4fe8d2d 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -353,7 +353,7 @@ again:
 EXPORT_SYMBOL(bitmap_find_next_zero_area);
 
 /*
- * Bitmap printing & parsing functions: first version by Bill Irwin,
+ * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
  * second version by Paul Jackson, third by Joe Korty.
  */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 59a0059b39e2..3b7a20ea3808 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1,6 +1,6 @@
 /*
  * Generic hugetlb support.
- * (C) William Irwin, April 2004
+ * (C) Nadia Yvette Chambers, April 2004
  */
 #include
 #include
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b74de6702e0..57806b7e118b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -523,7 +523,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * If a block is freed, and its buddy is also free, then this
  * triggers coalescing into a block of larger size.
  *
- * -- wli
+ * -- nyc
  */
 
 static inline void __free_one_page(struct page *page,
@@ -780,7 +780,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
- * -- wli
+ * -- nyc
 */
 static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area,
-- 
cgit v1.2.3


From 897f68a48b1f8fb6cb7493e1ee37e3ed7f879937 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Fri, 2 Nov 2012 17:52:35 -0400
Subject: ftrace: Use only the preempt version of function tracing

The function tracer had two different versions of function tracing:
the irq-disabling version and the preempt-disabling version.

As function tracing is very intrusive and can cause nasty recursion
issues, it has its own recursion protection. But the old method to do
this was a flat layer. If it detected that a recursion was happening
then it would just return without recording.

This made the preempt version (much faster than the irq disabling one)
not very useful, because if an interrupt were to occur after the
recursion flag was set, the interrupt would not be traced at all,
because every function that was traced would think it recursed on
itself (due to the context it preempted setting the recursive flag).

Now that we have a recursion flag for every context level, we no
longer need to worry about that. We can disable preemption, set the
current context recursion check bit, and go on. If an interrupt were
to come along, it would check its own context bit and happily continue
to trace.

As the preempt version is faster than the irq disable version, there's
no more reason to keep the preempt version around. And the irq disable
version still had an issue with missing out on tracing NMI code.

Remove the irq disable function tracer version and have the preempt
disable version be the default (and only) version.

Before this patch we had the following from running:

 # echo function > /debug/tracing/current_tracer
 # for i in `seq 10`; do ./hackbench 50; done
Time: 12.028
Time: 11.945
Time: 11.925
Time: 11.964
Time: 12.002
Time: 11.910
Time: 11.944
Time: 11.929
Time: 11.941
Time: 11.924

(average: 11.9512)

Now we have:

 # echo function > /debug/tracing/current_tracer
 # for i in `seq 10`; do ./hackbench 50; done
Time: 10.285
Time: 10.407
Time: 10.243
Time: 10.372
Time: 10.380
Time: 10.198
Time: 10.272
Time: 10.354
Time: 10.248
Time: 10.253

(average: 10.3012)

a 13.8% savings!

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 61 ++++++++++--------------------------------
 1 file changed, 14 insertions(+), 47 deletions(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8e3ad8082ab7..1c327ef13a9a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -47,34 +47,6 @@ static void function_trace_start(struct trace_array *tr)
 	tracing_reset_online_cpus(tr);
 }
 
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
-				 struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
-	struct trace_array *tr = func_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	if (unlikely(!ftrace_function_enabled))
-		return;
-
-	pc = preempt_count();
-	preempt_disable_notrace();
-	local_save_flags(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1))
-		trace_function(tr, ip, parent_ip, flags, pc);
-
-	atomic_dec(&data->disabled);
-	preempt_enable_notrace();
-}
-
 /* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK	= 0x1,
@@ -85,34 +57,34 @@ static struct tracer_flags func_flags;
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
-
 {
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
+	unsigned int bit;
 	int cpu;
 	int pc;
 
 	if (unlikely(!ftrace_function_enabled))
 		return;
 
-	/*
-	 * Need to use raw, since this must be called before the
-	 * recursive protection is performed.
-	 */
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	pc = preempt_count();
+	preempt_disable_notrace();
 
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
+	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+	if (bit < 0)
+		goto out;
+
+	cpu = smp_processor_id();
+	data = tr->data[cpu];
+	if (!atomic_read(&data->disabled)) {
+		local_save_flags(flags);
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
+	trace_clear_recursion(bit);
 
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+ out:
+	preempt_enable_notrace();
 }
 
 static void
@@ -185,11 +157,6 @@ static void tracing_start_function_trace(void)
 {
 	ftrace_function_enabled = 0;
 
-	if (trace_flags & TRACE_ITER_PREEMPTONLY)
-		trace_ops.func = function_trace_call_preempt_only;
-	else
-		trace_ops.func = function_trace_call;
-
 	if (func_flags.val & TRACE_FUNC_OPT_STACK)
 		register_ftrace_function(&trace_stack_ops);
 	else
-- 
cgit v1.2.3


From d41032a83b4683481cadff84bbf8e0eafeaba830 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Thu, 24 Jan 2013 07:52:34 -0500
Subject: tracing: Fix unsigned int compare of zero in recursion check

Dan's smatch found a compare bug: the result of
trace_test_and_set_recursion() was being compared to less than zero.
If the function fails, it returns -1, but the result was saved in an
unsigned int, which will never be less than zero, so the check would
ignore the result of the test if a recursion did happen.

Luckily this is the last of the recursion tests, as the ftrace
infrastructure would catch recursions before it got here, except for
a few exceptions.

Reported-by: Dan Carpenter
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/trace/trace_functions.c')

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 1c327ef13a9a..601152523326 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -61,7 +61,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 	struct trace_array *tr = func_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	unsigned int bit;
+	int bit;
 	int cpu;
 	int pc;
-- 
cgit v1.2.3
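
Editor's note: the bug fixed in the last patch above is a classic C pitfall. An
error return of -1 stored in an unsigned variable can never compare as less
than zero, so the failure branch is silently skipped. The program below is a
minimal standalone sketch of that pattern, not kernel code: reserve_slot() is a
hypothetical stand-in for a function that, like trace_test_and_set_recursion(),
returns a bit index on success and -1 on failure.

#include <stdio.h>

/* Hypothetical helper: returns a slot index on success, -1 on failure. */
static int reserve_slot(int want)
{
	return want < 8 ? want : -1;
}

int main(void)
{
	/* -1 converts to UINT_MAX when stored in an unsigned int. */
	unsigned int bit_broken = (unsigned int)reserve_slot(99);
	int bit_fixed = reserve_slot(99);

	/*
	 * Always false: an unsigned value is never < 0, so the error is
	 * missed (compilers typically warn about this comparison).
	 */
	if (bit_broken < 0)
		printf("broken check: failure detected\n");
	else
		printf("broken check: failure silently ignored (bit=%u)\n",
		       bit_broken);

	/* Correct: a signed variable preserves the -1 error return. */
	if (bit_fixed < 0)
		printf("fixed check: failure detected\n");

	return 0;
}

Running it prints the "silently ignored" line with bit=4294967295 for the
unsigned variant and "failure detected" for the signed one, which is exactly
the behavior difference the one-line type change in the patch addresses.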