From 52494535103986dbbf689b44d8c2c7efe2132b16 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 14 Nov 2012 16:26:40 -0800
Subject: rcu: Reduce rcutorture tracing

Currently, rcutorture traces every read-side access.  This can be
problematic because even a two-minute rcutorture run on a two-CPU system
can generate 28,853,363 reads.  Normally, only a failing read is of
interest, so this commit adjusts rcutorture's tracing to only trace
failing reads.  The resulting event tracing records the time and the
->completed value captured at the beginning of the RCU read-side
critical section, allowing correlation with other event-tracing
messages.

Signed-off-by: Paul E. McKenney
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
[ paulmck: Add fix to build problem located by Randy Dunlap based on
  diagnosis by Steven Rostedt. ]
---
 kernel/rcutorture.c | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)

(limited to 'kernel/rcutorture.c')

diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 31dea01c85fd..a583f1ce713d 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include <linux/trace_clock.h>
 #include
 
 MODULE_LICENSE("GPL");
@@ -1028,7 +1029,6 @@ void rcutorture_trace_dump(void)
                 return;
         if (atomic_xchg(&beenhere, 1) != 0)
                 return;
-        do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
         ftrace_dump(DUMP_ALL);
 }
 
@@ -1042,13 +1042,16 @@ static void rcu_torture_timer(unsigned long unused)
 {
         int idx;
         int completed;
+        int completed_end;
         static DEFINE_RCU_RANDOM(rand);
         static DEFINE_SPINLOCK(rand_lock);
         struct rcu_torture *p;
         int pipe_count;
+        unsigned long long ts;
 
         idx = cur_ops->readlock();
         completed = cur_ops->completed();
+        ts = trace_clock_local();
         p = rcu_dereference_check(rcu_torture_current,
                                   rcu_read_lock_bh_held() ||
                                   rcu_read_lock_sched_held() ||
@@ -1058,7 +1061,6 @@ static void rcu_torture_timer(unsigned long unused)
                 cur_ops->readunlock(idx);
                 return;
         }
-        do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
         if (p->rtort_mbtest == 0)
                 atomic_inc(&n_rcu_torture_mberror);
         spin_lock(&rand_lock);
@@ -1071,10 +1073,16 @@
                 /* Should not happen, but... */
                 pipe_count = RCU_TORTURE_PIPE_LEN;
         }
-        if (pipe_count > 1)
+        completed_end = cur_ops->completed();
+        if (pipe_count > 1) {
+                unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
+
+                do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
+                                          completed, completed_end);
                 rcutorture_trace_dump();
+        }
         __this_cpu_inc(rcu_torture_count[pipe_count]);
-        completed = cur_ops->completed() - completed;
+        completed = completed_end - completed;
         if (completed > RCU_TORTURE_PIPE_LEN) {
                 /* Should not happen, but... */
                 completed = RCU_TORTURE_PIPE_LEN;
@@ -1094,11 +1102,13 @@ static int
 rcu_torture_reader(void *arg)
 {
         int completed;
+        int completed_end;
         int idx;
         DEFINE_RCU_RANDOM(rand);
         struct rcu_torture *p;
         int pipe_count;
         struct timer_list t;
+        unsigned long long ts;
 
         VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
         set_user_nice(current, 19);
@@ -1112,6 +1122,7 @@ rcu_torture_reader(void *arg)
                 }
                 idx = cur_ops->readlock();
                 completed = cur_ops->completed();
+                ts = trace_clock_local();
                 p = rcu_dereference_check(rcu_torture_current,
                                           rcu_read_lock_bh_held() ||
                                           rcu_read_lock_sched_held() ||
@@ -1122,7 +1133,6 @@
                         schedule_timeout_interruptible(HZ);
                         continue;
                 }
-                do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
                 if (p->rtort_mbtest == 0)
                         atomic_inc(&n_rcu_torture_mberror);
                 cur_ops->read_delay(&rand);
@@ -1132,10 +1142,17 @@
                         /* Should not happen, but... */
                         pipe_count = RCU_TORTURE_PIPE_LEN;
                 }
-                if (pipe_count > 1)
+                completed_end = cur_ops->completed();
+                if (pipe_count > 1) {
+                        unsigned long __maybe_unused ts_rem =
+                                        do_div(ts, NSEC_PER_USEC);
+
+                        do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+                                                  ts, completed, completed_end);
                         rcutorture_trace_dump();
+                }
                 __this_cpu_inc(rcu_torture_count[pipe_count]);
-                completed = cur_ops->completed() - completed;
+                completed = completed_end - completed;
                 if (completed > RCU_TORTURE_PIPE_LEN) {
                         /* Should not happen, but... */
                         completed = RCU_TORTURE_PIPE_LEN;
--
cgit v1.2.3
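The patch above follows a common low-overhead tracing pattern: capture cheap
context (a timestamp and the grace-period counter) unconditionally at the
start of every read, but emit a trace record only when the read turns out to
be a failing one.  The standalone C program below is a minimal sketch of that
pattern, not kernel code; now_us(), completed_counter and do_one_read() are
made-up names used purely for illustration.

/* Sketch of "trace only failing reads"; all names are illustrative. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static unsigned long completed_counter; /* stand-in for cur_ops->completed() */

static uint64_t now_us(void)            /* stand-in for trace_clock_local()+do_div() */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000ULL + (uint64_t)ts.tv_nsec / 1000ULL;
}

static void do_one_read(int pipe_count)
{
        uint64_t ts = now_us();                 /* captured unconditionally: cheap */
        unsigned long completed = completed_counter;
        unsigned long completed_end;

        /* ...the read-side critical section would run here... */
        completed_end = completed_counter;

        if (pipe_count > 1)                     /* trace only the failing case */
                fprintf(stderr, "failed read at %llu us, completed %lu -> %lu\n",
                        (unsigned long long)ts, completed, completed_end);
}

int main(void)
{
        do_one_read(0);                 /* healthy read: no output at all */
        completed_counter += 3;         /* pretend grace periods elapsed */
        do_one_read(2);                 /* failing read: one diagnostic line */
        return 0;
}

Built with "gcc -o trace_on_failure trace_on_failure.c", the healthy read is
silent and only the simulated failing read produces a line, which is the
volume reduction the patch is after.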
From de5e64378e6be3757a8533f55bbe1153349806dd Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Thu, 20 Dec 2012 14:11:28 -0500
Subject: rcutorture: Don't compare ptr with 0

Signed-off-by: Sasha Levin
Reviewed-by: Josh Triplett
Signed-off-by: Paul E. McKenney
---
 kernel/rcutorture.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/rcutorture.c')

diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 31dea01c85fd..0249800c6112 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1749,7 +1749,7 @@ static int rcu_torture_barrier_init(void)
         barrier_cbs_wq =
                 kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
                         GFP_KERNEL);
-        if (barrier_cbs_tasks == NULL || barrier_cbs_wq == 0)
+        if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
                 return -ENOMEM;
         for (i = 0; i < n_barrier_cbs; i++) {
                 init_waitqueue_head(&barrier_cbs_wq[i]);
--
cgit v1.2.3
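The one-line fix above is stylistic: comparing a pointer against the integer
literal 0 is valid C, but it mixes pointer and integer notation and is flagged
by checkers such as sparse, so kernel style prefers "!ptr" or an explicit NULL
comparison.  The small userspace example below shows the equivalent spellings;
it is illustrative only and not taken from the patch.

/* Equivalent NULL tests; illustrative only. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *p = malloc(16);

        if (p == 0)             /* legal C, but flagged by static checkers */
                return 1;
        if (p != NULL)          /* explicit, type-correct comparison */
                puts("allocation succeeded (NULL comparison)");
        if (p)                  /* boolean form, matching the "!barrier_cbs_wq" fix */
                puts("allocation succeeded (boolean test)");
        free(p);
        return 0;
}

All three branches behave identically at run time; the patch only changes
which spelling the source uses.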
From 0e11c8e8a60f8591556d142c2e1e53eaf86ab528 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 10 Jan 2013 16:21:07 -0800
Subject: rcu: Make rcutorture's shuffler task shuffle recently added tasks

A number of kthreads have been added to rcutorture, but the shuffler
task was not informed of them, and thus did not shuffle them.  This
commit therefore adds the requisite shuffling, and, while in the area,
fixes up some whitespace issues.

However, the shuffling is intended to keep randomly selected CPUs idle,
which means that the RCU priority boosting kthreads need to avoid waking
up every jiffy.  This commit also makes that fix.

Signed-off-by: Paul E. McKenney
---
 kernel/rcutorture.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

(limited to 'kernel/rcutorture.c')

diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a583f1ce713d..3ebc8bfb5525 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -846,7 +846,7 @@ static int rcu_torture_boost(void *arg)
                 /* Wait for the next test interval. */
                 oldstarttime = boost_starttime;
                 while (ULONG_CMP_LT(jiffies, oldstarttime)) {
-                        schedule_timeout_uninterruptible(1);
+                        schedule_timeout_interruptible(oldstarttime - jiffies);
                         rcu_stutter_wait("rcu_torture_boost");
                         if (kthread_should_stop() ||
                             fullstop != FULLSTOP_DONTSTOP)
@@ -1318,19 +1318,35 @@ static void rcu_torture_shuffle_tasks(void)
                                 set_cpus_allowed_ptr(reader_tasks[i],
                                                      shuffle_tmp_mask);
         }
-
         if (fakewriter_tasks) {
                 for (i = 0; i < nfakewriters; i++)
                         if (fakewriter_tasks[i])
                                 set_cpus_allowed_ptr(fakewriter_tasks[i],
                                                      shuffle_tmp_mask);
         }
-
         if (writer_task)
                 set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
-
         if (stats_task)
                 set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
+        if (stutter_task)
+                set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
+        if (fqs_task)
+                set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
+        if (shutdown_task)
+                set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
+#ifdef CONFIG_HOTPLUG_CPU
+        if (onoff_task)
+                set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+        if (stall_task)
+                set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
+        if (barrier_cbs_tasks)
+                for (i = 0; i < n_barrier_cbs; i++)
+                        if (barrier_cbs_tasks[i])
+                                set_cpus_allowed_ptr(barrier_cbs_tasks[i],
+                                                     shuffle_tmp_mask);
+        if (barrier_task)
+                set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
 
         if (rcu_idle_cpu == -1)
                 rcu_idle_cpu = num_online_cpus() - 1;
--
cgit v1.2.3
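The rcu_torture_boost() change above replaces tick-at-a-time polling
(schedule_timeout_uninterruptible(1)) with a single sleep covering the
remaining interval, so a CPU that the shuffler wants idle is not woken every
jiffy.  The userspace sketch below illustrates the same idea under stated
assumptions; deadline and the use of sleep() are stand-ins for
boost_starttime and the kernel's timeout mechanism, not the real thing.

/* Sleep out the whole remaining interval instead of polling every tick. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        time_t start = time(NULL);
        time_t deadline = start + 3;    /* stand-in for boost_starttime */

        /*
         * Old-style polling would be:
         *      while (time(NULL) < deadline)
         *              usleep(1000);   wakes up ~1000 times per second
         */
        while (time(NULL) < deadline) {
                unsigned int remaining = (unsigned int)(deadline - time(NULL));

                sleep(remaining);       /* one wakeup; loop re-checks if woken early */
        }
        printf("waited roughly %ld seconds with few wakeups\n",
               (long)(time(NULL) - start));
        return 0;
}

The behavior is the same either way; the difference is the number of wakeups,
which is what matters when the goal is to keep a CPU idle.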
From e4aa0da39b6a69f3442ebc33500c21176c5eb560 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Mon, 4 Feb 2013 13:36:13 -0500
Subject: rcu: Allow rcutorture to be built at low optimization levels

The uses of trace_clock_local() are dead code when CONFIG_RCU_TRACE=n,
but some compilers might nevertheless generate code calling this
function.  This commit therefore ensures that trace_clock_local() is
invoked only when CONFIG_RCU_TRACE=y.

Reported-by: Tetsuo Handa
Signed-off-by: Paul E. McKenney
Signed-off-by: Steven Rostedt
---
 kernel/rcutorture.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

(limited to 'kernel/rcutorture.c')

diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index cd4c35d097a4..e1f3a8c96724 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -208,6 +208,20 @@ MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
 #define rcu_can_boost() 0
 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
 
+#ifdef CONFIG_RCU_TRACE
+static u64 notrace rcu_trace_clock_local(void)
+{
+        u64 ts = trace_clock_local();
+        unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
+        return ts;
+}
+#else /* #ifdef CONFIG_RCU_TRACE */
+static u64 notrace rcu_trace_clock_local(void)
+{
+        return 0ULL;
+}
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
 static unsigned long shutdown_time;     /* jiffies to system shutdown. */
 static unsigned long boost_starttime;   /* jiffies of next boost test start. */
 DEFINE_MUTEX(boost_mutex);              /* protect setting boost_starttime */
@@ -1051,7 +1065,7 @@ static void rcu_torture_timer(unsigned long unused)
 
         idx = cur_ops->readlock();
         completed = cur_ops->completed();
-        ts = trace_clock_local();
+        ts = rcu_trace_clock_local();
         p = rcu_dereference_check(rcu_torture_current,
                                   rcu_read_lock_bh_held() ||
                                   rcu_read_lock_sched_held() ||
@@ -1075,8 +1089,6 @@ static void rcu_torture_timer(unsigned long unused)
         }
         completed_end = cur_ops->completed();
         if (pipe_count > 1) {
-                unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
-
                 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
                                           completed, completed_end);
                 rcutorture_trace_dump();
@@ -1122,7 +1134,7 @@ rcu_torture_reader(void *arg)
                 }
                 idx = cur_ops->readlock();
                 completed = cur_ops->completed();
-                ts = trace_clock_local();
+                ts = rcu_trace_clock_local();
                 p = rcu_dereference_check(rcu_torture_current,
                                           rcu_read_lock_bh_held() ||
                                           rcu_read_lock_sched_held() ||
@@ -1144,9 +1156,6 @@ rcu_torture_reader(void *arg)
                 }
                 completed_end = cur_ops->completed();
                 if (pipe_count > 1) {
-                        unsigned long __maybe_unused ts_rem =
-                                        do_div(ts, NSEC_PER_USEC);
-
                         do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
                                                   ts, completed, completed_end);
                         rcutorture_trace_dump();
--
cgit v1.2.3
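The rcu_trace_clock_local() wrapper above exists because a call that is dead
code under CONFIG_RCU_TRACE=n may still be emitted by a compiler running at
low optimization, leaving an unresolved reference at link time; providing a
stub for the tracing-off configuration keeps the call site valid in both
builds.  The standalone sketch below shows the same pattern; CONFIG_MY_TRACE
and my_clock() are hypothetical names, and the plain division stands in for
the kernel's do_div().

/* Hypothetical CONFIG_MY_TRACE / my_clock(): the "always provide a stub" pattern. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#ifdef CONFIG_MY_TRACE
static uint64_t my_clock(void)          /* tracing build: real timestamp */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        /* nanoseconds to microseconds, as the do_div() above does */
        return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec) / 1000ULL;
}
#else /* #ifdef CONFIG_MY_TRACE */
static uint64_t my_clock(void)          /* tracing off: stub that still links */
{
        return 0ULL;
}
#endif /* #else #ifdef CONFIG_MY_TRACE */

int main(void)
{
        printf("timestamp: %llu us\n", (unsigned long long)my_clock());
        return 0;
}

Building it both with and without -DCONFIG_MY_TRACE links either way, even at
-O0, which is the property the patch restores for rcutorture.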