Diffstat (limited to 'kernel/rcutree_plugin.h'):
 kernel/rcutree_plugin.h | 53 +++++++++++++++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd00902..70856a1e80d9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -64,7 +64,7 @@ static void __init rcu_bootup_announce_oddness(void)
#ifdef CONFIG_TREE_PREEMPT_RCU
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;
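
The hunk above switches the RCU_STATE_INITIALIZER() argument from the full
variable name to the short flavor token. A plausible reading, sketched below
with illustrative names rather than the real kernel macro, is that the
initializer can then both token-paste "_state" onto the token to reference the
flavor's state structure and stringify the bare token for a concise name in
trace output.

/* Standalone illustration only; struct and field names are hypothetical. */
struct demo_state {
	struct demo_state *self;	/* built via token pasting (##) */
	const char *name;		/* built via stringification (#) */
};

#define DEMO_STATE_INITIALIZER(flavor) {	\
	.self = &flavor##_state,		\
	.name = #flavor,			\
}

struct demo_state rcu_preempt_state = DEMO_STATE_INITIALIZER(rcu_preempt);
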
@@ -284,7 +284,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
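
The change above marks rcu_read_unlock_special() noinline. A hedged reading:
keeping this rarely-taken slow path out of line keeps the inlined fast path of
__rcu_read_unlock() small. In the kernel, noinline expands to a compiler
attribute, roughly as in the illustrative sketch below (function names are
hypothetical).

#include <linux/compiler.h>	/* noinline, unlikely(); roughly:
				 * #define noinline __attribute__((noinline)) */

static noinline void rarely_taken_slow_path(void)
{
	/* infrequent, heavyweight work kept out of callers' hot code */
}

static void fast_path(int cond)
{
	if (unlikely(cond))
		rarely_taken_slow_path();	/* called, never inlined */
}
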
@@ -387,11 +387,11 @@ void __rcu_read_unlock(void)
struct task_struct *t = current;
barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
- --t->rcu_read_lock_nesting;
- barrier(); /* decrement before load of ->rcu_read_unlock_special */
- if (t->rcu_read_lock_nesting == 0 &&
- unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
+ if (--t->rcu_read_lock_nesting == 0) {
+ barrier(); /* decr before ->rcu_read_unlock_special load */
+ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+ }
#ifdef CONFIG_PROVE_LOCKING
WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
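
The restructured __rcu_read_unlock() above now inspects
->rcu_read_unlock_special only when the outermost nesting level is exited, and
relies on barrier() to keep the compiler from reordering the nesting decrement
with the later load. barrier() is a pure compiler barrier; its common kernel
definition is roughly:

/* Approximate form from include/linux/compiler-gcc.h: an empty asm with a
 * "memory" clobber, which forbids the compiler from moving memory accesses
 * across it but emits no CPU fence instruction. */
#define barrier() __asm__ __volatile__("" : : : "memory")
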
@@ -633,18 +633,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
*/
void synchronize_rcu(void)
{
- struct rcu_synchronize rcu;
-
if (!rcu_scheduler_active)
return;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
+ wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
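
The open-coded wait removed above is factored into wait_rcu_gp(), which takes
the grace-period primitive (here call_rcu) as a parameter. Based on the removed
body, the helper presumably looks roughly like the sketch below; the kernel may
use a typedef for the function-pointer type, and wakeme_after_rcu() plus
struct rcu_synchronize already exist in the rcupdate code.

/* Sketch reconstructed from the lines removed above; not a verbatim copy. */
void wait_rcu_gp(void (*crf)(struct rcu_head *head,
			     void (*func)(struct rcu_head *head)))
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	crf(&rcu.head, wakeme_after_rcu);	/* wake us after a grace period */
	wait_for_completion(&rcu.completion);	/* wait for it */
	destroy_rcu_head_on_stack(&rcu.head);
}
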
@@ -1203,9 +1194,12 @@ static int rcu_boost_kthread(void *arg)
int spincnt = 0;
int more2boost;
+ trace_rcu_utilization("Start boost kthread@init");
for (;;) {
rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
+ trace_rcu_utilization("End boost kthread@rcu_wait");
rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
+ trace_rcu_utilization("Start boost kthread@rcu_wait");
rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
more2boost = rcu_boost(rnp);
if (more2boost)
@@ -1213,11 +1207,14 @@ static int rcu_boost_kthread(void *arg)
else
spincnt = 0;
if (spincnt > 10) {
+ trace_rcu_utilization("End boost kthread@rcu_yield");
rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+ trace_rcu_utilization("Start boost kthread@rcu_yield");
spincnt = 0;
}
}
/* NOTREACHED */
+ trace_rcu_utilization("End boost kthread@notreached");
return 0;
}
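
The paired "Start ..."/"End ..." strings above feed the rcu_utilization
tracepoint. A hedged sketch of such a string-only tracepoint, consistent with
how it is invoked here (the real definition lives in a trace header such as
include/trace/events/rcu.h):

TRACE_EVENT(rcu_utilization,

	TP_PROTO(char *s),

	TP_ARGS(s),

	TP_STRUCT__entry(
		__field(char *, s)
	),

	TP_fast_assign(
		__entry->s = s;
	),

	TP_printk("%s", __entry->s)
);
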
@@ -1266,11 +1263,9 @@ static void invoke_rcu_callbacks_kthread(void)
local_irq_save(flags);
__this_cpu_write(rcu_cpu_has_work, 1);
- if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
- local_irq_restore(flags);
- return;
- }
- wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+ if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+ current != __this_cpu_read(rcu_cpu_kthread_task))
+ wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
local_irq_restore(flags);
}
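
The rewritten wakeup above adds a guard against waking the per-CPU kthread from
within that kthread itself: a task that is current is already running, so the
wakeup would be redundant. The same pattern, with illustrative names:

#include <linux/sched.h>	/* current, wake_up_process() */

static void kick_worker(struct task_struct *worker)
{
	/* Skip the wakeup if the worker does not exist yet, or if we
	 * are the worker ourselves and therefore already awake. */
	if (worker != NULL && worker != current)
		wake_up_process(worker);
}
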
@@ -1464,7 +1459,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
/*
* Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * earlier RCU softirq.
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
*/
static int rcu_cpu_kthread(void *arg)
{
@@ -1475,9 +1471,12 @@ static int rcu_cpu_kthread(void *arg)
char work;
char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+ trace_rcu_utilization("Start CPU kthread@init");
for (;;) {
*statusp = RCU_KTHREAD_WAITING;
+ trace_rcu_utilization("End CPU kthread@rcu_wait");
rcu_wait(*workp != 0 || kthread_should_stop());
+ trace_rcu_utilization("Start CPU kthread@rcu_wait");
local_bh_disable();
if (rcu_cpu_kthread_should_stop(cpu)) {
local_bh_enable();
@@ -1498,11 +1497,14 @@ static int rcu_cpu_kthread(void *arg)
spincnt = 0;
if (spincnt > 10) {
*statusp = RCU_KTHREAD_YIELDING;
+ trace_rcu_utilization("End CPU kthread@rcu_yield");
rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+ trace_rcu_utilization("Start CPU kthread@rcu_yield");
spincnt = 0;
}
}
*statusp = RCU_KTHREAD_STOPPED;
+ trace_rcu_utilization("End CPU kthread@term");
return 0;
}
@@ -1535,7 +1537,10 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
if (!rcu_kthreads_spawnable ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
- t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+ t = kthread_create_on_node(rcu_cpu_kthread,
+ (void *)(long)cpu,
+ cpu_to_node(cpu),
+ "rcuc%d", cpu);
if (IS_ERR(t))
return PTR_ERR(t);
if (cpu_online(cpu))
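
kthread_create_on_node() above lets the spawning code place the kthread's task
structure and stack on the NUMA node of the CPU it will serve. A hedged usage
sketch with an illustrative worker-function name:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/topology.h>

static struct task_struct *spawn_cpu_worker(int cpu, int (*workfn)(void *))
{
	struct task_struct *t;

	/* Allocate the kthread's task_struct and stack on the CPU's home node. */
	t = kthread_create_on_node(workfn, (void *)(long)cpu,
				   cpu_to_node(cpu), "worker/%d", cpu);
	if (IS_ERR(t))
		return t;
	kthread_bind(t, cpu);		/* pin to its CPU before first run */
	wake_up_process(t);
	return t;
}
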