Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r-- | include/linux/rcupdate.h | 46
1 file changed, 42 insertions(+), 4 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 115ead2b5155..275aa3f1062d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
  * that started after call_rcu() was invoked.  RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections.  On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu().  It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section.  Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
  * OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
  * OR
  * anything that disables preemption.
  *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
@@ -191,6 +216,21 @@ extern void rcu_idle_enter(void);
 extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+
+#ifdef CONFIG_RCU_USER_QS
+extern void rcu_user_enter(void);
+extern void rcu_user_exit(void);
+extern void rcu_user_enter_after_irq(void);
+extern void rcu_user_exit_after_irq(void);
+#else
+static inline void rcu_user_enter(void) { }
+static inline void rcu_user_exit(void) { }
+static inline void rcu_user_enter_after_irq(void) { }
+static inline void rcu_user_exit_after_irq(void) { }
+static inline void rcu_user_hooks_switch(struct task_struct *prev,
+					 struct task_struct *next) { }
+#endif /* CONFIG_RCU_USER_QS */
+
 extern void exit_rcu(void);
 
 /**
@@ -210,14 +250,12 @@ extern void exit_rcu(void);
  * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
  * quite limited.  If deeper nesting is required, it will be necessary
  * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
- *
- * This macro may be used from process-level code only.
  */
 #define RCU_NONIDLE(a) \
 	do { \
-		rcu_idle_exit(); \
+		rcu_irq_enter(); \
 		do { a; } while (0); \
-		rcu_idle_enter(); \
+		rcu_irq_exit(); \
 	} while (0)
 
 /*
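For context (not part of the patch), here is a minimal sketch of the usage pattern the new call_rcu() documentation covers: an updater publishes a new version of a structure with rcu_assign_pointer() and hands the old one to call_rcu(); the memory-barrier guarantees documented above are what make it safe for the callback to free the old version once all pre-existing readers are done. struct foo, global_foo, and the helper names are illustrative, not from the patch.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int val;
	struct rcu_head rcu;
};

static struct foo __rcu *global_foo;
static DEFINE_SPINLOCK(foo_lock);

static void foo_reclaim(struct rcu_head *head)
{
	/* All readers that could see the old version have finished. */
	kfree(container_of(head, struct foo, rcu));
}

static void foo_update(int new_val)
{
	struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	struct foo *old_fp;

	if (!new_fp)
		return;
	new_fp->val = new_val;

	spin_lock(&foo_lock);
	old_fp = rcu_dereference_protected(global_foo,
					   lockdep_is_held(&foo_lock));
	rcu_assign_pointer(global_foo, new_fp);
	spin_unlock(&foo_lock);

	/* foo_reclaim() runs only after a grace period elapses. */
	if (old_fp)
		call_rcu(&old_fp->rcu, foo_reclaim);
}

static int foo_read_val(int dflt)
{
	struct foo *fp;
	int val = dflt;

	rcu_read_lock();	/* Begin read-side critical section. */
	fp = rcu_dereference(global_foo);
	if (fp)
		val = fp->val;
	rcu_read_unlock();	/* End read-side critical section. */
	return val;
}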
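The CONFIG_RCU_USER_QS declarations above are the hooks that let RCU treat user-mode execution as an extended quiescent state. A hedged sketch of where an architecture's entry code might invoke them; the surrounding function names are hypothetical, only rcu_user_enter() and rcu_user_exit() come from the patch.

#include <linux/rcupdate.h>

/* demo_return_to_user()/demo_enter_from_user() are made-up stand-ins
 * for an architecture's user-space exit/entry paths. */
static void demo_return_to_user(void)
{
	/* CPU is about to run in user mode: RCU may treat it as quiescent. */
	rcu_user_enter();
	/* ... switch to user mode ... */
}

static void demo_enter_from_user(void)
{
	/* Back in the kernel: RCU must start watching this CPU again. */
	rcu_user_exit();
	/* ... handle the syscall or exception ... */
}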
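Finally, the RCU_NONIDLE() hunk replaces rcu_idle_exit()/rcu_idle_enter() with rcu_irq_enter()/rcu_irq_exit(), which is why the "process-level code only" restriction is deleted. A sketch of the macro's intended use from RCU-idle code, assuming a made-up helper record_idle_event():

#include <linux/rcupdate.h>
#include <linux/smp.h>

/* Hypothetical helper that uses RCU internally, e.g. a tracepoint. */
extern void record_idle_event(int cpu);

static void demo_idle_loop_step(void)
{
	/*
	 * The CPU is RCU-idle here, so plain RCU usage would be illegal.
	 * RCU_NONIDLE() makes RCU watch the CPU just for the enclosed
	 * statement; with this patch it does so via rcu_irq_enter()/
	 * rcu_irq_exit() rather than rcu_idle_exit()/rcu_idle_enter().
	 */
	RCU_NONIDLE(record_idle_event(smp_processor_id()));

	/* ... enter a low-power state ... */
}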